// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.

package cli

import (
"context"
"fmt"
"math"
"net"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/cli/clierror"
"github.com/cockroachdb/cockroach/pkg/cli/clierrorplus"
"github.com/cockroachdb/cockroach/pkg/cli/cliflagcfg"
"github.com/cockroachdb/cockroach/pkg/cli/cliflags"
"github.com/cockroachdb/cockroach/pkg/cli/exit"
"github.com/cockroachdb/cockroach/pkg/docs"
"github.com/cockroachdb/cockroach/pkg/geo/geos"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvstorage"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security/clientsecopts"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverctl"
"github.com/cockroachdb/cockroach/pkg/server/status"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/fs"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/cgroups"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
"github.com/cockroachdb/cockroach/pkg/util/sdnotify"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/sysutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/errors/oserror"
"github.com/cockroachdb/logtags"
"github.com/cockroachdb/pebble/vfs"
"github.com/cockroachdb/redact"
"github.com/spf13/cobra"
)

// debugTSImportFile is the path to a file (containing data coming from
// `./cockroach debug tsdump --format=raw`) that will be ingested upon server
// start. This is an experimental feature and may break clusters it is invoked
// against. The data will not display properly in the UI unless the source
// cluster had one store per node, with the store ID and node ID lining up.
// Additionally, the local server's stores and nodes must match this pattern as
// well. The only expected use case for this env var is against local
// single-node throwaway clusters, and consequently this variable is only used
// for the start-single-node command.
//
// To be able to visualize the timeseries data properly, a mapping file must be
// provided as well. This maps StoreIDs to the owning NodeID, i.e. the file
// looks like this (if s1 is on n3 and s2 is on n4):
// 1: 3
// 2: 4
// [...]
//
// See #64329 for details.
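//
// A hypothetical invocation (the file names here are placeholders, not
// taken from this file):
//
//	COCKROACH_DEBUG_TS_IMPORT_FILE=tsdump.raw \
//	COCKROACH_DEBUG_TS_IMPORT_MAPPING_FILE=tsdump.yaml \
//	./cockroach start-single-node --insecure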
var debugTSImportFile = envutil.EnvOrDefaultString("COCKROACH_DEBUG_TS_IMPORT_FILE", "")
var debugTSImportMappingFile = envutil.EnvOrDefaultString("COCKROACH_DEBUG_TS_IMPORT_MAPPING_FILE", "")
// startCmd starts a node by initializing the stores and joining
// the cluster.
var startCmd = &cobra.Command{
Use: "start",
Short: "start a node in a multi-node cluster",
Long: `
Start a CockroachDB node, which will export data from one or more
storage devices, specified via --store flags.
Specify the --join flag to point to another node or nodes that are
part of the same cluster. The other nodes do not need to be started
yet, and if the addresses of the other nodes to be added are not yet
known, it is legal for the first node to join itself.
To initialize the cluster, use 'cockroach init'.
`,
Example: ` cockroach start --insecure --store=attrs=ssd,path=/mnt/ssd1 --join=host:port,[host:port]`,
Args: cobra.NoArgs,
RunE: clierrorplus.MaybeShoutError(clierrorplus.MaybeDecorateError(runStartJoin)),
}
// startSingleNodeCmd starts a node by initializing the stores.
var startSingleNodeCmd = &cobra.Command{
Use: "start-single-node",
Short: "start a single-node cluster",
Long: `
Start a CockroachDB node, which will export data from one or more
storage devices, specified via --store flags.
The cluster will also be automatically initialized with
replication disabled (replication factor = 1).
`,
Example: ` cockroach start-single-node --insecure --store=attrs=ssd,path=/mnt/ssd1`,
Args: cobra.NoArgs,
RunE: clierrorplus.MaybeShoutError(clierrorplus.MaybeDecorateError(runStartSingleNode)),
}
// StartCmds lists the commands that start KV nodes as a server.
// This includes 'start' and 'start-single-node' but excludes
// the MT SQL server (not KV node) and 'demo' (not a server).
var StartCmds = []*cobra.Command{startCmd, startSingleNodeCmd}
// serverCmds lists the commands that start servers.
var serverCmds = append(StartCmds, mtStartSQLCmd)
// customLoggingSetupCmds lists the commands that call setupLogging()
// after other types of configuration.
var customLoggingSetupCmds = append(
serverCmds, debugCheckLogConfigCmd, demoCmd, statementBundleRecreateCmd,
)
// RegisterCommandWithCustomLogging is used by cliccl to note commands which
// want to suppress default logging setup.
func RegisterCommandWithCustomLogging(cmd *cobra.Command) {
customLoggingSetupCmds = append(customLoggingSetupCmds, cmd)
}
func initBlockProfile() {
// Enable the block profile for a sample of mutex and channel operations.
// Smaller values provide more accurate profiles but are more
// expensive. 0 and 1 are special: 0 disables the block profile and
// 1 captures 100% of block events. For other values, the profiler
// will sample one event per X nanoseconds spent blocking.
//
// The block profile can be viewed with `pprof http://HOST:PORT/debug/pprof/block`
//
// The utility of the block profile (aka blocking profile) has diminished
// with the advent of the mutex profile. We currently leave the block profile
// disabled by default as it has a non-zero performance impact.
d := envutil.EnvOrDefaultInt64("COCKROACH_BLOCK_PROFILE_RATE", 0)
runtime.SetBlockProfileRate(int(d))
}
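
// exampleBlockProfileRates is a hypothetical sketch (not part of the
// original file) illustrating the three regimes of
// runtime.SetBlockProfileRate described above.
func exampleBlockProfileRates() {
	runtime.SetBlockProfileRate(0)     // 0 disables the block profile (the default here)
	runtime.SetBlockProfileRate(1)     // 1 captures 100% of block events
	runtime.SetBlockProfileRate(10000) // ~1 sample per 10000ns (10µs) spent blocking
}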
func initMutexProfile() {
// Enable the mutex profile for a fraction of mutex contention events.
// Smaller values provide more accurate profiles but are more expensive. 0
// and 1 are special: 0 disables the mutex profile and 1 captures 100% of
// mutex contention events. For other values, the profiler will sample on
// average 1/X events.
//
// The mutex profile can be viewed with `pprof http://HOST:PORT/debug/pprof/mutex`
d := envutil.EnvOrDefaultInt("COCKROACH_MUTEX_PROFILE_RATE",
1000 /* 1 sample per 1000 mutex contention events */)
runtime.SetMutexProfileFraction(d)
}
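
// exampleMutexProfileFractions is a hypothetical sketch (not part of the
// original file) of the corresponding regimes of
// runtime.SetMutexProfileFraction: with the default of 1000 above, on
// average one in every 1000 mutex contention events is sampled.
func exampleMutexProfileFractions() {
	runtime.SetMutexProfileFraction(0)    // 0 disables the mutex profile
	runtime.SetMutexProfileFraction(1)    // 1 captures 100% of contention events
	runtime.SetMutexProfileFraction(1000) // sample ~1/1000 of contention events
}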
func initTraceDir(ctx context.Context, dir string) {
if dir == "" {
return
}
if err := os.MkdirAll(dir, 0755); err != nil {
// This is possible when running with only in-memory stores;
// in that case the start-up code sets the output directory
// to the current directory (.). If running the process
// from a directory which is not writable, we won't
// be able to create a sub-directory here.
err = errors.WithHint(err, "Try changing the CWD of the cockroach process to a writable directory.")
log.Warningf(ctx, "cannot create trace dir; traces will not be dumped: %v", err)
return
}
}
func initTempStorageConfig(
ctx context.Context, st *cluster.Settings, stopper *stop.Stopper, stores base.StoreSpecList,
) (base.TempStorageConfig, error) {
// Initialize the target directory for temporary storage. If encryption at
// rest is enabled in any fashion, we'll want temp storage to be encrypted
// too. To achieve this, we use the first encrypted store as temp dir
// target, if any. If we can't find one, we use the first StoreSpec in the
// list.
//
// While we look, we also clean up any abandoned temporary directories. We
// don't know which store spec was used previously—and it may change if
// encryption gets enabled after the fact—so we check each store.
specIdxDisk := -1
specIdxEncrypted := -1
for i, spec := range stores.Specs {
if spec.InMemory {
continue
}
if spec.IsEncrypted() && specIdxEncrypted == -1 {
// TODO(jackson): One store's EncryptionOptions may say to encrypt
// with a real key, while another store's may say to use key=plain.
// This provides no guarantee that we'll pick the store with the real key.
specIdxEncrypted = i
}
if specIdxDisk == -1 {
specIdxDisk = i
}
recordPath := filepath.Join(spec.Path, server.TempDirsRecordFilename)
if err := fs.CleanupTempDirs(recordPath); err != nil {
return base.TempStorageConfig{}, errors.Wrap(err,
"could not cleanup temporary directories from record file")
}
}
// Use first store by default. This might be an in-memory store.
specIdx := 0
if specIdxEncrypted >= 0 {
// Prefer an encrypted store.
specIdx = specIdxEncrypted
} else if specIdxDisk >= 0 {
// Prefer a non-encrypted on-disk store.
specIdx = specIdxDisk
}
useStore := stores.Specs[specIdx]
var recordPath string
if !useStore.InMemory {
recordPath = filepath.Join(useStore.Path, server.TempDirsRecordFilename)
}
// The temp store size can depend on the location of the first regular store
// (if it's expressed as a percentage), so we resolve that flag here.
var tempStorePercentageResolver percentResolverFunc
if !useStore.InMemory {
dir := useStore.Path
// Create the store dir, if it doesn't exist. The dir is required to exist
// by diskPercentResolverFactory.
if err := os.MkdirAll(dir, 0755); err != nil {
return base.TempStorageConfig{}, errors.Wrapf(err, "failed to create dir for first store: %s", dir)
}
var err error
tempStorePercentageResolver, err = diskPercentResolverFactory(dir)
if err != nil {
return base.TempStorageConfig{}, errors.Wrapf(err, "failed to create resolver for: %s", dir)
}
} else {
tempStorePercentageResolver = memoryPercentResolver
}
var tempStorageMaxSizeBytes int64
if err := startCtx.diskTempStorageSizeValue.Resolve(
&tempStorageMaxSizeBytes, tempStorePercentageResolver,
); err != nil {
return base.TempStorageConfig{}, err
}
if !startCtx.diskTempStorageSizeValue.IsSet() {
// The default temp storage size is different when the temp
// storage is in memory (which occurs when no temp directory
// is specified and the first store is in memory).
if startCtx.tempDir == "" && useStore.InMemory {
tempStorageMaxSizeBytes = base.DefaultInMemTempStorageMaxSizeBytes
} else {
tempStorageMaxSizeBytes = base.DefaultTempStorageMaxSizeBytes
}
}
// Initialize a base.TempStorageConfig based on first store's spec and
// cli flags.
tempStorageConfig := base.TempStorageConfigFromEnv(
ctx,
st,
useStore,
startCtx.tempDir,
tempStorageMaxSizeBytes,
)
// Set temp directory to first store's path if the temp storage is not
// in memory.
tempDir := startCtx.tempDir
if tempDir == "" && !tempStorageConfig.InMemory {
tempDir = useStore.Path
}
// Create the temporary subdirectory for the temp engine.
{
var err error
if tempStorageConfig.Path, err = fs.CreateTempDir(tempDir, server.TempDirPrefix, stopper); err != nil {
return base.TempStorageConfig{}, errors.Wrap(err, "could not create temporary directory for temp storage")
}
}
// We record the new temporary directory in the record file (if it
// exists) for cleanup in case the node crashes.
if recordPath != "" {
if err := fs.RecordTempDir(recordPath, tempStorageConfig.Path); err != nil {
return base.TempStorageConfig{}, errors.Wrapf(
err,
"could not record temporary directory path to record file: %s",
recordPath,
)
}
}
return tempStorageConfig, nil
}
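
// examplePickTempStoreIdx is a hypothetical distillation (not part of the
// original file) of the store-preference logic above: the first encrypted
// on-disk store wins, then the first on-disk store, then store 0 (which
// might be in-memory).
func examplePickTempStoreIdx(specIdxEncrypted, specIdxDisk int) int {
	if specIdxEncrypted >= 0 {
		return specIdxEncrypted
	}
	if specIdxDisk >= 0 {
		return specIdxDisk
	}
	return 0
}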
type newServerFn func(ctx context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverctl.ServerStartupInterface, error)
var errCannotUseJoin = errors.New("cannot use --join with 'cockroach start-single-node' -- use 'cockroach start' instead")
func runStartSingleNode(cmd *cobra.Command, args []string) error {
joinFlag := cliflagcfg.FlagSetForCmd(cmd).Lookup(cliflags.Join.Name)
if joinFlag.Changed {
return errCannotUseJoin
}
// Now actually set the flag as changed so that the start code
// doesn't warn that it was not set. This is all to let `start-single-node`
// get by without the use of --join flags.
joinFlag.Changed = true
// Make the node auto-init the cluster if not done already.
serverCfg.AutoInitializeCluster = true
// Allow passing in a timeseries file.
if debugTSImportFile != "" {
serverCfg.TestingKnobs.Server = &server.TestingKnobs{
ImportTimeseriesFile: debugTSImportFile,
ImportTimeseriesMappingFile: debugTSImportMappingFile,
}
}
return runStart(cmd, args, true /*startSingleNode*/)
}
func runStartJoin(cmd *cobra.Command, args []string) error {
return runStart(cmd, args, false /*startSingleNode*/)
}
// runStart starts the cockroach node using --store as the list of
// storage devices ("stores") on this machine and --join as the list
// of other active nodes used to join this node to the cockroach
// cluster, if this is its first time connecting.
//
// The argument startSingleNode is morally equivalent to `cmd ==
// startSingleNodeCmd`, and triggers special initialization specific
// to one-node clusters. See server/initial_sql.go for details.
//
// We need a separate argument instead of solely relying on cmd
// because we cannot refer to startSingleNodeCmd under
// runStartInternal: there would be a cyclic dependency between
// runStart, runStartSingleNode and startSingleNodeCmd.
func runStart(cmd *cobra.Command, args []string, startSingleNode bool) error {
const serverType redact.SafeString = "node"
newServerFn := func(_ context.Context, serverCfg server.Config, stopper *stop.Stopper) (serverctl.ServerStartupInterface, error) {
// Beware: do not write simply 'return server.NewServer(...)'. Doing
// so would cause the serverctl.ServerStartupInterface reference to
// always be non-nil, even if NewServer returns a nil pointer (and
// an error). The code below depends on the interface
// reference remaining nil in case of error.
s, err := server.NewServer(serverCfg, stopper)
if err != nil {
return nil, err
}
return s, nil
}
return runStartInternal(cmd, serverType, serverCfg.InitNode, newServerFn, startSingleNode)
}
// runStartInternal contains the code common to start a regular server
// or a SQL-only server.
func runStartInternal(
cmd *cobra.Command,
serverType redact.SafeString,
initConfigFn func(context.Context) error,
newServerFn newServerFn,
startSingleNode bool,
) error {
tBegin := timeutil.Now()
// First things first: if the user wants background processing,
// relinquish the terminal ASAP by forking and exiting.
//
// If executing in the background, the function returns ok == true in
// the parent process (regardless of err) and the parent exits at
// this point.
if ok, err := maybeRerunBackground(); ok {
return err
}
// Change the permission mask for all created files.
//
// We're considering everything produced by a cockroach node
// to potentially contain sensitive information, so it should
// not be world-readable.
disableOtherPermissionBits()
// Set up the signal handlers. This also ensures that any of these
// signals received beyond this point do not interrupt the startup
// sequence until the point where signals are checked below.
// We want to set up signal handling before starting logging, because
// logging uses buffering, and we want to be able to sync
// the buffers in the signal handler below. If we started capturing
// signals later, some startup logging might be lost.
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, DrainSignals...)
if exitAbruptlySignal != nil {
signal.Notify(signalCh, exitAbruptlySignal)
}
// Check for stores with full disks and exit with an informative exit
// code. This needs to happen early during start, before we perform any
// writes to the filesystem including log rotation. We need to guarantee
// that the process continues to exit with the Disk Full exit code. A
// flapping exit code can affect alerting, including the alerting
// performed within CockroachCloud.
if err := exitIfDiskFull(vfs.Default, serverCfg.Stores.Specs); err != nil {
return err
}
// If any store has something to say against a server start-up
// (e.g. previously detected corruption), listen to them now.
if err := serverCfg.Stores.PriorCriticalAlertError(); err != nil {
return clierror.NewError(err, exit.FatalError())
}
// Set a MakeProcessUnavailableFunc that will close all sockets. This guards
// against a persistent disk stall that prevents the process from exiting or
// making progress.
log.SetMakeProcessUnavailableFunc(closeAllSockets)
// Set up a cancellable context for the entire start command.
// The context will be canceled at the end.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// The context annotation ensures that server identifiers show up
// in the logging metadata as soon as they are known.
ambientCtx := serverCfg.AmbientCtx
// Annotate the context, and set up a tracing span for the start process.
//
// We use a tracing span because we want any logging happening beyond
// this point to be accounted to this start context, including
// logging related to the initialization of the logging
// infrastructure below. This span concludes when the startup
// goroutine started below has completed. TODO(andrei): we don't
// close the span on the early returns below.
var startupSpan *tracing.Span
ctx, startupSpan = ambientCtx.AnnotateCtxWithSpan(ctx, "server start")
// Set up the logging and profiling output.
//
// We want to do this as early as possible, because most of the code
// in CockroachDB may use logging, and until logging has been
// initialized log files will be created in $TMPDIR instead of their
// expected location.
//
// This initialization uses the various configuration parameters
// initialized by flag handling (before runStart was called). Any
// additional server configuration tweaks for the startup process
// must be necessarily non-logging-related, as logging parameters
// cannot be picked up beyond this point.
stopper, err := setupAndInitializeLoggingAndProfiling(ctx, cmd, true /* isServerCmd */)
if err != nil {
return err
}
stopper.SetTracer(serverCfg.BaseConfig.AmbientCtx.Tracer)
// Tweak GOMAXPROCS if we're in a cgroup / container that has cpu limits set.
// The Go default for GOMAXPROCS is NumCPU(); however, this is less
// than ideal if the cgroup is limited to a number lower than that.
//
// TODO(bilal): various global settings have already been initialized based on
// GOMAXPROCS(0) by now.
cgroups.AdjustMaxProcs(ctx)
fs := cliflagcfg.FlagSetForCmd(cmd)
// Check the --join flag.
if fl := fs.Lookup(cliflags.Join.Name); fl != nil && !fl.Changed {
err := errors.WithHint(
errors.New("no --join flags provided to 'cockroach start'"),
"Consider using 'cockroach init' or 'cockroach start-single-node' instead")
return err
}
// Check the --tenant-id-file flag.
if fl := fs.Lookup(cliflags.TenantIDFile.Name); fl != nil && fl.Changed {
fileName := fl.Value.String()
serverCfg.DelayedSetTenantID = func(
ctx context.Context,
) (roachpb.TenantID, roachpb.Locality, error) {
tenantID, err := tenantIDFromFile(ctx, fileName, nil, nil, nil)
if err != nil {
return roachpb.TenantID{}, roachpb.Locality{}, err
}
if err := tryReadLocalityFileFlag(fs); err != nil {
return roachpb.TenantID{}, roachpb.Locality{}, err
}
return tenantID, serverCfg.Locality, nil
}
}
// Now perform additional configuration tweaks specific to the start
// command.
// Set the soft memory limit on the Go runtime.
if err = func() error {
if startCtx.goMemLimitValue.IsSet() {
if goMemLimit < 0 {
return errors.New("--max-go-memory must be non-negative")
} else if goMemLimit > 0 && goMemLimit < defaultGoMemLimitMinValue {
log.Ops.Shoutf(
ctx, severity.WARNING, "--max-go-memory (%s) is smaller "+
"than the recommended minimum (%s), consider increasing it",
humanizeutil.IBytes(goMemLimit), humanizeutil.IBytes(defaultGoMemLimitMinValue),
)
}
} else {
if envVarLimitString, envVarSet := envutil.ExternalEnvString("GOMEMLIMIT", 1); envVarSet {
// When --max-go-memory is not specified, but the env var is
// set, we don't change it, so we just log a warning if the
// value is too small.
envVarLimit, err := humanizeutil.ParseBytes(envVarLimitString)
if err != nil {
return errors.Wrapf(err, "couldn't parse GOMEMLIMIT value %s", envVarLimitString)
}
if envVarLimit < defaultGoMemLimitMinValue {
log.Ops.Shoutf(
ctx, severity.WARNING, "GOMEMLIMIT (%s) is smaller "+
"than the recommended minimum (%s), consider increasing it",
humanizeutil.IBytes(envVarLimit), humanizeutil.IBytes(defaultGoMemLimitMinValue),
)
}
return nil
}
// If --max-go-memory wasn't specified, we set it to a reasonable
// default value.
goMemLimit = getDefaultGoMemLimit(ctx)
}
if goMemLimit == 0 {
// Value of 0 indicates that the soft memory limit should be
// disabled.
goMemLimit = math.MaxInt64
} else {
log.Ops.Infof(ctx, "soft memory limit of Go runtime is set to %s", humanizeutil.IBytes(goMemLimit))
}
debug.SetMemoryLimit(goMemLimit)
return nil
}(); err != nil {
return err
}
// Set the GC target percent on the Go runtime.
if err := func() error {
var goGCPercent int
if fs.Changed(cliflags.GoGCPercent.Name) {
goGCPercent = startCtx.goGCPercent
} else {
if _, envVarSet := envutil.ExternalEnvString("GOGC", 1); envVarSet {
// When --go-gc-percent is not specified but the env var is, we defer to
// the env var.
return nil
}
// When neither the --go-gc-percent flag nor the GOGC env var is set,
// increase the GC target percent to 300% (default 100%) to reduce the
// frequency of GC cycles. However, only do so if a soft memory limit is
// also configured, to avoid introducing OOMs.
goMemLimit := debug.SetMemoryLimit(-1 /* get without adjusting */)
if goMemLimit == math.MaxInt64 {
// If the soft memory limit is disabled, don't adjust the GC percent.
// Leave it at the default 100%.
return nil
}
goGCPercent = 300
}
var goGCPercentStr redact.RedactableString
if goGCPercent < 0 {
goGCPercentStr = `"off"`
} else {
goGCPercentStr = redact.Sprintf("%d%%", goGCPercent)
}
log.Ops.Infof(ctx, "GC target percentage of Go runtime is set to %s", goGCPercentStr)
debug.SetGCPercent(goGCPercent)
return nil
}(); err != nil {
return err
}
// Initialize the node's configuration from startup parameters.
// This also reads the part of the configuration that comes from
// environment variables.
if err := initConfigFn(ctx); err != nil {
return errors.Wrapf(err, "failed to initialize %s", serverType)
}
// Derive temporary/auxiliary directory specifications.
serverCfg.ExternalIODir = startCtx.externalIODir
st := serverCfg.BaseConfig.Settings
if serverCfg.SQLConfig.TempStorageConfig, err = initTempStorageConfig(
ctx, st, stopper, serverCfg.Stores,
); err != nil {
return err
}
// The configuration is now ready to report to the user and the log
// file. We had to wait until after InitNode() so that all configuration
// environment variables, which are also reported, have been read and
// registered.
reportConfiguration(ctx)
// ReadyFn will be called when the server has started listening on
// its network sockets, but perhaps before it has done bootstrapping
// and thus before Start() completes.
serverCfg.ReadyFn = func(waitForInit bool) { reportReadinessExternally(ctx, cmd, waitForInit) }
// DelayedBootstrapFn will be called if the bootstrap process is
// taking a bit long.
serverCfg.DelayedBootstrapFn = func() {
const msg = `The server appears to be unable to contact the other nodes in the cluster. Please try:
- starting the other nodes, if you haven't already;
- double-checking that the '--join' and '--listen'/'--advertise' flags are set up correctly;
- running the 'cockroach init' command if you are trying to initialize a new cluster.
If problems persist, please see %s.`
docLink := docs.URL("cluster-setup-troubleshooting.html")
if !startCtx.inBackground {
log.Ops.Shoutf(ctx, severity.WARNING, msg, docLink)
} else {
// Don't shout to stderr since the server will have detached by
// the time this function gets called.
log.Ops.Warningf(ctx, msg, docLink)
}
}
initGEOS(ctx)
// Beyond this point, the configuration is set and the server is
// ready to start.
// Run the rest of the startup process in a goroutine separate from
// the main goroutine to avoid preventing proper handling of signals
// if we get stuck on something during initialization (#10138).
srvStatus, serverShutdownReqC := createAndStartServerAsync(ctx,
tBegin, &serverCfg, stopper, startupSpan, newServerFn, startSingleNode, serverType)
return waitForShutdown(
// NB: we delay the access to s, as it is assigned
// asynchronously in a goroutine above.
stopper, serverShutdownReqC, signalCh,
srvStatus)
}
const (
// defaultGoMemLimitSQLMultiple determines the multiple of SQL memory pool
// size that we use in the calculation of the default value of the
// goMemLimit.
//
// Since not every memory allocation is registered with the memory
// accounting system of CRDB, we need to give it some room to prevent
// the Go GC from being too aggressive while trying to stay under the
// GOMEMLIMIT. The default multiple of 2.25x over the memory pool size
// should give enough room for those unaccounted-for allocations.
defaultGoMemLimitSQLMultiple = 2.25
// defaultGoMemLimitMinValue determines the lower bound on the default
// value of the goMemLimit. The lower bound takes precedence over the
// upper bound when the two conflict.
defaultGoMemLimitMinValue = 256 << 20 /* 256MiB */
// The upper bound on the default value for goMemLimit is computed as
// follows:
//
//	upper bound = 0.9 * SystemMemory - 1.15 * PebbleCache
//
// The rationale for this formula is as follows:
// - we don't want the estimated max memory usage to exceed 90% of the
// available RAM, to prevent OOMs;
// - the Go runtime doesn't control the pebble cache, so we need to
// subtract it;
// - anecdotally, the pebble cache can have some slop over its target size
// (perhaps due to memory fragmentation), so we adjust the footprint of the
// cache by 15%.
//
// defaultGoMemLimitMaxTotalSystemMemUsage determines the maximum
// percentage of the system memory that goMemLimit and the pebble cache
// can use together.
defaultGoMemLimitMaxTotalSystemMemUsage = 0.9
// defaultGoMemLimitCacheSlopMultiple determines a "slop" multiple that we
// use on top of the pebble cache size when computing the upper bound.
defaultGoMemLimitCacheSlopMultiple = 1.15
)
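
// As a worked example with hypothetical numbers (not from this file):
// with 32 GiB of system memory, an 8 GiB pebble cache, and an 8 GiB SQL
// memory pool, the bounds above yield
//
//	upper bound = 0.9*32GiB - 1.15*8GiB ≈ 19.6 GiB
//	candidate   = 2.25*8GiB = 18 GiB
//
// so the 18 GiB candidate is below the upper bound (and above the 256
// MiB lower bound) and becomes the default goMemLimit.
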
// getDefaultGoMemLimit returns a reasonable default value for the soft memory
// limit of the Go runtime based on SQL memory pool and the cache sizes (which
// must be already set in serverCfg). It also warns the user in some cases when
// suboptimal flags or hardware is detected.
func getDefaultGoMemLimit(ctx context.Context) int64 {
sysMem, err := status.GetTotalMemory(ctx)
if err != nil {
return 0
}
maxGoMemLimit := int64(defaultGoMemLimitMaxTotalSystemMemUsage*float64(sysMem) -
defaultGoMemLimitCacheSlopMultiple*float64(serverCfg.CacheSize))
if maxGoMemLimit < defaultGoMemLimitMinValue {
// Most likely, --cache is set to at least 75% of available RAM which
// has already triggered a warning in maybeWarnMemorySizes(), so we
// don't shout here.
maxGoMemLimit = defaultGoMemLimitMinValue
}
limit := int64(defaultGoMemLimitSQLMultiple * float64(serverCfg.MemoryPoolSize))
if limit < defaultGoMemLimitMinValue {
log.Ops.Shoutf(
ctx, severity.WARNING, "--max-sql-memory (%s) is set too low, "+
"consider increasing it", humanizeutil.IBytes(serverCfg.MemoryPoolSize),
)
limit = defaultGoMemLimitMinValue
}
if limit > maxGoMemLimit {
log.Ops.Shoutf(
ctx, severity.WARNING, "recommended default value of "+
"--max-go-memory (%s) was truncated to %s, consider reducing "+
"--max-sql-memory (%s) and / or --cache (%s); total system/cgroup memory: %s.",
humanizeutil.IBytes(limit), humanizeutil.IBytes(maxGoMemLimit),
humanizeutil.IBytes(serverCfg.MemoryPoolSize),
humanizeutil.IBytes(serverCfg.CacheSize),
humanizeutil.IBytes(sysMem),
)
limit = maxGoMemLimit
}
return limit
}
// createAndStartServerAsync starts an async goroutine which instantiates
// the server and starts it.
// We run it in a separate goroutine because the instantiation and start
// could block, and we want to retain the option to start shutting down
// the process (e.g. via Ctrl+C on the terminal) even in that case.
// The shutdown logic thus starts running asynchronously, via waitForShutdown,
// concurrently with createAndStartServerAsync.
//
// The arguments are as follows:
// - tBegin: time when startup began; used to report statistics at the end of startup.
// - serverCfg: the server configuration.
// - stopper: the stopper used to start all the async tasks. This is the stopper
// used by the shutdown logic.
// - startupSpan: the tracing span for the context that was started earlier
// during startup. It needs to be finalized when the async goroutine completes.
// - newServerFn: a constructor function for the server object.
// - startSingleNode: whether to run the special initialization for
// single-node clusters (see runStart).
// - serverType: a title used for the type of server. This is used
// when reporting the startup messages on the terminal and logs.
func createAndStartServerAsync(
ctx context.Context,
tBegin time.Time,
serverCfg *server.Config,
stopper *stop.Stopper,
startupSpan *tracing.Span,
newServerFn newServerFn,
startSingleNode bool,
serverType redact.SafeString,
) (srvStatus *serverStatus, serverShutdownReqC <-chan serverctl.ShutdownRequest) {
var serverStatusMu serverStatus
var s serverctl.ServerStartupInterface
shutdownReqC := make(chan serverctl.ShutdownRequest, 1)
log.Ops.Infof(ctx, "starting cockroach %s", serverType)
go func() {
// Ensure that the log files see the startup messages immediately.
defer log.FlushAllSync()
// If anything goes dramatically wrong, use Go's panic/recover
// mechanism to intercept the panic and log the panic details to
// the error reporting server.
defer func() {
var sv *settings.Values
if s != nil {
sv = &s.ClusterSettings().SV
}
if r := recover(); r != nil {
// This ensures that the panic, if any, is also reported on stderr.
// The settings.Values, if available, determines whether a Sentry
// report should be sent. No Sentry report is sent if sv is nil.
logcrash.ReportPanic(ctx, sv, r, 1 /* depth */)
panic(r)
}
}()
// When the startup goroutine completes, so can the startup span.
defer startupSpan.Finish()
// Any error beyond this point is reported through shutdownReqC.
if err := func() error {
// Instantiate the server.
var err error
s, err = newServerFn(ctx, *serverCfg, stopper)
if err != nil {
return errors.Wrap(err, "failed to start server")
}
// Have we already received a signal to terminate? If so, just
// stop here.
if serverStatusMu.shutdownInProgress() {
return nil
}
// Attempt to start the server.
if err := s.PreStart(ctx); err != nil {
if le := (*server.ListenError)(nil); errors.As(err, &le) {
const errorPrefix = "consider changing the port via --%s"
if le.Addr == serverCfg.Addr {
err = errors.Wrapf(err, errorPrefix, cliflags.ListenAddr.Name)
} else if le.Addr == serverCfg.HTTPAddr {
err = errors.Wrapf(err, errorPrefix, cliflags.ListenHTTPAddr.Name)
}
}
return errors.Wrap(err, "cockroach server exited with error")
}
// Server started, notify the shutdown monitor running concurrently.
if shutdownInProgress := serverStatusMu.setStarted(s, stopper); shutdownInProgress {
// A shutdown was requested already, e.g. by sending SIGTERM to the process:
// maybeWaitForShutdown (which runs concurrently with this goroutine) has
// called serverStatusMu.startShutdown() already.
// However, because setStarted() had not been called before,
// maybeWaitForShutdown did not call Stop on the stopper.
// So we do it here.
stopper.Stop(ctx)
return nil
}
// After this point, if a shutdown is requested concurrently
// with the startup steps below, the stopper.Stop() method will
// be called by the shutdown goroutine, which in turn will cause
// all these startup steps to fail. So we do not need to look at
// the "shutdown status" in serverStatusMu any more.
// Accept internal clients early, as RunInitialSQL might need it.
if err := s.AcceptInternalClients(ctx); err != nil {
return err
}
// Run one-off cluster initialization.
if err := s.RunInitialSQL(ctx, startSingleNode, "" /* adminUser */, "" /* adminPassword */); err != nil {
return err
}
// Now let SQL clients in.
if err := s.AcceptClients(ctx); err != nil {
return err
}
// Now inform the user that the server is running and tell the
// user about its run-time derived parameters.
return reportServerInfo(ctx, tBegin, serverCfg, s.ClusterSettings(),
serverType, s.InitialStart(), s.LogicalClusterID(), startCtx.externalIODir)
}(); err != nil {
shutdownReqC <- serverctl.MakeShutdownRequest(
serverctl.ShutdownReasonServerStartupError, errors.Wrapf(err, "server startup failed"))
} else {
// Start a goroutine that watches for shutdown requests and notifies
// shutdownReqC.
go func() {
select {
case req := <-s.ShutdownRequested():
shutdownCtx := s.AnnotateCtx(context.Background())
log.Infof(shutdownCtx, "server requesting spontaneous shutdown: %v", req.ShutdownCause())
shutdownReqC <- req
case <-stopper.ShouldQuiesce():
}
}()
}
}()
serverShutdownReqC = shutdownReqC
srvStatus = &serverStatusMu
return srvStatus, serverShutdownReqC
}
// serverStatus coordinates the async goroutine that starts the server
// up (e.g. in runStart) and the async goroutine that stops the server
// (in waitForShutdown).
//
// We need this intermediate coordination because it isn't safe to try
// to drain a server that doesn't exist or is in the middle of
// starting up, or to start a server after shutdown has begun.
type serverStatus struct {
syncutil.Mutex
// s is a reference to the server, to be used by the shutdown process. This
// starts as nil, and is set by setStarted(). Once set, a graceful shutdown
// should use a soft drain.
s serverctl.ServerShutdownInterface
// stopper is the server's stopper. This is set in setStarted(), together with
// `s`. The stopper is handed out to callers of startShutdown(), who will
// Stop() it.
stopper *stop.Stopper
// shutdownRequested indicates that a shutdown has been requested
// already. Once it is true, server startup should stop.
shutdownRequested bool
}
// setStarted marks the server as started. The serverStatus receives a reference
// to the server and to the server's stopper. These references will be handed to
// the shutdown process, which calls startShutdown(). In particular, the
// shutdown process will take responsibility for calling stopper.Stop().
//
// setStarted returns whether shutdown has been requested already. If it has,
// then the serverStatus does not take ownership of the stopper; the caller is
// responsible for calling stopper.Stop().
func (s *serverStatus) setStarted(
server serverctl.ServerShutdownInterface, stopper *stop.Stopper,
) bool {
s.Lock()
defer s.Unlock()
if s.shutdownRequested {
return true
}
s.s = server
s.stopper = stopper
return false
}
// shutdownInProgress returns whether a shutdown has been requested
// already.
func (s *serverStatus) shutdownInProgress() bool {
s.Lock()
defer s.Unlock()
return s.shutdownRequested
}
// startShutdown registers the shutdown request and returns whether the server
// was started already. If the server started, a reference to the server is
// also returned, along with a reference to the stopper that the caller must
// eventually Stop().
func (s *serverStatus) startShutdown() (bool, serverctl.ServerShutdownInterface, *stop.Stopper) {
s.Lock()
defer s.Unlock()
s.shutdownRequested = true
return s.s != nil, s.s, s.stopper
}
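
// exampleShutdownHandshake is a hypothetical sketch (not part of the
// original file) of the coordination protocol above: whichever of
// setStarted and startShutdown runs second learns about the other side,
// so exactly one party ends up responsible for calling stopper.Stop().
func exampleShutdownHandshake(ctx context.Context, status *serverStatus) {
	if started, _, stopper := status.startShutdown(); started {
		// The server finished starting first; the shutdown side now owns
		// the stopper and must eventually stop it.
		stopper.Stop(ctx)
	}
	// Otherwise the startup goroutine will observe the shutdown (via
	// shutdownInProgress() or a true return from setStarted) and stop
	// the stopper itself.
}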
// waitForShutdown blocks until interrupted by a shutdown signal, which can come
// in several forms:
// - a shutdown request coming from an internal module being signaled on
// shutdownC. This can be some internal error or a drain RPC.
// - receiving a Unix signal on signalCh.
// - a log.Fatal() call.
//
// Depending on what interruption is received, the server might be drained
// before shutting down.
func waitForShutdown(
stopper *stop.Stopper,
shutdownC <-chan serverctl.ShutdownRequest,
signalCh <-chan os.Signal,
serverStatusMu *serverStatus,
) (returnErr error) {
shutdownCtx, shutdownSpan := serverCfg.AmbientCtx.AnnotateCtxWithSpan(context.Background(), "server shutdown")
defer shutdownSpan.Finish()
stopWithoutDrain := make(chan struct{}) // closed if interrupted very early
msgDrain := redact.SafeString("initiating graceful shutdown of server")
select {
case shutdownRequest := <-shutdownC:
returnErr = shutdownRequest.ShutdownCause()
drain := shutdownRequest.TerminateUsingGracefulDrain()
if !drain {
msgDrain = "initiating hard shutdown of server"
}
startShutdownAsync(serverStatusMu, stopWithoutDrain, drain)
case sig := <-signalCh:
// We start flushing log writes from here, because if a
// signal was received there is a non-zero chance the sender of
// this signal will follow up with SIGKILL if the shutdown is not
// timely, and we don't want logs to be lost.
log.StartAlwaysFlush()
if sig == exitAbruptlySignal {
log.Ops.Shoutf(shutdownCtx, severity.ERROR, "received signal '%s', exiting", redact.Safe(sig))
exit.WithCode(exit.Killed())
}
log.Ops.Infof(shutdownCtx, "received signal '%s'", sig)
if sig == os.Interrupt {
// Graceful shutdown after an interrupt should cause the process
// to terminate with a non-zero exit code; however SIGTERM is
// "legitimate" and should be acknowledged with a success exit
// code. So we keep the error state here for later.
returnErr = clierror.NewErrorWithSeverity(
errors.New("interrupted"),