executor.go
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Functionality of this Executor is tested in go/test/endtoend/onlineddl/...
*/
package onlineddl
import (
"context"
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/spf13/pflag"
"google.golang.org/protobuf/encoding/prototext"
"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/capabilities"
"vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/syscallutil"
"vitess.io/vitess/go/textutil"
"vitess.io/vitess/go/timer"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/dbconnpool"
"vitess.io/vitess/go/vt/log"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/schemadiff"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
"vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
"vitess.io/vitess/go/vt/vttablet/tabletserver/throttle"
"vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp"
"vitess.io/vitess/go/vt/vttablet/tmclient"
)
var (
// ErrExecutorNotWritableTablet is generated when the executor is asked to run a migration on a read-only server
ErrExecutorNotWritableTablet = errors.New("cannot run migration on non-writable tablet")
// ErrExecutorMigrationAlreadyRunning is generated when an attempt is made to run an operation that conflicts with a running migration
ErrExecutorMigrationAlreadyRunning = errors.New("cannot run migration since a migration is already running")
// ErrMigrationNotFound is returned by readMigration when the given UUID cannot be found
ErrMigrationNotFound = errors.New("migration not found")
)
var (
// fixCompletedTimestampDone tracks whether the one-time fix for nil `completed_timestamp` columns has been applied, see
// https://github.com/vitessio/vitess/issues/13927
// The fix is in release-18.0
// TODO: remove in release-19.0
fixCompletedTimestampDone bool
)
var emptyResult = &sqltypes.Result{}
var acceptableDropTableIfExistsErrorCodes = []sqlerror.ErrorCode{sqlerror.ERCantFindFile, sqlerror.ERNoSuchTable}
var copyAlgorithm = sqlparser.AlgorithmValue(sqlparser.CopyStr)
var (
ghostBinaryPath = "gh-ost"
ptOSCBinaryPath = "/usr/bin/pt-online-schema-change"
migrationCheckInterval = 1 * time.Minute
retainOnlineDDLTables = 24 * time.Hour
defaultCutOverThreshold = 10 * time.Second
maxConcurrentOnlineDDLs = 256
migrationNextCheckIntervals = []time.Duration{1 * time.Second, 5 * time.Second, 10 * time.Second, 20 * time.Second}
cutoverIntervals = []time.Duration{0, 1 * time.Minute, 5 * time.Minute, 10 * time.Minute, 30 * time.Minute}
)
func init() {
servenv.OnParseFor("vtcombo", registerOnlineDDLFlags)
servenv.OnParseFor("vttablet", registerOnlineDDLFlags)
}
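// registerOnlineDDLFlags registers the Online DDL configuration flags with the given flag set.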
func registerOnlineDDLFlags(fs *pflag.FlagSet) {
fs.StringVar(&ghostBinaryPath, "gh-ost-path", ghostBinaryPath, "override default gh-ost binary full path")
fs.StringVar(&ptOSCBinaryPath, "pt-osc-path", ptOSCBinaryPath, "override default pt-online-schema-change binary full path")
fs.DurationVar(&migrationCheckInterval, "migration_check_interval", migrationCheckInterval, "Interval between migration checks")
fs.DurationVar(&retainOnlineDDLTables, "retain_online_ddl_tables", retainOnlineDDLTables, "How long should vttablet keep an old migrated table before purging it")
fs.IntVar(&maxConcurrentOnlineDDLs, "max_concurrent_online_ddl", maxConcurrentOnlineDDLs, "Maximum number of online DDL changes that may run concurrently")
}
const (
maxPasswordLength = 32 // MySQL's *replication* password may not exceed 32 characters
staleMigrationMinutes = 180
progressPctStarted float64 = 0
progressPctFull float64 = 100.0
etaSecondsUnknown = -1
etaSecondsNow = 0
rowsCopiedUnknown = 0
emptyHint = ""
readyToCompleteHint = "ready_to_complete"
databasePoolSize = 3
qrBufferExtraTimeout = 5 * time.Second
grpcTimeout = 30 * time.Second
vreplicationTestSuiteWaitSeconds = 5
)
var (
migrationLogFileName = "migration.log"
migrationFailureFileName = "migration-failure.log"
onlineDDLUser = "vt-online-ddl-internal"
onlineDDLGrant = fmt.Sprintf("'%s'@'%s'", onlineDDLUser, "%")
)
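// mysqlVariables is a snapshot of relevant backend MySQL server variables.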
type mysqlVariables struct {
host string
port int
readOnly bool
version string
versionComment string
}
// Executor wraps and manages the execution of online DDL migrations (vitess/vreplication, gh-ost and pt-osc).
type Executor struct {
env tabletenv.Env
pool *connpool.Pool
tabletTypeFunc func() topodatapb.TabletType
ts *topo.Server
lagThrottler *throttle.Throttler
toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool)
isPreparedPoolEmpty func(tableName string) bool
requestGCChecksFunc func()
tabletAlias *topodatapb.TabletAlias
keyspace string
shard string
dbName string
initMutex sync.Mutex
migrationMutex sync.Mutex
submitMutex sync.Mutex // used when submitting migrations
// ownedRunningMigrations maps UUIDs owned by this executor to their *schema.OnlineDDL.
// A UUID listed in this map stands for a migration that is executing, and that this executor can control.
// Migrations found to be running which are not listed in this map will either:
// - be adopted by this executor (possible for vreplication migrations), or
// - be terminated (example: pt-osc migration gone rogue, process still running even as the migration failed)
// The Executor auto-reviews the map and cleans up migrations thought to be running which are not running.
ownedRunningMigrations sync.Map
vreplicationLastError map[string]*vterrors.LastError
tickReentranceFlag int64
reviewedRunningMigrationsFlag bool
ticks *timer.Timer
isOpen int64
// This will be a pointer to the executeQuery function unless
// a custom sidecar database is used, then it will point to
// the executeQueryWithSidecarDBReplacement function. This
// variable assignment must be managed in the Open function.
execQuery func(ctx context.Context, query string) (result *sqltypes.Result, err error)
}
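// cancellableMigration couples a migration UUID with the reason for its cancellation.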
type cancellableMigration struct {
uuid string
message string
}
func newCancellableMigration(uuid string, message string) *cancellableMigration {
return &cancellableMigration{uuid: uuid, message: message}
}
// newGCTableRetainTime returns the time until which a new GC table is to be retained
func newGCTableRetainTime() time.Time {
return time.Now().UTC().Add(retainOnlineDDLTables)
}
// getMigrationCutOverThreshold returns the cut-over threshold for the given migration. The migration's
// DDL Strategy may explicitly set the threshold; otherwise, we return the default cut-over threshold.
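// For example, a migration submitted with a DDL strategy such as "vitess --cut-over-threshold=30s"
// would override the 10s default (flag spelling as parsed by schema.DDLStrategySetting).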
func getMigrationCutOverThreshold(onlineDDL *schema.OnlineDDL) time.Duration {
if threshold, _ := onlineDDL.StrategySetting().CutOverThreshold(); threshold != 0 {
return threshold
}
return defaultCutOverThreshold
}
// NewExecutor creates a new online DDL executor.
func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *topo.Server,
lagThrottler *throttle.Throttler,
tabletTypeFunc func() topodatapb.TabletType,
toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool),
requestGCChecksFunc func(),
isPreparedPoolEmpty func(tableName string) bool,
) *Executor {
// sanitize flags
if maxConcurrentOnlineDDLs < 1 {
maxConcurrentOnlineDDLs = 1 // or else nothing will ever run
}
return &Executor{
env: env,
tabletAlias: tabletAlias.CloneVT(),
pool: connpool.NewPool(env, "OnlineDDLExecutorPool", tabletenv.ConnPoolConfig{
Size: databasePoolSize,
IdleTimeout: env.Config().OltpReadPool.IdleTimeout,
}),
tabletTypeFunc: tabletTypeFunc,
ts: ts,
lagThrottler: lagThrottler,
toggleBufferTableFunc: toggleBufferTableFunc,
isPreparedPoolEmpty: isPreparedPoolEmpty,
requestGCChecksFunc: requestGCChecksFunc,
ticks: timer.NewTimer(migrationCheckInterval),
// Gracefully return an error if any caller tries to execute
// a query before the executor has been fully opened.
execQuery: func(ctx context.Context, query string) (result *sqltypes.Result, err error) {
return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "onlineddl executor is closed")
},
}
}
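// executeQuery runs the given query using the executor's connection pool.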
func (e *Executor) executeQuery(ctx context.Context, query string) (result *sqltypes.Result, err error) {
defer e.env.LogError()
conn, err := e.pool.Get(ctx, nil)
if err != nil {
return result, err
}
defer conn.Recycle()
return conn.Conn.Exec(ctx, query, -1, true)
}
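// executeQueryWithSidecarDBReplacement is like executeQuery, except it first replaces any
// default sidecar database qualifiers in the query with the configured sidecar database name.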
func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, query string) (result *sqltypes.Result, err error) {
defer e.env.LogError()
conn, err := e.pool.Get(ctx, nil)
if err != nil {
return result, err
}
defer conn.Recycle()
// Replace any provided sidecar DB qualifiers with the correct one.
uq, err := e.env.Environment().Parser().ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName())
if err != nil {
return nil, err
}
return conn.Conn.Exec(ctx, uq, -1, true)
}
// TabletAliasString returns the tablet alias as a string
func (e *Executor) TabletAliasString() string {
return topoproto.TabletAliasString(e.tabletAlias)
}
// InitDBConfig initializes the executor's keyspace, shard and database name
func (e *Executor) InitDBConfig(keyspace, shard, dbName string) {
e.keyspace = keyspace
e.shard = shard
e.dbName = dbName
}
// Open opens database pool and initializes the schema
func (e *Executor) Open() error {
e.initMutex.Lock()
defer e.initMutex.Unlock()
if atomic.LoadInt64(&e.isOpen) > 0 || !e.env.Config().EnableOnlineDDL {
return nil
}
log.Infof("onlineDDL Executor Open()")
e.reviewedRunningMigrationsFlag = false // will be set as "true" by reviewRunningMigrations()
e.ownedRunningMigrations.Range(func(k, _ any) bool {
e.ownedRunningMigrations.Delete(k)
return true
})
e.vreplicationLastError = make(map[string]*vterrors.LastError)
if sidecar.GetName() != sidecar.DefaultName {
e.execQuery = e.executeQueryWithSidecarDBReplacement
} else {
e.execQuery = e.executeQuery
}
e.pool.Open(e.env.Config().DB.AppWithDB(), e.env.Config().DB.DbaWithDB(), e.env.Config().DB.AppDebugWithDB())
e.ticks.Start(e.onMigrationCheckTick)
e.triggerNextCheckInterval()
atomic.StoreInt64(&e.isOpen, 1)
return nil
}
// Close frees resources
func (e *Executor) Close() {
e.initMutex.Lock()
defer e.initMutex.Unlock()
if atomic.LoadInt64(&e.isOpen) == 0 {
return
}
log.Infof("onlineDDL Executor Close()")
e.ticks.Stop()
e.pool.Close()
atomic.StoreInt64(&e.isOpen, 0)
}
// triggerNextCheckInterval schedules the next few migration check ticks to run sooner than the normal interval
func (e *Executor) triggerNextCheckInterval() {
for _, interval := range migrationNextCheckIntervals {
e.ticks.TriggerAfter(interval)
}
}
// matchesShards checks whether the given comma-delimited shard names include this tablet's shard.
// An empty input implicitly matches all shards.
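// For example, matchesShards("-80,80-") returns true only when this tablet's shard is "-80" or "80-",
// while matchesShards("") returns true on any shard.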
func (e *Executor) matchesShards(commaDelimitedShards string) bool {
shards := textutil.SplitDelimitedList(commaDelimitedShards)
if len(shards) == 0 {
// Nothing explicitly defined, so implicitly all shards are allowed
return true
}
for _, shard := range shards {
if shard == e.shard {
return true
}
}
return false
}
// countOwnedRunningMigrations returns an estimate of the current count of running migrations; this is
// normally an accurate number, but can be inexact because the executor periodically reviews
// e.ownedRunningMigrations and adds/removes migrations based on actual migration state.
func (e *Executor) countOwnedRunningMigrations() (count int) {
e.ownedRunningMigrations.Range(func(_, val any) bool {
if _, ok := val.(*schema.OnlineDDL); ok {
count++
}
return true // continue iteration
})
return count
}
// allowConcurrentMigration checks if the given migration is allowed to run concurrently.
// First, the migration itself must declare --allow-concurrent. But then, there are also some
// restrictions on which migrations exactly are allowed such concurrency.
func (e *Executor) allowConcurrentMigration(onlineDDL *schema.OnlineDDL) (action sqlparser.DDLAction, allowConcurrent bool) {
if !onlineDDL.StrategySetting().IsAllowConcurrent() {
return action, false
}
var err error
action, err = onlineDDL.GetAction(e.env.Environment().Parser())
if err != nil {
return action, false
}
switch action {
case sqlparser.CreateDDLAction, sqlparser.DropDDLAction:
// CREATE TABLE, DROP TABLE are allowed to run concurrently.
return action, true
case sqlparser.AlterDDLAction:
// ALTER is only allowed concurrent execution if this is a Vitess migration
strategy := onlineDDL.StrategySetting().Strategy
return action, (strategy == schema.DDLStrategyOnline || strategy == schema.DDLStrategyVitess)
case sqlparser.RevertDDLAction:
// REVERT is allowed to run concurrently.
// Reminder that REVERT is supported for CREATE, DROP and for 'vitess' ALTER, but never for
// 'gh-ost' or 'pt-osc' ALTERs
return action, true
}
return action, false
}
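// proposedMigrationConflictsWithRunningMigration checks whether the proposed migration is
// disallowed from running alongside the given running migration: migrations on the same table
// always conflict; otherwise, at least one of the two must allow concurrency, and a proposed
// ALTER additionally requires the running migration to be ready to complete.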
func (e *Executor) proposedMigrationConflictsWithRunningMigration(runningMigration, proposedMigration *schema.OnlineDDL) bool {
if runningMigration.Table == proposedMigration.Table {
// migrations operate on same table
return true
}
_, isRunningMigrationAllowConcurrent := e.allowConcurrentMigration(runningMigration)
proposedMigrationAction, isProposedMigrationAllowConcurrent := e.allowConcurrentMigration(proposedMigration)
if !isRunningMigrationAllowConcurrent && !isProposedMigrationAllowConcurrent {
// neither allowed concurrently
return true
}
if proposedMigrationAction == sqlparser.AlterDDLAction {
// A new ALTER migration conflicts with an existing migration if the existing migration is still not ready to complete.
// Specifically, if the running migration is an ALTER, and is still busy with copying rows (copy_state), then
// we consider the two to be conflicting. But, if the running migration is done copying rows, and is now only
// applying binary logs, and is up-to-date, then we consider a new ALTER migration to be non-conflicting.
if atomic.LoadInt64(&runningMigration.WasReadyToComplete) == 0 {
return true
}
}
return false
}
// isAnyConflictingMigrationRunning checks if there's any running migration that conflicts with the
// given migration, such that they can't both run concurrently.
func (e *Executor) isAnyConflictingMigrationRunning(onlineDDL *schema.OnlineDDL) (conflictFound bool, conflictingMigration *schema.OnlineDDL) {
e.ownedRunningMigrations.Range(func(_, val any) bool {
runningMigration, ok := val.(*schema.OnlineDDL)
if !ok {
return true // continue iteration
}
if e.proposedMigrationConflictsWithRunningMigration(runningMigration, onlineDDL) {
conflictingMigration = runningMigration
return false // stop iteration, no need to review other migrations
}
return true // continue iteration
})
return (conflictingMigration != nil), conflictingMigration
}
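// ghostPanicFlagFileName returns the path of this migration's gh-ost panic flag file;
// creating the file causes gh-ost to abort immediately.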
func (e *Executor) ghostPanicFlagFileName(uuid string) string {
return path.Join(os.TempDir(), fmt.Sprintf("ghost.%s.panic.flag", uuid))
}
func (e *Executor) createGhostPanicFlagFile(uuid string) error {
_, err := os.Create(e.ghostPanicFlagFileName(uuid))
return err
}
func (e *Executor) deleteGhostPanicFlagFile(uuid string) error {
// We use RemoveAll because if the file does not exist that's fine. Remove will return an error
// if file does not exist; RemoveAll does not.
return os.RemoveAll(e.ghostPanicFlagFileName(uuid))
}
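// ghostPostponeFlagFileName returns the path of this migration's gh-ost postpone flag file;
// while the file exists, gh-ost postpones the cut-over.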
func (e *Executor) ghostPostponeFlagFileName(uuid string) string {
return path.Join(os.TempDir(), fmt.Sprintf("ghost.%s.postpone.flag", uuid))
}
func (e *Executor) deleteGhostPostponeFlagFile(uuid string) error {
// We use RemoveAll because if the file does not exist that's fine. Remove will return an error
// if file does not exist; RemoveAll does not.
return os.RemoveAll(e.ghostPostponeFlagFileName(uuid))
}
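// ptPidFileName returns the path of the file recording the pt-online-schema-change process ID for the given migration.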
func (e *Executor) ptPidFileName(uuid string) string {
return path.Join(os.TempDir(), fmt.Sprintf("pt-online-schema-change.%s.pid", uuid))
}
// readMySQLVariables contacts the backend MySQL server to read some of its configuration
func (e *Executor) readMySQLVariables(ctx context.Context) (variables *mysqlVariables, err error) {
conn, err := e.pool.Get(ctx, nil)
if err != nil {
return nil, err
}
defer conn.Recycle()
tm, err := conn.Conn.Exec(ctx, `select
@@global.hostname as hostname,
@@global.port as port,
@@global.read_only as read_only,
@@global.version AS version,
@@global.version_comment AS version_comment
from dual`, 1, true)
if err != nil {
return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read MySQL variables: %v", err)
}
row := tm.Named().Row()
if row == nil {
return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL variables: %+v", tm.Rows)
}
variables = &mysqlVariables{}
if e.env.Config().DB.Host != "" {
variables.host = e.env.Config().DB.Host
} else {
variables.host = row["hostname"].ToString()
}
if e.env.Config().DB.Port != 0 {
variables.port = e.env.Config().DB.Port
} else if port, err := row.ToInt("port"); err != nil {
return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.port %v: %v", tm, err)
} else {
variables.port = port
}
if variables.readOnly, err = row.ToBool("read_only"); err != nil {
return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse @@global.read_only %v: %v", tm, err)
}
variables.version = row["version"].ToString()
variables.versionComment = row["version_comment"].ToString()
return variables, nil
}
// createOnlineDDLUser creates a gh-ost or pt-osc user account with all
// necessary privileges and with a random password
func (e *Executor) createOnlineDDLUser(ctx context.Context) (password string, err error) {
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaConnector())
if err != nil {
return password, err
}
defer conn.Close()
password = RandomHash()[0:maxPasswordLength]
for _, query := range sqlCreateOnlineDDLUser {
parsed := sqlparser.BuildParsedQuery(query, onlineDDLGrant, password)
if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil {
return password, err
}
}
for _, query := range sqlGrantOnlineDDLSuper {
parsed := sqlparser.BuildParsedQuery(query, onlineDDLGrant)
conn.ExecuteFetch(parsed.Query, 0, false)
// We ignore failure, since we might not be able to grant
// SUPER privs (e.g. Aurora)
}
for _, query := range sqlGrantOnlineDDLUser {
parsed := sqlparser.BuildParsedQuery(query, onlineDDLGrant)
if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil {
return password, err
}
}
return password, err
}
// dropOnlineDDLUser drops the online DDL user account at the end of migration
func (e *Executor) dropOnlineDDLUser(ctx context.Context) error {
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaConnector())
if err != nil {
return err
}
defer conn.Close()
parsed := sqlparser.BuildParsedQuery(sqlDropOnlineDDLUser, onlineDDLGrant)
_, err = conn.ExecuteFetch(parsed.Query, 0, false)
return err
}
// tableExists checks if a given table exists.
func (e *Executor) tableExists(ctx context.Context, tableName string) (bool, error) {
tableName = strings.ReplaceAll(tableName, `_`, `\_`)
parsed := sqlparser.BuildParsedQuery(sqlShowTablesLike, tableName)
rs, err := e.execQuery(ctx, parsed.Query)
if err != nil {
return false, err
}
row := rs.Named().Row()
return (row != nil), nil
}
// showCreateTable returns the SHOW CREATE statement for a table or a view
func (e *Executor) showCreateTable(ctx context.Context, tableName string) (string, error) {
parsed := sqlparser.BuildParsedQuery(sqlShowCreateTable, tableName)
rs, err := e.execQuery(ctx, parsed.Query)
if err != nil {
return "", err
}
if len(rs.Rows) == 0 {
return "", nil
}
row := rs.Rows[0]
return row[1].ToString(), nil
}
// getCreateTableStatement gets a formal CreateTable representation of the given table
func (e *Executor) getCreateTableStatement(ctx context.Context, tableName string) (*sqlparser.CreateTable, error) {
showCreateTable, err := e.showCreateTable(ctx, tableName)
if err != nil {
return nil, vterrors.Wrapf(err, "in Executor.getCreateTableStatement()")
}
stmt, err := e.env.Environment().Parser().ParseStrictDDL(showCreateTable)
if err != nil {
return nil, err
}
createTable, ok := stmt.(*sqlparser.CreateTable)
if !ok {
return nil, schemadiff.ErrExpectedCreateTable
}
return createTable, nil
}
func (e *Executor) parseAlterOptions(ctx context.Context, onlineDDL *schema.OnlineDDL) string {
// Temporary hack (2020-08-11)
// Because sqlparser does not do full blown ALTER TABLE parsing,
// and because we don't want gh-ost to know about WITH_GHOST and WITH_PT syntax,
// we resort to regexp-based parsing of the query.
// TODO(shlomi): generate _alter options_ via sqlparser when it fully supports ALTER TABLE syntax.
_, _, alterOptions := schema.ParseAlterTableOptions(onlineDDL.SQL)
return alterOptions
}
// executeDirectly runs a DDL query directly on the backend MySQL server
func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...sqlerror.ErrorCode) (acceptableErrorCodeFound bool, err error) {
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
if err != nil {
return false, err
}
defer conn.Close()
restoreSQLModeFunc, err := e.initMigrationSQLMode(ctx, onlineDDL, conn)
defer restoreSQLModeFunc()
if err != nil {
return false, err
}
_ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusRunning, false, progressPctStarted, etaSecondsUnknown, rowsCopiedUnknown, emptyHint)
if onlineDDL.StrategySetting().IsAllowForeignKeysFlag() {
// Foreign key support is currently "unsafe". We further put the burden on the user
// by disabling foreign key checks. With this, the user is able to create cyclic
// foreign key references (e.g. t1<->t2) without going through the trouble of
// CREATE TABLE t1->CREATE TABLE t2->ALTER TABLE t1 ADD FOREIGN KEY ... REFERENCES t2
// Grab current foreign_key_checks value
if _, err := conn.ExecuteFetch(`set @vt_onlineddl_foreign_key_checks=@@foreign_key_checks`, 0, false); err != nil {
return false, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read foreign_key_checks: %v", err)
}
_, err = conn.ExecuteFetch("SET foreign_key_checks=0", 0, false)
if err != nil {
return false, err
}
defer conn.ExecuteFetch("SET foreign_key_checks=@vt_onlineddl_foreign_key_checks", 0, false)
}
_, err = conn.ExecuteFetch(onlineDDL.SQL, 0, false)
if err != nil {
// let's see if this error is actually acceptable
if merr, ok := err.(*sqlerror.SQLError); ok {
for _, acceptableCode := range acceptableMySQLErrorCodes {
if merr.Num == acceptableCode {
// we don't consider this to be an error.
acceptableErrorCodeFound = true
err = nil
break
}
}
}
}
if err != nil {
return false, err
}
defer e.reloadSchema(ctx)
_ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow, rowsCopiedUnknown, emptyHint)
return acceptableErrorCodeFound, nil
}
// doesConnectionInfoMatch checks if there's a MySQL connection in PROCESSLIST whose Info matches the given text
func (e *Executor) doesConnectionInfoMatch(ctx context.Context, connID int64, submatch string) (bool, error) {
findProcessQuery, err := sqlparser.ParseAndBind(sqlFindProcess,
sqltypes.Int64BindVariable(connID),
sqltypes.StringBindVariable("%"+submatch+"%"),
)
if err != nil {
return false, err
}
rs, err := e.execQuery(ctx, findProcessQuery)
if err != nil {
return false, err
}
return len(rs.Rows) == 1, nil
}
// tableParticipatesInForeignKeyRelationship checks if a given table is either a parent or a child in at least one foreign key constraint
func (e *Executor) tableParticipatesInForeignKeyRelationship(ctx context.Context, schema string, table string) (bool, error) {
for _, fkQuery := range []string{selSelectCountFKParentConstraints, selSelectCountFKChildConstraints} {
query, err := sqlparser.ParseAndBind(fkQuery,
sqltypes.StringBindVariable(schema),
sqltypes.StringBindVariable(table),
)
if err != nil {
return false, err
}
r, err := e.execQuery(ctx, query)
if err != nil {
return false, err
}
row := r.Named().Row()
if row == nil {
return false, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result from INFORMATION_SCHEMA.KEY_COLUMN_USAGE query: %s", query)
}
countFKConstraints := row.AsInt64("num_fk_constraints", 0)
if countFKConstraints > 0 {
return true, nil
}
}
return false, nil
}
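// validateTableForAlterAction validates that the given table may undergo an ALTER migration: a table
// participating in a FOREIGN KEY constraint requires the --unsafe-allow-foreign-keys strategy flag as
// well as server support for rename_table_preserve_foreign_key.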
func (e *Executor) validateTableForAlterAction(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) {
participatesInFK, err := e.tableParticipatesInForeignKeyRelationship(ctx, onlineDDL.Schema, onlineDDL.Table)
if err != nil {
return vterrors.Wrapf(err, "error while attempting to validate whether table %s participates in FOREIGN KEY constraint", onlineDDL.Table)
}
if participatesInFK {
if !onlineDDL.StrategySetting().IsAllowForeignKeysFlag() {
// FK migrations not allowed
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "table %s participates in a FOREIGN KEY constraint and FOREIGN KEY constraints are not supported in Online DDL unless the *experimental and unsafe* --unsafe-allow-foreign-keys strategy flag is specified", onlineDDL.Table)
}
// FK migrations allowed. Validate that underlying MySQL server supports it.
preserveFKSupported, err := e.isPreserveForeignKeySupported(ctx)
if err != nil {
return vterrors.Wrapf(err, "error while attempting to validate whether 'rename_table_preserve_foreign_key' is supported")
}
if !preserveFKSupported {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "table %s participates in a FOREIGN KEY constraint and underlying database server does not support `rename_table_preserve_foreign_key`", onlineDDL.Table)
}
}
return nil
}
// primaryPosition returns the MySQL/MariaDB position (typically GTID pos) on the tablet
func (e *Executor) primaryPosition(ctx context.Context) (pos replication.Position, err error) {
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
if err != nil {
return pos, err
}
defer conn.Close()
pos, err = conn.PrimaryPosition()
return pos, err
}
// terminateVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration
func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string) error {
tablet, err := e.ts.GetTablet(ctx, e.tabletAlias)
if err != nil {
return err
}
query, err := sqlparser.ParseAndBind(sqlStopVReplStream,
sqltypes.StringBindVariable(e.dbName),
sqltypes.StringBindVariable(uuid),
)
if err != nil {
return err
}
// silently skip error; stopping the stream is just a graceful act; later deleting it is more important
if _, err := e.vreplicationExec(ctx, tablet.Tablet, query); err != nil {
log.Errorf("FAIL vreplicationExec: uuid=%s, query=%v, error=%v", uuid, query, err)
}
if err := e.deleteVReplicationEntry(ctx, uuid); err != nil {
return err
}
return nil
}
// killTableLockHoldersAndAccessors kills any active queries using the given table, and also kills
// connections with open transactions, holding locks on the table.
// This is done on a best-effort basis, by issuing `KILL` and `KILL QUERY` commands. As MySQL goes,
// it is not guaranteed that the queries/transactions will terminate in a timely manner.
func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, tableName string) error {
log.Infof("killTableLockHoldersAndAccessors: %v", tableName)
conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
if err != nil {
return err
}
defer conn.Close()
{
// First, let's look at PROCESSLIST for queries that _might_ be operating on our table. This may have
// plenty of false positives, as we're simply looking for the table name as a query substring.
likeVariable := "%" + tableName + "%"
query, err := sqlparser.ParseAndBind(sqlFindProcessByInfo, sqltypes.StringBindVariable(likeVariable))
if err != nil {
return err
}
rs, err := conn.Conn.ExecuteFetch(query, -1, true)
if err != nil {
return err
}
log.Infof("killTableLockHoldersAndAccessors: found %v potential queries", len(rs.Rows))
// Now that we have a list of candidate queries, we parse each one to find whether it actually references our table:
for _, row := range rs.Named().Rows {
threadId := row.AsInt64("id", 0)
infoQuery := row.AsString("info", "")
stmt, err := e.env.Environment().Parser().Parse(infoQuery)
if err != nil {
log.Error(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery))
continue
}
queryUsesTable := false
_ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
switch node := node.(type) {
case *sqlparser.TableName:
if node.Name.String() == tableName {
queryUsesTable = true
return false, nil
}
case *sqlparser.AliasedTableExpr:
if aliasedTableName, ok := node.Expr.(sqlparser.TableName); ok {
if aliasedTableName.Name.String() == tableName {
queryUsesTable = true
return false, nil
}
}
}
return true, nil
}, stmt)
if queryUsesTable {
log.Infof("killTableLockHoldersAndAccessors: killing query %v: %.100s", threadId, infoQuery)
killQuery := fmt.Sprintf("KILL QUERY %d", threadId)
if _, err := conn.Conn.ExecuteFetch(killQuery, 1, false); err != nil {
log.Error(vterrors.Errorf(vtrpcpb.Code_ABORTED, "could not kill query %v. Ignoring", threadId))
}
}
}
}
capableOf := mysql.ServerVersionCapableOf(conn.ServerVersion)
capable, err := capableOf(capabilities.PerformanceSchemaDataLocksTableCapability)
if err != nil {
return err
}
if capable {
{
// Kill connections that have open transactions locking the table. These potentially (probably?) are not
// actively running a query on our table. They're doing other things while holding locks on our table.
query, err := sqlparser.ParseAndBind(sqlProcessWithLocksOnTable, sqltypes.StringBindVariable(tableName))
if err != nil {
return err
}
rs, err := conn.Conn.ExecuteFetch(query, -1, true)
if err != nil {
return err
}
log.Infof("killTableLockHoldersAndAccessors: found %v locking transactions", len(rs.Rows))
for _, row := range rs.Named().Rows {
threadId := row.AsInt64("trx_mysql_thread_id", 0)
log.Infof("killTableLockHoldersAndAccessors: killing connection %v with transaction on table", threadId)
killConnection := fmt.Sprintf("KILL %d", threadId)
_, err = conn.Conn.ExecuteFetch(killConnection, 1, false)
if err != nil {
log.Errorf("Unable to kill the connection %d: %v", threadId, err)
}
}
}
}
return nil
}
// cutOverVReplMigration performs the cut-over for a vreplication-based migration: it swaps the vreplication ghost table with the original table
func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, shouldForceCutOver bool) error {
if err := e.incrementCutoverAttempts(ctx, s.workflow); err != nil {
return err
}
tmClient := e.tabletManagerClient()
defer tmClient.Close()
// sanity checks:
vreplTable, err := getVreplTable(s)
if err != nil {
return err
}
// get topology client & entities:
tablet, err := e.ts.GetTablet(ctx, e.tabletAlias)
if err != nil {
return err
}
// information about source tablet
onlineDDL, _, err := e.readMigration(ctx, s.workflow)
if err != nil {
return err
}
isVreplicationTestSuite := onlineDDL.StrategySetting().IsVreplicationTestSuite()
e.updateMigrationStage(ctx, onlineDDL.UUID, "starting cut-over")
var sentryTableName string
migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL)
waitForPos := func(s *VReplStream, pos replication.Position) error {
ctx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
defer cancel()
// Wait for target to reach the up-to-date pos
if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, replication.EncodePosition(pos)); err != nil {
return err
}
// Target is now in sync with source!
return nil
}
if !isVreplicationTestSuite {
// A bit early on, we generate a name for the sentry table
// We do this here because right now we're in a safe place where nothing happened yet. If there's an error now, bail out
// and no harm done.
// Later on, when traffic is blocked and tables renamed, that's a more dangerous place to be in; we want as little logic
// in that place as possible.
sentryTableName, err = schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime())
if err != nil {
return err
}
// We create the sentry table before toggling writes, because this involves a WaitForPos, which takes some time. We
// don't want to overload the buffering time with this excessive wait.
if err := e.updateArtifacts(ctx, onlineDDL.UUID, sentryTableName); err != nil {
return err
}
dropSentryTableQuery := sqlparser.BuildParsedQuery(sqlDropTableIfExists, sentryTableName)
defer func() {
// cut-over attempts may fail. We create a new, unique sentry table for every
// cut-over attempt. We could just leave them hanging around, and let gcArtifacts()
// and the table GC mechanism take care of them. But then again, if we happen
// to have many cut-over attempts, that just proliferates and overloads the schema,
// and also bloats the `artifacts` column.
// The thing is, the sentry table is empty, and we really don't need it once the cut-over
// step is done (whether successful or failed). So, it's a cheap operation to drop the
// table right away, which we do, and then also reduce the `artifacts` column length by
// removing the entry
_, err := e.execQuery(ctx, dropSentryTableQuery.Query)
if err == nil {
e.clearSingleArtifact(ctx, onlineDDL.UUID, sentryTableName)
}
// This was a best effort optimization. Possibly the error is not nil. Which means we
// still have a record of the sentry table, and gcArtifacts() will still be able to take
// care of it in the future.
}()
parsed := sqlparser.BuildParsedQuery(sqlCreateSentryTable, sentryTableName)
if _, err := e.execQuery(ctx, parsed.Query); err != nil {
return err
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "sentry table created: %s", sentryTableName)
postSentryPos, err := e.primaryPosition(ctx)
if err != nil {
return err
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", replication.EncodePosition(postSentryPos))
if err := waitForPos(s, postSentryPos); err != nil {
return err
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "post-sentry pos reached")
}
lockConn, err := e.pool.Get(ctx, nil)
if err != nil {
return err
}
// Set large enough `@@lock_wait_timeout` so that it does not interfere with the cut-over operation.
// The code will ensure everything that needs to be terminated by `migrationCutOverThreshold` will be terminated.
lockConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, lockConn.Conn, 5*migrationCutOverThreshold)
if err != nil {
return err
}
defer lockConn.Recycle()
defer lockConnRestoreLockWaitTimeout()
defer lockConn.Conn.Exec(ctx, sqlUnlockTables, 1, false)
renameCompleteChan := make(chan error)
renameWasSuccessful := false
renameConn, err := e.pool.Get(ctx, nil)
if err != nil {
return err
}
// Set large enough `@@lock_wait_timeout` so that it does not interfere with the cut-over operation.
// The code will ensure everything that needs to be terminated by `migrationCutOverThreshold` will be terminated.
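// Note that the rename connection gets an even larger lock_wait_timeout than the lock connection above.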
renameConnRestoreLockWaitTimeout, err := e.initConnectionLockWaitTimeout(ctx, renameConn.Conn, 5*migrationCutOverThreshold*4)
if err != nil {
return err
}
defer renameConn.Recycle()
defer func() {
if !renameWasSuccessful {
err := renameConn.Conn.Kill("premature exit while renaming tables", 0)
if err != nil {
log.Warningf("Failed to kill connection being used to rename tables in OnlineDDL migration %s: %v", onlineDDL.UUID, err)
}
}
}()
defer renameConnRestoreLockWaitTimeout()
// See if backend MySQL server supports 'rename_table_preserve_foreign_key' variable
preserveFKSupported, err := e.isPreserveForeignKeySupported(ctx)
if err != nil {
return err
}