From f112da7eedc651db4b4c3c54dd2deec0aa95e16c Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 26 Mar 2020 20:41:37 +0800 Subject: [PATCH] fix incremental bug in llroad test (#199) * restore: filter same table ddl * *: do not return error when backup/restore data is empty * fix create database double during incremental restore * add tests * fix ci * address comment --- pkg/backup/client.go | 3 +- pkg/backup/schema_test.go | 8 +-- pkg/restore/db.go | 15 +++-- pkg/task/backup.go | 4 ++ pkg/task/restore.go | 30 +++++---- tests/br_incremental_only_ddl/run.sh | 72 +++++++++++++++++++++ tests/br_incremental_same_table/run.sh | 86 ++++++++++++++++++++++++++ 7 files changed, 198 insertions(+), 20 deletions(-) create mode 100755 tests/br_incremental_only_ddl/run.sh create mode 100755 tests/br_incremental_same_table/run.sh diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 219f58550..6cd8abe45 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -255,7 +255,8 @@ func BuildBackupRangeAndSchema( } if backupSchemas.Len() == 0 { - return nil, nil, errors.New("nothing to backup") + log.Info("nothing to backup") + return nil, nil, nil } return ranges, backupSchemas, nil } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index d3c82f172..98173dd55 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -62,7 +62,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) // Database is not exist. 
@@ -72,15 +72,15 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) - // Empty databse. + // Empty database. noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) tk.MustExec("use test") diff --git a/pkg/restore/db.go b/pkg/restore/db.go index be24a1ad9..ae90df371 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -203,19 +203,26 @@ func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*m } } + type namePair struct { + db string + table string + } + for _, table := range tables { tableIDs := make(map[int64]bool) tableIDs[table.Info.ID] = true - tableNames := make(map[string]bool) - tableNames[table.Info.Name.String()] = true + tableNames := make(map[namePair]bool) + name := namePair{table.Db.Name.String(), table.Info.Name.String()} + tableNames[name] = true for _, job := range allDDLJobs { if job.BinlogInfo.TableInfo != nil { - if tableIDs[job.TableID] || tableNames[job.BinlogInfo.TableInfo.Name.String()] { + name := namePair{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()} + if tableIDs[job.TableID] || tableNames[name] { ddlJobs = append(ddlJobs, job) tableIDs[job.TableID] = true // For truncate table, the id may be changed tableIDs[job.BinlogInfo.TableInfo.ID] = true - tableNames[job.BinlogInfo.TableInfo.Name.String()] = true + tableNames[name] = true } } } diff --git a/pkg/task/backup.go b/pkg/task/backup.go index fe3021a33..040be0444 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -126,6 +126,10 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig if err 
!= nil { return err } + // nothing to backup + if ranges == nil { + return nil + } ddlJobs := make([]*model.Job, 0) if cfg.LastBackupTS > 0 { diff --git a/pkg/task/restore.go b/pkg/task/restore.go index ff2374fa8..33bac7a6c 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -118,13 +118,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return errors.New("cannot do transactional restore from raw kv data") } - files, tables, err := filterRestoreFiles(client, cfg) + files, tables, dbs, err := filterRestoreFiles(client, cfg) if err != nil { return err } - if len(files) == 0 { - return errors.New("all files are filtered out from the backup archive, nothing to restore") - } var newTS uint64 if client.IsIncremental() { @@ -137,10 +134,25 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if err != nil { return err } + // execute DDL first err = client.ExecDDLs(ddlJobs) if err != nil { return errors.Trace(err) } + + // nothing to restore, maybe only ddl changes in incremental restore + if len(files) == 0 { + log.Info("all files are filtered out from the backup archive, nothing to restore") + return nil + } + + for _, db := range dbs { + err = client.CreateDatabase(db.Info) + if err != nil { + return err + } + } + rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) if err != nil { return err @@ -263,10 +275,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf func filterRestoreFiles( client *restore.Client, cfg *RestoreConfig, -) (files []*backup.File, tables []*utils.Table, err error) { +) (files []*backup.File, tables []*utils.Table, dbs []*utils.Database, err error) { tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, db := range client.GetDatabases() { @@ -277,17 +289,13 @@ func filterRestoreFiles( } if !createdDatabase { - if err = 
client.CreateDatabase(db.Info); err != nil { - return nil, nil, err - } + dbs = append(dbs, db) createdDatabase = true } - files = append(files, table.Files...) tables = append(tables, table) } } - return } diff --git a/tests/br_incremental_only_ddl/run.sh b/tests/br_incremental_only_ddl/run.sh new file mode 100755 index 000000000..f525acda4 --- /dev/null +++ b/tests/br_incremental_only_ddl/run.sh @@ -0,0 +1,72 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 +# run ddls +echo "run ddls..." +run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" +run_sql "DROP TABLE ${DB}.${TABLE}1;" +run_sql "DROP DATABASE ${DB};" +run_sql "CREATE DATABASE ${DB};" +run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));" +run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};" +run_sql "TRUNCATE TABLE ${DB}.${TABLE};" + +# incremental backup +echo "incremental backup start..." 
+last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" + +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." +fail=false +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR || fail=true +if $fail; then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +else + echo "TEST: [$TEST_NAME] succeeded!" +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_same_table/run.sh b/tests/br_incremental_same_table/run.sh new file mode 100755 index 000000000..797806837 --- /dev/null +++ b/tests/br_incremental_same_table/run.sh @@ -0,0 +1,86 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" +DB_COUNT=3 + +echo "load data..." 
+ +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +# run ddls + +# create 3 databases, each db has one table with same name +for i in $(seq $DB_COUNT); do + # create database + run_sql "CREATE DATABASE $DB$i;" + # create table + run_sql "CREATE TABLE IF NOT EXISTS $DB$i.${TABLE} (c1 INT);" + # insert records + for j in $(seq $ROW_COUNT); do + run_sql "INSERT INTO $DB$i.${TABLE}(c1) VALUES ($j);" + done +done + +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/inc" --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB$i;" +done + +# full restore +echo "full restore start..." +run_br restore full -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi + +# incremental restore only DB2.Table +echo "incremental restore start..." 
+run_br restore table --db ${DB}2 --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM ${DB}2.$TABLE;" | awk '/COUNT/{print $2}') +# check incremental restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE IF EXISTS $DB$i;" +done