From 69731ec6553e694d6038515434d1acbc13bc361d Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:23:31 +0800 Subject: [PATCH 01/49] consecutive id --- meta/autoid/autoid.go | 115 +++++++++++++++++++++++++++++++++++++ meta/autoid/autoid_test.go | 109 +++++++++++++++++++++++++++++++++++ 2 files changed, 224 insertions(+) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 03dc2b67ece7d..966f9817ff3b3 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -57,6 +57,9 @@ type Allocator interface { End() int64 // NextGlobalAutoID returns the next global autoID. NextGlobalAutoID(tableID int64) (int64, error) + // AllocN allocates N consecutive autoID for table with tableID. It is used to insert multiple + // rows in a statement, cause JDBC will presume the id of these rows are consecutive. + AllocN(tableID int64, N uint64) ([]int64, error) } type allocator struct { @@ -348,3 +351,115 @@ var localSchemaID = int64(math.MaxInt64) func GenLocalSchemaID() int64 { return atomic.AddInt64(&localSchemaID, -1) } + +// AllocN implements autoid.Allocator Alloc interface. +func (alloc *allocator) AllocN(tableID int64, N uint64) ([]int64, error) { + if tableID == 0 { + return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") + } + alloc.mu.Lock() + defer alloc.mu.Unlock() + if alloc.isUnsigned { + return alloc.allocN4Unsigned(tableID, N) + } + return alloc.allocN4Signed(tableID, N) +} + +func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) { + N1 := int64(N) + if alloc.base+N1 > alloc.end { // 说明剩余的批量autoID已经不足以支持本次的批量的连续分配 + var newBase, newEnd int64 + startTime := time.Now() + consumeDur := startTime.Sub(alloc.lastAllocTime) // 虽然有一部分被冲掉了,但是目的也是为了放大step + nextStep := NextStep(alloc.step, consumeDur) + if nextStep <= N1 { + alloc.step = mathutil.MinInt64(N1*2, maxStep) + } else { + alloc.step = nextStep + } + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + var err1 error + newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) //TID也是一种元数据,保存在tikv端 + if err1 != nil { + return err1 + } + tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) + if tmpStep < N1 { // 剩余的根本不够分配,就免的再去写了,直接err + return ErrAutoincReadFailed + } + newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) //这边会对TID进行加add step操作,表示批量分配过了 + return err1 + }) + metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) + if err != nil { + return nil, err + } + alloc.lastAllocTime = time.Now() + if newBase == math.MaxInt64 { + return nil, ErrAutoincReadFailed + } + alloc.base, alloc.end = newBase, newEnd + } + logutil.BgLogger().Debug("alloc N signed ID", + zap.Uint64("from ID", uint64(alloc.base)), + zap.Uint64("to ID", uint64(alloc.base+N1)), + zap.Int64("table ID", tableID), + zap.Int64("database ID", alloc.dbID)) + resN := make([]int64, 0, N1) + for i := alloc.base + 1; i <= alloc.base+N1; i++ { + resN = append(resN, i) + } + alloc.base += N1 + return resN, nil +} + +func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { //signed和unsigned就是max的值不同 + N1 := int64(N) + if alloc.base+N1 > alloc.end { + var newBase, newEnd int64 + startTime := time.Now() + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep := NextStep(alloc.step, consumeDur) + if nextStep <= N1 { + alloc.step = mathutil.MinInt64(N1*2, maxStep) + } else { + alloc.step = nextStep + } + err := 
kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + var err1 error + newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return err1 + } + tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) + if tmpStep < N1 { + return ErrAutoincReadFailed + } + newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) + return err1 + }) + metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) + if err != nil { + return nil, err + } + alloc.lastAllocTime = time.Now() + if uint64(newBase) == math.MaxUint64 { + return nil, ErrAutoincReadFailed + } + alloc.base, alloc.end = newBase, newEnd + } + logutil.BgLogger().Debug("alloc unsigned ID", + zap.Uint64(" from ID", uint64(alloc.base)), + zap.Uint64("to ID", uint64(alloc.base+N1)), + zap.Int64("table ID", tableID), + zap.Int64("database ID", alloc.dbID)) + resN := make([]int64, 0, N1) + for i := alloc.base + 1; i <= alloc.base+N1; i++ { + resN = append(resN, i) + } + // use uint64 N directly + alloc.base = int64(uint64(alloc.base) + N) + return resN, nil +} diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 23961eac150e7..76aa66a57a8f0 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -16,6 +16,7 @@ package autoid_test import ( "fmt" "math" + "math/rand" "sync" "testing" "time" @@ -60,6 +61,8 @@ func (*testSuite) TestT(c *C) { c.Assert(err, IsNil) err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + c.Assert(err, IsNil) return nil }) c.Assert(err, IsNil) @@ -142,6 +145,52 @@ func (*testSuite) TestT(c *C) { c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(math.MaxInt64), true) c.Assert(err, IsNil) + + // allocN for signed + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(4) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + idN, err := alloc.AllocN(4, 1) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 1) + c.Assert(idN[0], Equals, int64(1)) + + idN, err = alloc.AllocN(4, 2) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 2) + c.Assert(idN[0], Equals, int64(2)) + c.Assert(idN[1], Equals, int64(3)) + + idN, err = alloc.AllocN(4, 100) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 100) + for i := 0; i < 100; i++ { + c.Assert(idN[i], Equals, int64(i+4)) + } + + err = alloc.Rebase(4, int64(1000), false) + c.Assert(err, IsNil) + idN, err = alloc.AllocN(4, 3) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 3) + c.Assert(idN[0], Equals, int64(1001)) + c.Assert(idN[0], Equals, int64(1002)) + c.Assert(idN[0], Equals, int64(1003)) + + lastRemainOne := alloc.End() + err = alloc.Rebase(4, alloc.End()-2, false) + c.Assert(err, IsNil) + idN, err = alloc.AllocN(4, 5) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 5) + c.Assert(idN[0], Greater, lastRemainOne) + consecutive := idN[0] + for i := 1; i < 5; i++ { + consecutive++ + c.Assert(idN[i], Equals, consecutive) + } } func (*testSuite) TestUnsignedAutoid(c *C) { @@ -164,6 +213,8 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(err, IsNil) err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + c.Assert(err, IsNil) 
return nil }) c.Assert(err, IsNil) @@ -249,6 +300,40 @@ func (*testSuite) TestUnsignedAutoid(c *C) { un = int64(n + 1) err = alloc.Rebase(3, un, true) c.Assert(err, IsNil) + + // allocN for unsigned + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(4) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + + idN, err := alloc.AllocN(4, 2) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 2) + c.Assert(idN[0], Equals, int64(2)) + c.Assert(idN[0], Equals, int64(3)) + + err = alloc.Rebase(4, int64(500), true) + c.Assert(err, IsNil) + idN, err = alloc.AllocN(4, 2) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 2) + c.Assert(idN[0], Equals, int64(501)) + c.Assert(idN[0], Equals, int64(502)) + + lastRemainOne := alloc.End() + err = alloc.Rebase(4, alloc.End()-2, false) + c.Assert(err, IsNil) + idN, err = alloc.AllocN(4, 5) + c.Assert(err, IsNil) + c.Assert(len(idN), Equals, 5) + c.Assert(idN[0], Greater, lastRemainOne) + consecutive := idN[0] + for i := 1; i < 5; i++ { + consecutive++ + c.Assert(idN[i], Equals, consecutive) + } } // TestConcurrentAlloc is used for the test that @@ -297,6 +382,30 @@ func (*testSuite) TestConcurrentAlloc(c *C) { } m[id] = struct{}{} mu.Unlock() + + //test AllocN + N := rand.Uint64() % 100 + idN, err1 := alloc.AllocN(tblID, N) + if err1 != nil { + errCh <- err1 + break + } + + errFlag := false + mu.Lock() + for i := uint64(0); i < N; i++ { + if _, ok := m[idN[i]]; ok { + errCh <- fmt.Errorf("duplicate id:%v", idN[i]) + errFlag = true + mu.Unlock() + break + } + } + if errFlag { + break + } + m[id] = struct{}{} + mu.Unlock() } } for i := 0; i < count; i++ { From af06fa44df4567742f6cf011619af655334df3c1 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:40:24 +0800 Subject: [PATCH 02/49] add AllocN interface func for util/kvencoder/allocator.go --- go.mod | 1 + 1 file changed, 1 insertion(+) diff --git a/go.mod b/go.mod index e1ae085082f7b..22987a370bd12 100644 --- a/go.mod +++ b/go.mod @@ -69,6 +69,7 @@ require ( golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 + google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect google.golang.org/grpc v1.23.0 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect From fe2504becc10d1f913625c3f2d34ae81c66fcf22 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:52:06 +0800 Subject: [PATCH 03/49] fix allocN test --- meta/autoid/autoid_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 76aa66a57a8f0..0a2b58d6f4e45 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -176,8 +176,8 @@ func (*testSuite) TestT(c *C) { c.Assert(err, IsNil) c.Assert(len(idN), Equals, 3) c.Assert(idN[0], Equals, int64(1001)) - c.Assert(idN[0], Equals, int64(1002)) - c.Assert(idN[0], Equals, int64(1003)) + c.Assert(idN[1], Equals, int64(1002)) + c.Assert(idN[2], Equals, int64(1003)) lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) @@ -312,7 +312,7 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) c.Assert(idN[0], Equals, int64(2)) - c.Assert(idN[0], Equals, int64(3)) + c.Assert(idN[1], Equals, int64(3)) err = alloc.Rebase(4, int64(500), true) 
c.Assert(err, IsNil) @@ -320,7 +320,7 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) c.Assert(idN[0], Equals, int64(501)) - c.Assert(idN[0], Equals, int64(502)) + c.Assert(idN[1], Equals, int64(502)) lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) From 282808bed4087cd7ecb625477d215e4823677268 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:54:45 +0800 Subject: [PATCH 04/49] fix allocN tests --- meta/autoid/autoid_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 0a2b58d6f4e45..58032d37a0237 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -399,12 +399,13 @@ func (*testSuite) TestConcurrentAlloc(c *C) { errFlag = true mu.Unlock() break + } else { + m[id] = struct{}{} } } if errFlag { break } - m[id] = struct{}{} mu.Unlock() } } From 5152858da7683d7aa2b0cf626a04da5000220cea Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:55:20 +0800 Subject: [PATCH 05/49] fix allocN test 1 --- meta/autoid/autoid_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 58032d37a0237..aca870faf61e0 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -400,7 +400,7 @@ func (*testSuite) TestConcurrentAlloc(c *C) { mu.Unlock() break } else { - m[id] = struct{}{} + m[idN[i]] = struct{}{} } } if errFlag { From f13219bae039618ed6f2ebe3eb7c7ed33f6db7db Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:56:27 +0800 Subject: [PATCH 06/49] fix allocN test 2 --- meta/autoid/autoid_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index aca870faf61e0..1cbf9da13657d 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -399,9 +399,8 @@ func (*testSuite) TestConcurrentAlloc(c *C) { errFlag = true mu.Unlock() break - } else { - m[idN[i]] = struct{}{} } + m[idN[i]] = struct{}{} } if errFlag { break From 2da1b02722104c221b187fc7f98249f0f0b5ee15 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 17:59:09 +0800 Subject: [PATCH 07/49] fix allocN test 3 --- meta/autoid/autoid_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 1cbf9da13657d..f24cd8fd8537e 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -311,8 +311,8 @@ func (*testSuite) TestUnsignedAutoid(c *C) { idN, err := alloc.AllocN(4, 2) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) - c.Assert(idN[0], Equals, int64(2)) - c.Assert(idN[1], Equals, int64(3)) + c.Assert(idN[0], Equals, int64(1)) + c.Assert(idN[1], Equals, int64(2)) err = alloc.Rebase(4, int64(500), true) c.Assert(err, IsNil) From c54b8d791b7adfbf487ae8b42140f3f1d0fd89e8 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 19:22:51 +0800 Subject: [PATCH 08/49] fix allocN test 4 --- meta/autoid/autoid.go | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 966f9817ff3b3..cffe86b696972 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -315,11 +315,11 @@ func (alloc *allocator) Alloc(tableID int64) (int64, error) { // NextStep 
return new auto id step according to previous step and consuming time. func NextStep(curStep int64, consumeDur time.Duration) int64 { - failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) { + if val, ok := failpoint.Eval(_curpkg_("mockAutoIDChange")); ok { if val.(bool) { - failpoint.Return(step) + return step } - }) + } consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds() res := int64(float64(curStep) * consumeRate) @@ -357,6 +357,9 @@ func (alloc *allocator) AllocN(tableID int64, N uint64) ([]int64, error) { if tableID == 0 { return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") } + if N == 0 { + return []int64{}, nil + } alloc.mu.Lock() defer alloc.mu.Unlock() if alloc.isUnsigned { @@ -367,11 +370,14 @@ func (alloc *allocator) AllocN(tableID int64, N uint64) ([]int64, error) { func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) { N1 := int64(N) - if alloc.base+N1 > alloc.end { // 说明剩余的批量autoID已经不足以支持本次的批量的连续分配 + // The local rest is not enough for allocN, skip it. + if alloc.base+N1 > alloc.end { var newBase, newEnd int64 startTime := time.Now() - consumeDur := startTime.Sub(alloc.lastAllocTime) // 虽然有一部分被冲掉了,但是目的也是为了放大step + // Although it may skip a segment here, we still think it is consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) nextStep := NextStep(alloc.step, consumeDur) + // Make sure nextStep is big enough. if nextStep <= N1 { alloc.step = mathutil.MinInt64(N1*2, maxStep) } else { @@ -380,15 +386,16 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { m := meta.NewMeta(txn) var err1 error - newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) //TID也是一种元数据,保存在tikv端 + newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) if err1 != nil { return err1 } tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) - if tmpStep < N1 { // 剩余的根本不够分配,就免的再去写了,直接err + // The global rest is not enough for allocN. + if tmpStep < N1 { return ErrAutoincReadFailed } - newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) //这边会对TID进行加add step操作,表示批量分配过了 + newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) return err1 }) metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) @@ -414,13 +421,16 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) return resN, nil } -func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { //signed和unsigned就是max的值不同 +func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { N1 := int64(N) + // The local rest is not enough for allocN, skip it. if alloc.base+N1 > alloc.end { var newBase, newEnd int64 startTime := time.Now() + // Although it may skip a segment here, we still think it is consumed. consumeDur := startTime.Sub(alloc.lastAllocTime) nextStep := NextStep(alloc.step, consumeDur) + // Make sure nextStep is big enough. if nextStep <= N1 { alloc.step = mathutil.MinInt64(N1*2, maxStep) } else { @@ -434,6 +444,7 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error return err1 } tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) + // The global rest is not enough for allocN. 
if tmpStep < N1 { return ErrAutoincReadFailed } @@ -459,7 +470,7 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error for i := alloc.base + 1; i <= alloc.base+N1; i++ { resN = append(resN, i) } - // use uint64 N directly + // Use uint64 N directly. alloc.base = int64(uint64(alloc.base) + N) return resN, nil } From e9d961855997ab036a4bf3f887f481993be43471 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 26 Aug 2019 19:47:54 +0800 Subject: [PATCH 09/49] fix cache --- meta/autoid/autoid.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index cffe86b696972..2ec3184b542d2 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -315,11 +315,11 @@ func (alloc *allocator) Alloc(tableID int64) (int64, error) { // NextStep return new auto id step according to previous step and consuming time. func NextStep(curStep int64, consumeDur time.Duration) int64 { - if val, ok := failpoint.Eval(_curpkg_("mockAutoIDChange")); ok { + failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) { if val.(bool) { - return step + failpoint.Return(step) } - } + }) consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds() res := int64(float64(curStep) * consumeRate) From b914a78a55f587f7697993aa4efea02ef35d7c7d Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 29 Aug 2019 21:37:13 +0800 Subject: [PATCH 10/49] add auto consecutive increment id in insert executor --- executor/insert_common.go | 257 +++++++++++++++++++++++++++++++++++++- executor/insert_test.go | 143 ++++++++++++++++++++- table/table.go | 8 ++ 3 files changed, 406 insertions(+), 2 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 39da4fe3458c5..861b1ade0d88c 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -60,6 +60,9 @@ type InsertValues struct { colDefaultVals []defaultVal evalBuffer chunk.MutRow evalBufferTypes []*types.FieldType + + // Cache datumLazy for consecutive autoid batch alloc + cache []datumLazy } type defaultVal struct { @@ -205,6 +208,9 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows := make([][]types.Datum, 0, len(e.Lists)) + // Cache for consecutive autoID batch alloc + e.cache = make([]datumLazy, 0, len(e.Lists)) + for i, list := range e.Lists { e.rowCount++ var row []types.Datum @@ -214,6 +220,12 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { + // Before batch insert, should fill the batch allocated autoIDs + rows, err := e.autoIdAllocN(ctx, rows) + if err != nil { + return err + } + e.cache = e.cache[:0] if err = base.exec(ctx, rows); err != nil { return err } @@ -223,9 +235,121 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } } } + // Fill the batch allocated autoIDs + rows, err = e.autoIdAllocN(ctx, rows) + if err != nil { + return err + } return base.exec(ctx, rows) } +func (e *InsertValues) autoIdAllocN(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { + retryInfo := e.ctx.GetSessionVars().RetryInfo + length := len(e.cache) + for i := 0; i < length; i++ { + switch e.cache[i].kind { + case AutoIdNull: + // Find consecutive num. 
+ var cnt int + var start int + start = i + for i < length { + if e.cache[i].kind == AutoIdNull { + cnt++ + i++ + } else { + break + } + } + // Fix the index i + i-- + // Alloc batch N consecutive autoIDs. + recordIDs, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) + if e.filterErr(err) != nil { + return nil, err + } + // Distribute autoIDs to rows. + var d types.Datum + var c *table.Column + for j := 0; j < cnt; j++ { + offset := j + start + rowIdx := e.cache[offset].rowIdx + colIdx := e.cache[offset].colIdx + + d = e.cache[offset].datum + c = e.Table.Cols()[colIdx] + + // It's compatible with mysql. So it sets the first allocated autoID to the lastInsertId. + if e.lastInsertID == 0 { + e.lastInsertID = uint64(recordIDs[0]) + } + + d.SetAutoID(recordIDs[j], c.Flag) + retryInfo.AddAutoIncrementID(recordIDs[j]) + + // The value of d is adjusted by auto ID, so we need to cast it again. + d, err := table.CastValue(e.ctx, d, c.ToInfo()) + if err != nil { + return nil, err + } + // Handle the bad null error. + if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[rowIdx][colIdx] = d + } + case AutoIdRebase: + // Rebase action + rowIdx := e.cache[i].rowIdx + colIdx := e.cache[i].colIdx + + // recordID has been casted in evalRow + recordID := e.cache[i].recordID + d := e.cache[i].datum + c := e.Table.Cols()[colIdx] + + err := e.Table.RebaseAutoID(e.ctx, recordID, true) + if err != nil { + return nil, err + } + e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) + retryInfo.AddAutoIncrementID(recordID) + + // Handle the bad null error. + if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + // Cause d may changed in HandleBadNull, do assignment here + rows[rowIdx][colIdx] = d + case AutoIdZero: + // Don't change value 0 to auto id, if NoAutoValueOnZero SQL mode is set. + rowIdx := e.cache[i].rowIdx + colIdx := e.cache[i].colIdx + + // recordID has been casted in evalRow + recordID := e.cache[i].recordID + d := e.cache[i].datum + c := e.Table.Cols()[colIdx] + + d.SetAutoID(recordID, c.Flag) + retryInfo.AddAutoIncrementID(recordID) + + // The value of d is adjusted by auto ID, so we need to cast it again. + d, err := table.CastValue(e.ctx, d, c.ToInfo()) + if err != nil { + return nil, err + } + + // Handle the bad null error. + if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[rowIdx][colIdx] = d + } + } + return rows, nil +} + func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int, err error) error { if err == nil { return nil @@ -281,7 +405,8 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression e.evalBuffer.SetDatum(offset, val1) } - return e.fillRow(ctx, row, hasValue) + // Row may lack of generated column、autoIncrement column、empty column here. + return e.fillRowLazy(ctx, row, hasValue, rowIdx) } var emptyRow chunk.Row @@ -468,6 +593,26 @@ func (e *InsertValues) getColDefaultValue(idx int, col *table.Column) (d types.D return defaultVal, nil } +// fillColValueLazy is quite same to fillColValue() except it will cache auto increment datum for lazy batch allocation. 
+func (e *InsertValues) fillColValueLazy(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (datumLazy, + error) { + if mysql.HasAutoIncrementFlag(column.Flag) { + d, err := e.adjustAutoIncrementDatumLazy(ctx, datum, hasValue, column) + if err != nil { + return datumLazy{}, err + } + return d, nil + } + if !hasValue { + d, err := e.getColDefaultValue(idx, column) + if e.filterErr(err) != nil { + return datumLazy{}, err + } + return datumLazy{isInAutoIncrement: false, datum: d}, nil + } + return datumLazy{isInAutoIncrement: false, datum: datum}, nil +} + // fillColValue fills the column value if it is not set in the insert statement. func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (types.Datum, error) { @@ -527,6 +672,98 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return row, nil } +// fillRowLazy is quite same to fillRow() except it will cache auto increment datum for lazy batch allocation of autoID. +// This case is used in insert|replace into values (row),(row),(row)... +func (e *InsertValues) fillRowLazy(ctx context.Context, row []types.Datum, hasValue []bool, rowIdx int) ([]types.Datum, error) { + gCols := make([]*table.Column, 0) + + for i, c := range e.Table.Cols() { + var err error + var datumLazyTmp datumLazy + // Evaluate the generated columns later after real columns set + if c.IsGenerated() { + gCols = append(gCols, c) + } else { + // Get the default value for all no value columns, the auto increment column is different from the others. + datumLazyTmp, err = e.fillColValueLazy(ctx, row[i], i, c, hasValue[i]) + if err != nil { + return nil, err + } + row[i] = datumLazyTmp.datum + if !datumLazyTmp.isInAutoIncrement { + // Handle the bad null error. + if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + } else { + // Cache the autoIncrement datum for lazy batch alloc + datumLazyTmp.rowIdx = rowIdx + datumLazyTmp.colIdx = i + e.cache = append(e.cache, datumLazyTmp) + } + } + } + for i, gCol := range gCols { + colIdx := gCol.ColumnInfo.Offset + val, err := e.GenExprs[i].Eval(chunk.MutRowFromDatums(row).ToRow()) + if e.filterErr(err) != nil { + return nil, err + } + row[colIdx], err = table.CastValue(e.ctx, val, gCol.ToInfo()) + if err != nil { + return nil, err + } + // Handle the bad null error. + if row[colIdx], err = gCol.HandleBadNull(row[colIdx], e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + } + return row, nil +} + +// adjustAutoIncrementDatumLazy is quite same to adjustAutoIncrementDatum() +// except it will cache auto increment datum for lazy batch allocation of autoID. +func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumLazy, error) { + retryInfo := e.ctx.GetSessionVars().RetryInfo + if retryInfo.Retrying { + id, err := retryInfo.GetCurrAutoIncrementID() + if err != nil { + return datumLazy{}, err + } + d.SetAutoID(id, c.Flag) + return datumLazy{isInAutoIncrement: false, datum: d}, nil + } + + var err error + var recordID int64 + if !hasValue { + d.SetNull() + } + if !d.IsNull() { + recordID, err = getAutoRecordID(d, &c.FieldType, true) + if err != nil { + return datumLazy{}, err + } + } + // Use the value if it's not null and not 0. + if recordID != 0 { + // Do the rebase action lazily. 
+ return datumLazy{isInAutoIncrement: true, kind: AutoIdRebase, recordID: recordID, datum: d}, nil + } + + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + // Do the alloc action lazily. + return datumLazy{isInAutoIncrement: true, kind: AutoIdNull, datum: d}, nil + } + + // Use the 0 value as auto id directly + // Do the action lazily. + return datumLazy{isInAutoIncrement: true, kind: AutoIdZero, recordID: recordID, datum: d}, nil + +} + func (e *InsertValues) adjustAutoIncrementDatum(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (types.Datum, error) { retryInfo := e.ctx.GetSessionVars().RetryInfo if retryInfo.Retrying { @@ -689,3 +926,21 @@ func (e *InsertValues) addRecord(ctx context.Context, row []types.Datum) (int64, } return h, nil } + +type datumAutoIDType int + +const ( + AutoIdNull datumAutoIDType = iota + AutoIdRebase + AutoIdZero +) + +type datumLazy struct { + isInAutoIncrement bool + kind datumAutoIDType + datum types.Datum + // recordID is for AutoIdRebase type + recordID int64 + rowIdx int + colIdx int +} diff --git a/executor/insert_test.go b/executor/insert_test.go index 0ec561cb04a40..3a8a55476e776 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -298,6 +298,8 @@ func (s *testSuite3) TestInsertWithAutoidSchema(c *C) { tk.MustExec(`create table t5(id int primary key, n float unsigned auto_increment, key I_n(n));`) tk.MustExec(`create table t6(id int primary key, n double auto_increment, key I_n(n));`) tk.MustExec(`create table t7(id int primary key, n double unsigned auto_increment, key I_n(n));`) + // test for inserting multiple values + tk.MustExec(`create table t8(id int primary key auto_increment, n int);`) tests := []struct { insert string @@ -540,11 +542,150 @@ func (s *testSuite3) TestInsertWithAutoidSchema(c *C) { `select * from t7 where id = 3`, testkit.Rows(`3 3`), }, + + // the following is test for insert multiple values. + { + `insert into t8(n) values(1),(2)`, + `select * from t8 where id = 1`, + testkit.Rows(`1 1`), + }, + { + `;`, + `select * from t8 where id = 2`, + testkit.Rows(`2 2`), + }, + { + `;`, + `select last_insert_id();`, + testkit.Rows(`1`), + }, + // test user rebase and auto alloc mixture. + { + `insert into t8 values(null, 3),(-1, -1),(null,4),(null, 5)`, + `select * from t8 where id = 3`, + testkit.Rows(`3 3`), + }, + // -1 won't rebase allocator here cause -1 < base. + { + `;`, + `select * from t8 where id = -1`, + testkit.Rows(`-1 -1`), + }, + { + `;`, + `select * from t8 where id = 4`, + testkit.Rows(`4 4`), + }, + { + `;`, + `select * from t8 where id = 5`, + testkit.Rows(`5 5`), + }, + { + `;`, + `select last_insert_id();`, + testkit.Rows(`3`), + }, + { + `insert into t8 values(null, 6),(10, 7),(null, 8)`, + `select * from t8 where id = 6`, + testkit.Rows(`6 6`), + }, + // 10 will rebase allocator here. + { + `;`, + `select * from t8 where id = 10`, + testkit.Rows(`10 7`), + }, + { + `;`, + `select * from t8 where id = 11`, + testkit.Rows(`11 8`), + }, + { + `;`, + `select last_insert_id()`, + testkit.Rows(`6`), + }, + // fix bug for last_insert_id should be first allocated id in insert rows (skip the rebase id). 
+ { + `insert into t8 values(100, 9),(null,10),(null,11)`, + `select * from t8 where id = 100`, + testkit.Rows(`100 9`), + }, + { + `;`, + `select * from t8 where id = 101`, + testkit.Rows(`101 10`), + }, + { + `;`, + `select * from t8 where id = 102`, + testkit.Rows(`102 11`), + }, + { + `;`, + `select last_insert_id()`, + testkit.Rows(`101`), + }, + // test with sql_mode: NO_AUTO_VALUE_ON_ZERO. + { + `;`, + `select @@sql_mode`, + testkit.Rows(`ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`), + }, + { + `;`, + "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO`", + nil, + }, + { + `insert into t8 values (0, 12), (null, 13)`, + `select * from t8 where id = 0`, + testkit.Rows(`0 12`), + }, + { + `;`, + `select * from t8 where id = 103`, + testkit.Rows(`103 13`), + }, + { + `;`, + `select last_insert_id()`, + testkit.Rows(`103`), + }, + // test without sql_mode: NO_AUTO_VALUE_ON_ZERO. + { + `;`, + "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`", + nil, + }, + // value 0 will be substitute by autoid. + { + `insert into t8 values (0, 14), (null, 15)`, + `select * from t8 where id = 104`, + testkit.Rows(`104 14`), + }, + { + `;`, + `select * from t8 where id = 105`, + testkit.Rows(`105 15`), + }, + { + `;`, + `select last_insert_id()`, + testkit.Rows(`104`), + }, } for _, tt := range tests { tk.MustExec(tt.insert) - tk.MustQuery(tt.query).Check(tt.result) + if tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO`" || + tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`" { + tk.MustExec(tt.query) + } else { + tk.MustQuery(tt.query).Check(tt.result) + } } } diff --git a/table/table.go b/table/table.go index f233e2cfa517d..c9bd2ef49e6c3 100644 --- a/table/table.go +++ b/table/table.go @@ -195,6 +195,14 @@ func AllocAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Conte return t.Allocator(sctx).Alloc(t.Meta().ID) } +func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context, N int) ([]int64, error) { + if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { + span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValueN", opentracing.ChildOf(span.Context())) + defer span1.Finish() + } + return t.Allocator(sctx).AllocN(t.Meta().ID, uint64(N)) +} + // PhysicalTable is an abstraction for two kinds of table representation: partition or non-partitioned table. // PhysicalID is a ID that can be used to construct a key ranges, all the data in the key range belongs to the corresponding PhysicalTable. // For a non-partitioned table, its PhysicalID equals to its TableID; For a partition of a partitioned table, its PhysicalID is the partition's ID. 
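[Editor's note] Patch 10 above wires the insert executor to the allocator through table.AllocBatchAutoIncrementValue, which simply forwards to Allocator.AllocN from patch 01. The sketch below is a minimal, stand-alone model of the behaviour those functions rely on: a cached local segment (base, end] that is refilled in one shot whenever the leftover range cannot satisfy a whole batch, so every batch comes back as consecutive IDs. It is an illustration only, not the TiDB implementation; the names (simpleAllocator, allocN, errExhausted), the fixed step, and the in-memory "global" counter standing in for the meta record kept in TiKV are all assumptions made for this example.

package main

import (
	"errors"
	"fmt"
	"math"
)

// simpleAllocator is a toy, in-memory stand-in for the real allocator: it keeps a
// cached segment (base, end] and refills it from a pretend "global" counter when the
// remaining range cannot satisfy a whole batch. All names here are illustrative.
type simpleAllocator struct {
	base, end int64 // cached local segment; ids in (base, end] are free to hand out
	global    int64 // pretend persistent counter (the meta stored in TiKV in the real code)
	step      int64 // how many ids to reserve per refill
}

var errExhausted = errors.New("auto id exhausted")

// allocN hands out n consecutive ids, reserving a new segment in a single step when
// the local leftover is too small, so a multi-row INSERT gets one contiguous block.
func (a *simpleAllocator) allocN(n int64) ([]int64, error) {
	if n <= 0 {
		return nil, nil
	}
	if math.MaxInt64-a.base < n {
		return nil, errExhausted
	}
	if a.base+n > a.end { // local leftover is not enough, reserve a new segment
		need := a.step
		if need < n {
			need = n
		}
		if math.MaxInt64-a.global < need {
			return nil, errExhausted
		}
		a.base = a.global
		a.global += need
		a.end = a.global
	}
	ids := make([]int64, 0, n)
	for i := a.base + 1; i <= a.base+n; i++ {
		ids = append(ids, i)
	}
	a.base += n
	return ids, nil
}

func main() {
	a := &simpleAllocator{step: 5}
	for _, n := range []int64{1, 2, 7} {
		ids, err := a.allocN(n)
		if err != nil {
			panic(err)
		}
		fmt.Println(ids)
	}
}

Running this prints [1], [2 3] and [6 7 8 9 10 11 12]: ids 4 and 5 are discarded together with the abandoned segment, which is the same trade-off the real allocator makes when it reserves a fresh range for a batch that does not fit.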
From 3082af7e3976d37fc6e5ffbd55d6ef2d083b5ea2 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 29 Aug 2019 21:51:42 +0800 Subject: [PATCH 11/49] add auto consecutive increment id comment in insert executor --- executor/insert_common.go | 31 ++++++++++++++++++------------- table/table.go | 3 ++- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 861b1ade0d88c..993b1f21337db 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -221,7 +221,7 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { // Before batch insert, should fill the batch allocated autoIDs - rows, err := e.autoIdAllocN(ctx, rows) + rows, err := e.autoIDAllocN(ctx, rows) if err != nil { return err } @@ -236,25 +236,25 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } } // Fill the batch allocated autoIDs - rows, err = e.autoIdAllocN(ctx, rows) + rows, err = e.autoIDAllocN(ctx, rows) if err != nil { return err } return base.exec(ctx, rows) } -func (e *InsertValues) autoIdAllocN(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { +func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { retryInfo := e.ctx.GetSessionVars().RetryInfo length := len(e.cache) for i := 0; i < length; i++ { switch e.cache[i].kind { - case AutoIdNull: + case AutoIDNull: // Find consecutive num. var cnt int var start int start = i for i < length { - if e.cache[i].kind == AutoIdNull { + if e.cache[i].kind == AutoIDNull { cnt++ i++ } else { @@ -298,7 +298,7 @@ func (e *InsertValues) autoIdAllocN(ctx context.Context, rows [][]types.Datum) ( } rows[rowIdx][colIdx] = d } - case AutoIdRebase: + case AutoIDRebase: // Rebase action rowIdx := e.cache[i].rowIdx colIdx := e.cache[i].colIdx @@ -321,7 +321,7 @@ func (e *InsertValues) autoIdAllocN(ctx context.Context, rows [][]types.Datum) ( } // Cause d may changed in HandleBadNull, do assignment here rows[rowIdx][colIdx] = d - case AutoIdZero: + case AutoIDZero: // Don't change value 0 to auto id, if NoAutoValueOnZero SQL mode is set. rowIdx := e.cache[i].rowIdx colIdx := e.cache[i].colIdx @@ -748,19 +748,19 @@ func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types // Use the value if it's not null and not 0. if recordID != 0 { // Do the rebase action lazily. - return datumLazy{isInAutoIncrement: true, kind: AutoIdRebase, recordID: recordID, datum: d}, nil + return datumLazy{isInAutoIncrement: true, kind: AutoIDRebase, recordID: recordID, datum: d}, nil } // Change NULL to auto id. // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { // Do the alloc action lazily. - return datumLazy{isInAutoIncrement: true, kind: AutoIdNull, datum: d}, nil + return datumLazy{isInAutoIncrement: true, kind: AutoIDNull, datum: d}, nil } // Use the 0 value as auto id directly // Do the action lazily. 
- return datumLazy{isInAutoIncrement: true, kind: AutoIdZero, recordID: recordID, datum: d}, nil + return datumLazy{isInAutoIncrement: true, kind: AutoIDZero, recordID: recordID, datum: d}, nil } @@ -930,9 +930,14 @@ func (e *InsertValues) addRecord(ctx context.Context, row []types.Datum) (int64, type datumAutoIDType int const ( - AutoIdNull datumAutoIDType = iota - AutoIdRebase - AutoIdZero + // AutoIDNull stands for auto increment datum that need get a autoID from allocator. + AutoIDNull datumAutoIDType = iota + // AutoIDRebase stands for auto increment datum that has a specified value from user, + // so allocator need to rebase the value. + AutoIDRebase + // AutoIDZero stands for 0 value of auto increment datum, it won't replaced by autoID + // when sql-model NoAutoValueOnZero is set. + AutoIDZero ) type datumLazy struct { diff --git a/table/table.go b/table/table.go index c9bd2ef49e6c3..85ff9b15cdd4d 100644 --- a/table/table.go +++ b/table/table.go @@ -195,9 +195,10 @@ func AllocAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Conte return t.Allocator(sctx).Alloc(t.Meta().ID) } +// AllocBatchAutoIncrementValue allocates batch auto_increment value for rows. func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context, N int) ([]int64, error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { - span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValueN", opentracing.ChildOf(span.Context())) + span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValue", opentracing.ChildOf(span.Context())) defer span1.Finish() } return t.Allocator(sctx).AllocN(t.Meta().ID, uint64(N)) From 9e9ab6e28f953be5b25dfbb9ba7bbcfae2891f6e Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 30 Aug 2019 10:05:56 +0800 Subject: [PATCH 12/49] try fix test case --- executor/insert_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/executor/insert_test.go b/executor/insert_test.go index 3a8a55476e776..3dd4db534e1ce 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -15,7 +15,6 @@ package executor_test import ( "fmt" - . "github.com/pingcap/check" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/table" From 40717f55df61c441616bc26ffc2eb4a5e862c95a Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 30 Aug 2019 10:37:36 +0800 Subject: [PATCH 13/49] fix session_test.TestAutoIncrementID autoid overflow --- meta/autoid/autoid.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 2ec3184b542d2..6b73ae222bb72 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -370,6 +370,10 @@ func (alloc *allocator) AllocN(tableID int64, N uint64) ([]int64, error) { func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) { N1 := int64(N) + // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. + if math.MaxInt64-alloc.base < N1 { + return nil, ErrAutoincReadFailed + } // The local rest is not enough for allocN, skip it. if alloc.base+N1 > alloc.end { var newBase, newEnd int64 @@ -423,6 +427,10 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { N1 := int64(N) + // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. 
+ if math.MaxInt64-alloc.base < N1 { + return nil, ErrAutoincReadFailed + } // The local rest is not enough for allocN, skip it. if alloc.base+N1 > alloc.end { var newBase, newEnd int64 From bcd146de8b20013129549dd6805d4fcb65ad9a3a Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 30 Aug 2019 10:45:13 +0800 Subject: [PATCH 14/49] fix session_test.TestAutoIncrementID unsighed autoid overflow --- meta/autoid/autoid.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 6b73ae222bb72..f408dfc4ac958 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -428,7 +428,7 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { N1 := int64(N) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. - if math.MaxInt64-alloc.base < N1 { + if math.MaxUint64-uint64(alloc.base) < N { return nil, ErrAutoincReadFailed } // The local rest is not enough for allocN, skip it. From fc30110591773580b3ee20de1881db3260dbbc7e Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 30 Aug 2019 10:53:55 +0800 Subject: [PATCH 15/49] fix autoid usigned computing overflow --- meta/autoid/autoid.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index f408dfc4ac958..7aaeea1d2e846 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -432,7 +432,7 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error return nil, ErrAutoincReadFailed } // The local rest is not enough for allocN, skip it. - if alloc.base+N1 > alloc.end { + if uint64(alloc.base)+N > uint64(alloc.end) { var newBase, newEnd int64 startTime := time.Now() // Although it may skip a segment here, we still think it is consumed. @@ -475,8 +475,8 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) resN := make([]int64, 0, N1) - for i := alloc.base + 1; i <= alloc.base+N1; i++ { - resN = append(resN, i) + for i := uint64(alloc.base) + 1; i <= uint64(alloc.base)+N; i++ { + resN = append(resN, int64(i)) } // Use uint64 N directly. alloc.base = int64(uint64(alloc.base) + N) From 5394a0c998eae84a4d6e75c29f73fb0eae7d258f Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 6 Sep 2019 17:15:15 +0800 Subject: [PATCH 16/49] fix conflict with suli's PR --- executor/insert_common.go | 44 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 993b1f21337db..565c5aa0bfbc2 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -61,8 +61,12 @@ type InsertValues struct { evalBuffer chunk.MutRow evalBufferTypes []*types.FieldType - // Cache datumLazy for consecutive autoid batch alloc + // Cache datumLazy for consecutive autoid batch alloc. cache []datumLazy + // Fill the autoID lazily to datum. + // This is used for being compatible with JDBC using getGeneratedKeys(). + // By now, we can guarantee consecutive autoID in a batch. 
+ lazyFillAutoID bool } type defaultVal struct { @@ -202,13 +206,14 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { batchInsert := sessVars.BatchInsert && !sessVars.InTxn() batchSize := sessVars.DMLBatchSize + e.lazyFillAutoID = true evalRowFunc := e.fastEvalRow if !e.allAssignmentsAreConstant { evalRowFunc = e.evalRow } rows := make([][]types.Datum, 0, len(e.Lists)) - // Cache for consecutive autoID batch alloc + // Cache for consecutive autoID batch alloc. e.cache = make([]datumLazy, 0, len(e.Lists)) for i, list := range e.Lists { @@ -220,8 +225,8 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { - // Before batch insert, should fill the batch allocated autoIDs - rows, err := e.autoIDAllocN(ctx, rows) + // Before batch insert, should fill the batch allocated autoIDs. + rows, err = e.autoIDAllocN(ctx, rows) if err != nil { return err } @@ -235,7 +240,7 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } } } - // Fill the batch allocated autoIDs + // Fill the batch allocated autoIDs. rows, err = e.autoIDAllocN(ctx, rows) if err != nil { return err @@ -250,25 +255,18 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( switch e.cache[i].kind { case AutoIDNull: // Find consecutive num. - var cnt int - var start int - start = i - for i < length { - if e.cache[i].kind == AutoIDNull { - cnt++ - i++ - } else { - break - } + start := i + cnt := 1 + for i+1 <= length && e.cache[i].kind == AutoIDNull { + i++ + cnt++ } - // Fix the index i - i-- // Alloc batch N consecutive autoIDs. recordIDs, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) if e.filterErr(err) != nil { return nil, err } - // Distribute autoIDs to rows. + // Assign autoIDs to rows. var d types.Datum var c *table.Column for j := 0; j < cnt; j++ { @@ -299,11 +297,11 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( rows[rowIdx][colIdx] = d } case AutoIDRebase: - // Rebase action + // Rebase action. rowIdx := e.cache[i].rowIdx colIdx := e.cache[i].colIdx - // recordID has been casted in evalRow + // recordID has been casted in evalRow. recordID := e.cache[i].recordID d := e.cache[i].datum c := e.Table.Cols()[colIdx] @@ -319,14 +317,14 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } - // Cause d may changed in HandleBadNull, do assignment here + // Cause d may changed in HandleBadNull, do assignment here. rows[rowIdx][colIdx] = d case AutoIDZero: - // Don't change value 0 to auto id, if NoAutoValueOnZero SQL mode is set. + // Won't change value 0 to auto id, if NO_AUTO_VALUE_ON_ZERO SQL mode is set. rowIdx := e.cache[i].rowIdx colIdx := e.cache[i].colIdx - // recordID has been casted in evalRow + // recordID has been casted in evalRow. 
recordID := e.cache[i].recordID d := e.cache[i].datum c := e.Table.Cols()[colIdx] From ede4957eeedd883608dd4f631f6dda6baa43aae2 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 6 Sep 2019 17:29:55 +0800 Subject: [PATCH 17/49] fix index out of range --- executor/insert_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 565c5aa0bfbc2..d6eeedf9b317b 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -257,7 +257,7 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( // Find consecutive num. start := i cnt := 1 - for i+1 <= length && e.cache[i].kind == AutoIDNull { + for i+1 < length && e.cache[i].kind == AutoIDNull { i++ cnt++ } From 3f37e0ebf88ecd3e39da89bd42c34aae6dfa9466 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 10:57:03 +0800 Subject: [PATCH 18/49] merge similar function --- executor/insert_common.go | 49 +++++++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index d6eeedf9b317b..fc8567139ec0b 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -65,8 +65,10 @@ type InsertValues struct { cache []datumLazy // Fill the autoID lazily to datum. // This is used for being compatible with JDBC using getGeneratedKeys(). - // By now, we can guarantee consecutive autoID in a batch. + // By now in insert multiple values, TiDB can guarantee consecutive autoID in a batch. lazyFillAutoID bool + // RowIdx + rowIdx int } type defaultVal struct { @@ -402,9 +404,11 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression row[offset], hasValue[offset] = *val1.Copy(), true e.evalBuffer.SetDatum(offset, val1) } - + if e.lazyFillAutoID { + e.rowIdx = rowIdx + } // Row may lack of generated column、autoIncrement column、empty column here. - return e.fillRowLazy(ctx, row, hasValue, rowIdx) + return e.fillRow(ctx, row, hasValue) } var emptyRow chunk.Row @@ -429,6 +433,10 @@ func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expres offset := e.insertColumns[i].Offset row[offset], hasValue[offset] = val1, true } + // rowIdx for lazy autoID datum allocation. + if e.lazyFillAutoID { + e.rowIdx = rowIdx + } return e.fillRow(ctx, row, hasValue) } @@ -633,6 +641,8 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx // fillRow fills generated columns, auto_increment column and empty column. // For NOT NULL column, it will return error or use zero value based on sql_mode. +// When lazyFillAutoID is true, fill row will cache auto increment datum for lazy +// batch allocation. This case is used in insert|replace into values (row),(row),(row)... func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue []bool) ([]types.Datum, error) { gCols := make([]*table.Column, 0) for i, c := range e.Table.Cols() { @@ -642,13 +652,32 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue gCols = append(gCols, c) } else { // Get the default value for all no value columns, the auto increment column is different from the others. - row[i], err = e.fillColValue(ctx, row[i], i, c, hasValue[i]) - if err != nil { - return nil, err - } - // Handle the bad null error. 
- if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err + if e.lazyFillAutoID { + var datumLazyTmp datumLazy + if datumLazyTmp, err = e.fillColValueLazy(ctx, row[i], i, c, hasValue[i]); err != nil { + return nil, err + } + if !datumLazyTmp.isInAutoIncrement { + // Store the plain datum. + row[i] = datumLazyTmp.datum + // Handle the bad null error. + if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + } else { + // Cache the autoIncrement datum for lazy batch alloc + datumLazyTmp.rowIdx = e.rowIdx + datumLazyTmp.colIdx = i + e.cache = append(e.cache, datumLazyTmp) + } + } else { + if row[i], err = e.fillColValue(ctx, row[i], i, c, hasValue[i]); err != nil { + return nil, err + } + // Handle the bad null error. + if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } } } } From 5d93e57bcfe4aa36a9fae165092d55bc93ebc37c Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 11:22:53 +0800 Subject: [PATCH 19/49] delete fillRowLazy --- executor/insert_common.go | 49 --------------------------------------- 1 file changed, 49 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index fc8567139ec0b..a254771088215 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -699,55 +699,6 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return row, nil } -// fillRowLazy is quite same to fillRow() except it will cache auto increment datum for lazy batch allocation of autoID. -// This case is used in insert|replace into values (row),(row),(row)... -func (e *InsertValues) fillRowLazy(ctx context.Context, row []types.Datum, hasValue []bool, rowIdx int) ([]types.Datum, error) { - gCols := make([]*table.Column, 0) - - for i, c := range e.Table.Cols() { - var err error - var datumLazyTmp datumLazy - // Evaluate the generated columns later after real columns set - if c.IsGenerated() { - gCols = append(gCols, c) - } else { - // Get the default value for all no value columns, the auto increment column is different from the others. - datumLazyTmp, err = e.fillColValueLazy(ctx, row[i], i, c, hasValue[i]) - if err != nil { - return nil, err - } - row[i] = datumLazyTmp.datum - if !datumLazyTmp.isInAutoIncrement { - // Handle the bad null error. - if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - } else { - // Cache the autoIncrement datum for lazy batch alloc - datumLazyTmp.rowIdx = rowIdx - datumLazyTmp.colIdx = i - e.cache = append(e.cache, datumLazyTmp) - } - } - } - for i, gCol := range gCols { - colIdx := gCol.ColumnInfo.Offset - val, err := e.GenExprs[i].Eval(chunk.MutRowFromDatums(row).ToRow()) - if e.filterErr(err) != nil { - return nil, err - } - row[colIdx], err = table.CastValue(e.ctx, val, gCol.ToInfo()) - if err != nil { - return nil, err - } - // Handle the bad null error. - if row[colIdx], err = gCol.HandleBadNull(row[colIdx], e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - } - return row, nil -} - // adjustAutoIncrementDatumLazy is quite same to adjustAutoIncrementDatum() // except it will cache auto increment datum for lazy batch allocation of autoID. 
func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumLazy, error) { From 33da0085c553ea06d1c99bc151e6be5d4c5e642a Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 12:46:27 +0800 Subject: [PATCH 20/49] fix commment --- executor/insert_common.go | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index a254771088215..107e2e2b9d9b3 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -62,7 +62,7 @@ type InsertValues struct { evalBufferTypes []*types.FieldType // Cache datumLazy for consecutive autoid batch alloc. - cache []datumLazy + cache []datumAutoIDLazy // Fill the autoID lazily to datum. // This is used for being compatible with JDBC using getGeneratedKeys(). // By now in insert multiple values, TiDB can guarantee consecutive autoID in a batch. @@ -216,7 +216,7 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { rows := make([][]types.Datum, 0, len(e.Lists)) // Cache for consecutive autoID batch alloc. - e.cache = make([]datumLazy, 0, len(e.Lists)) + e.cache = make([]datumAutoIDLazy, 0, len(e.Lists)) for i, list := range e.Lists { e.rowCount++ @@ -600,23 +600,23 @@ func (e *InsertValues) getColDefaultValue(idx int, col *table.Column) (d types.D } // fillColValueLazy is quite same to fillColValue() except it will cache auto increment datum for lazy batch allocation. -func (e *InsertValues) fillColValueLazy(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (datumLazy, +func (e *InsertValues) fillColValueLazy(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (datumAutoIDLazy, error) { if mysql.HasAutoIncrementFlag(column.Flag) { d, err := e.adjustAutoIncrementDatumLazy(ctx, datum, hasValue, column) if err != nil { - return datumLazy{}, err + return datumAutoIDLazy{}, err } return d, nil } if !hasValue { d, err := e.getColDefaultValue(idx, column) if e.filterErr(err) != nil { - return datumLazy{}, err + return datumAutoIDLazy{}, err } - return datumLazy{isInAutoIncrement: false, datum: d}, nil + return datumAutoIDLazy{isInAutoIncrement: false, datum: d}, nil } - return datumLazy{isInAutoIncrement: false, datum: datum}, nil + return datumAutoIDLazy{isInAutoIncrement: false, datum: datum}, nil } // fillColValue fills the column value if it is not set in the insert statement. @@ -653,13 +653,13 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue } else { // Get the default value for all no value columns, the auto increment column is different from the others. if e.lazyFillAutoID { - var datumLazyTmp datumLazy + var datumLazyTmp datumAutoIDLazy if datumLazyTmp, err = e.fillColValueLazy(ctx, row[i], i, c, hasValue[i]); err != nil { return nil, err } + // Store the plain datum. + row[i] = datumLazyTmp.datum if !datumLazyTmp.isInAutoIncrement { - // Store the plain datum. - row[i] = datumLazyTmp.datum // Handle the bad null error. if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err @@ -701,15 +701,15 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue // adjustAutoIncrementDatumLazy is quite same to adjustAutoIncrementDatum() // except it will cache auto increment datum for lazy batch allocation of autoID. 
-func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumLazy, error) { +func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumAutoIDLazy, error) { retryInfo := e.ctx.GetSessionVars().RetryInfo if retryInfo.Retrying { id, err := retryInfo.GetCurrAutoIncrementID() if err != nil { - return datumLazy{}, err + return datumAutoIDLazy{}, err } d.SetAutoID(id, c.Flag) - return datumLazy{isInAutoIncrement: false, datum: d}, nil + return datumAutoIDLazy{isInAutoIncrement: false, datum: d}, nil } var err error @@ -720,25 +720,25 @@ func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types if !d.IsNull() { recordID, err = getAutoRecordID(d, &c.FieldType, true) if err != nil { - return datumLazy{}, err + return datumAutoIDLazy{}, err } } // Use the value if it's not null and not 0. if recordID != 0 { // Do the rebase action lazily. - return datumLazy{isInAutoIncrement: true, kind: AutoIDRebase, recordID: recordID, datum: d}, nil + return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDRebase, recordID: recordID, datum: d}, nil } // Change NULL to auto id. // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { // Do the alloc action lazily. - return datumLazy{isInAutoIncrement: true, kind: AutoIDNull, datum: d}, nil + return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDNull, datum: d}, nil } // Use the 0 value as auto id directly // Do the action lazily. - return datumLazy{isInAutoIncrement: true, kind: AutoIDZero, recordID: recordID, datum: d}, nil + return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDZero, recordID: recordID, datum: d}, nil } @@ -918,7 +918,7 @@ const ( AutoIDZero ) -type datumLazy struct { +type datumAutoIDLazy struct { isInAutoIncrement bool kind datumAutoIDType datum types.Datum From de2e7aa09f7abbdb77af2c46cb8a2c5705108fea Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 13:32:49 +0800 Subject: [PATCH 21/49] fix bug of rebase --- executor/insert_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 107e2e2b9d9b3..302368e7b52e7 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -259,7 +259,7 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( // Find consecutive num. 
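		// The consecutive-run scan below has to look ahead at e.cache[i+1]: testing
		// e.cache[i] re-checks the datum that already started the run, so the first
		// non-NULL cached datum would be counted into the batch and then skipped by
		// the outer loop. The one-line change fixes that over-count.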
start := i cnt := 1 - for i+1 < length && e.cache[i].kind == AutoIDNull { + for i+1 < length && e.cache[i+1].kind == AutoIDNull { i++ cnt++ } From ca90f91e0b0945e6648a326fd507634b4d836a25 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 16:11:00 +0800 Subject: [PATCH 22/49] fix auto.go maxUint64 will be allocated --- meta/autoid/autoid.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 7aaeea1d2e846..5de1a3eccb8d2 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -14,6 +14,7 @@ package autoid import ( + "fmt" "math" "sync" "sync/atomic" @@ -426,6 +427,8 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) } func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { + fmt.Println("base", uint64(alloc.base)) + fmt.Println("end", uint64(alloc.end)) N1 := int64(N) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. if math.MaxUint64-uint64(alloc.base) < N { @@ -476,6 +479,10 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error zap.Int64("database ID", alloc.dbID)) resN := make([]int64, 0, N1) for i := uint64(alloc.base) + 1; i <= uint64(alloc.base)+N; i++ { + // fix bug : maxUint64 will be allocated + if i == math.MaxUint64 { + return nil, ErrAutoincReadFailed + } resN = append(resN, int64(i)) } // Use uint64 N directly. From f6046e7a5c5c64f42776c26ea4e42be0f786b621 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 16:31:32 +0800 Subject: [PATCH 23/49] fix maxInt64 autoid allocated --- meta/autoid/autoid.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 5de1a3eccb8d2..d825b4888c3ff 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -420,6 +420,10 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) zap.Int64("database ID", alloc.dbID)) resN := make([]int64, 0, N1) for i := alloc.base + 1; i <= alloc.base+N1; i++ { + // fix bug : maxInt64 will be allocated + if i == math.MaxInt64 { + return nil, ErrAutoincReadFailed + } resN = append(resN, i) } alloc.base += N1 From 7ef20b3bbedeed036fcdc6a30b1fe4fb97e8799f Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Mon, 9 Sep 2019 18:58:04 +0800 Subject: [PATCH 24/49] fix batch rowIdx update --- executor/insert_common.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 302368e7b52e7..12cd27092c7dd 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -208,6 +208,8 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { batchInsert := sessVars.BatchInsert && !sessVars.InTxn() batchSize := sessVars.DMLBatchSize + // rowIdx and lazyFillAutoID are correlated. + e.rowIdx = -1 e.lazyFillAutoID = true evalRowFunc := e.fastEvalRow if !e.allAssignmentsAreConstant { @@ -217,8 +219,9 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { rows := make([][]types.Datum, 0, len(e.Lists)) // Cache for consecutive autoID batch alloc. e.cache = make([]datumAutoIDLazy, 0, len(e.Lists)) - for i, list := range e.Lists { + // rowIdx for lazy autoID datum allocation, it will reset after batch insert. 
+ e.rowIdx++ e.rowCount++ var row []types.Datum row, err = evalRowFunc(ctx, list, i) @@ -240,6 +243,8 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { if err = e.doBatchInsert(ctx); err != nil { return err } + // Reset colIdx cause rows has been cut. + e.rowIdx = -1 } } // Fill the batch allocated autoIDs. @@ -404,9 +409,6 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression row[offset], hasValue[offset] = *val1.Copy(), true e.evalBuffer.SetDatum(offset, val1) } - if e.lazyFillAutoID { - e.rowIdx = rowIdx - } // Row may lack of generated column、autoIncrement column、empty column here. return e.fillRow(ctx, row, hasValue) } @@ -433,10 +435,6 @@ func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expres offset := e.insertColumns[i].Offset row[offset], hasValue[offset] = val1, true } - // rowIdx for lazy autoID datum allocation. - if e.lazyFillAutoID { - e.rowIdx = rowIdx - } return e.fillRow(ctx, row, hasValue) } @@ -665,7 +663,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return nil, err } } else { - // Cache the autoIncrement datum for lazy batch alloc + // Cache the autoIncrement datum for lazy batch alloc. datumLazyTmp.rowIdx = e.rowIdx datumLazyTmp.colIdx = i e.cache = append(e.cache, datumLazyTmp) From 1287a75c27c58ed3d222a6a16239618398a60830 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 11 Sep 2019 10:15:49 +0800 Subject: [PATCH 25/49] add maxUint64 and maxInt64 limit in allocN --- meta/autoid/autoid.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index d825b4888c3ff..d60a846d1e9d3 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -14,7 +14,6 @@ package autoid import ( - "fmt" "math" "sync" "sync/atomic" @@ -431,8 +430,6 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) } func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { - fmt.Println("base", uint64(alloc.base)) - fmt.Println("end", uint64(alloc.end)) N1 := int64(N) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. if math.MaxUint64-uint64(alloc.base) < N { From c583f57225c5b991309c0ca19621916e4ede690d Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 25 Sep 2019 09:59:06 +0800 Subject: [PATCH 26/49] reconstruct the autoid lazy alloc --- executor/insert_common.go | 267 ++++++++++++++++++++++++++------------ 1 file changed, 183 insertions(+), 84 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 12cd27092c7dd..33d6a6fdfa028 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -61,14 +61,14 @@ type InsertValues struct { evalBuffer chunk.MutRow evalBufferTypes []*types.FieldType - // Cache datumLazy for consecutive autoid batch alloc. - cache []datumAutoIDLazy + // cacheHasValue cache haveValue for consecutive autoid batch alloc. + cacheHasValue []bool // Fill the autoID lazily to datum. // This is used for being compatible with JDBC using getGeneratedKeys(). // By now in insert multiple values, TiDB can guarantee consecutive autoID in a batch. lazyFillAutoID bool - // RowIdx - rowIdx int + // colIdx will cache the index of autoIncrement column in a row. 
+ colIdx int } type defaultVal struct { @@ -208,20 +208,16 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { batchInsert := sessVars.BatchInsert && !sessVars.InTxn() batchSize := sessVars.DMLBatchSize - // rowIdx and lazyFillAutoID are correlated. - e.rowIdx = -1 e.lazyFillAutoID = true + e.colIdx = -1 evalRowFunc := e.fastEvalRow if !e.allAssignmentsAreConstant { evalRowFunc = e.evalRow } rows := make([][]types.Datum, 0, len(e.Lists)) - // Cache for consecutive autoID batch alloc. - e.cache = make([]datumAutoIDLazy, 0, len(e.Lists)) + e.cacheHasValue = make([]bool, 0, len(e.Lists)) for i, list := range e.Lists { - // rowIdx for lazy autoID datum allocation, it will reset after batch insert. - e.rowIdx++ e.rowCount++ var row []types.Datum row, err = evalRowFunc(ctx, list, i) @@ -231,11 +227,11 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { // Before batch insert, should fill the batch allocated autoIDs. - rows, err = e.autoIDAllocN(ctx, rows) + rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) if err != nil { return err } - e.cache = e.cache[:0] + e.cacheHasValue = e.cacheHasValue[:0] if err = base.exec(ctx, rows); err != nil { return err } @@ -243,18 +239,17 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { if err = e.doBatchInsert(ctx); err != nil { return err } - // Reset colIdx cause rows has been cut. - e.rowIdx = -1 } } // Fill the batch allocated autoIDs. - rows, err = e.autoIDAllocN(ctx, rows) + rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) if err != nil { return err } return base.exec(ctx, rows) } +/* func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { retryInfo := e.ctx.GetSessionVars().RetryInfo length := len(e.cache) @@ -355,6 +350,8 @@ func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ( return rows, nil } +*/ + func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int, err error) error { if err == nil { return nil @@ -597,35 +594,26 @@ func (e *InsertValues) getColDefaultValue(idx int, col *table.Column) (d types.D return defaultVal, nil } -// fillColValueLazy is quite same to fillColValue() except it will cache auto increment datum for lazy batch allocation. -func (e *InsertValues) fillColValueLazy(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (datumAutoIDLazy, - error) { - if mysql.HasAutoIncrementFlag(column.Flag) { - d, err := e.adjustAutoIncrementDatumLazy(ctx, datum, hasValue, column) - if err != nil { - return datumAutoIDLazy{}, err - } - return d, nil - } - if !hasValue { - d, err := e.getColDefaultValue(idx, column) - if e.filterErr(err) != nil { - return datumAutoIDLazy{}, err - } - return datumAutoIDLazy{isInAutoIncrement: false, datum: d}, nil - } - return datumAutoIDLazy{isInAutoIncrement: false, datum: datum}, nil -} - // fillColValue fills the column value if it is not set in the insert statement. func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (types.Datum, error) { if mysql.HasAutoIncrementFlag(column.Flag) { - d, err := e.adjustAutoIncrementDatum(ctx, datum, hasValue, column) - if err != nil { - return types.Datum{}, err + if e.lazyFillAutoID { + // cache the colIdx of autoIncrement column for lazy handle. 
+ if e.colIdx == -1 { + e.colIdx = idx + } + // cache the hasValue of autoIncrement column for lazy handle. + e.cacheHasValue = append(e.cacheHasValue, hasValue) + // Store the plain datum of autoIncrement column directly for lazy handle. + return datum, nil + } else { + d, err := e.adjustAutoIncrementDatum(ctx, datum, hasValue, column) + if err != nil { + return types.Datum{}, err + } + return d, nil } - return d, nil } if !hasValue { d, err := e.getColDefaultValue(idx, column) @@ -650,28 +638,10 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue gCols = append(gCols, c) } else { // Get the default value for all no value columns, the auto increment column is different from the others. - if e.lazyFillAutoID { - var datumLazyTmp datumAutoIDLazy - if datumLazyTmp, err = e.fillColValueLazy(ctx, row[i], i, c, hasValue[i]); err != nil { - return nil, err - } - // Store the plain datum. - row[i] = datumLazyTmp.datum - if !datumLazyTmp.isInAutoIncrement { - // Handle the bad null error. - if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - } else { - // Cache the autoIncrement datum for lazy batch alloc. - datumLazyTmp.rowIdx = e.rowIdx - datumLazyTmp.colIdx = i - e.cache = append(e.cache, datumLazyTmp) - } - } else { - if row[i], err = e.fillColValue(ctx, row[i], i, c, hasValue[i]); err != nil { - return nil, err - } + if row[i], err = e.fillColValue(ctx, row[i], i, c, hasValue[i]); err != nil { + return nil, err + } + if !e.lazyFillAutoID || (e.lazyFillAutoID && !mysql.HasAutoIncrementFlag(c.Flag)) { // Handle the bad null error. if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err @@ -697,6 +667,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return row, nil } +/* // adjustAutoIncrementDatumLazy is quite same to adjustAutoIncrementDatum() // except it will cache auto increment datum for lazy batch allocation of autoID. func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumAutoIDLazy, error) { @@ -739,6 +710,157 @@ func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDZero, recordID: recordID, datum: d}, nil } +*/ + +// isAutoNull can help judge whether a datum is AutoIncrement Null quickly. +// This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. +func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column, hasValue bool) bool { + // autoID can find in RetryInfo. + if e.ctx.GetSessionVars().RetryInfo.Retrying { + return false + } + var err error + var recordID int64 + if !hasValue { + d.SetNull() + } + if !d.IsNull() { + recordID, err = getAutoRecordID(d, &col.FieldType, true) + if err != nil { + return false + } + } + // Use the value if it's not null and not 0. + if recordID != 0 { + return false + } + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + return true + } + return false +} + +// lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() +// except it will cache auto increment datum previously for lazy batch allocation of autoID. 
+func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { + // Not in lazyFillAutoID mode or no autoIncrement column means no need to fill. + if !e.lazyFillAutoID || e.colIdx == -1 { + return rows, nil + } + // Get the autoIncrement column. + col := e.Table.Cols()[e.colIdx] + // Consider the colIdx of autoIncrement in row are same. + length := len(rows) + for i := 0; i < length; i++ { + autoDatum := rows[i][e.colIdx] + hasValue := e.cacheHasValue[i] + + // autoID can find in RetryInfo. + retryInfo := e.ctx.GetSessionVars().RetryInfo + if retryInfo.Retrying { + id, err := retryInfo.GetCurrAutoIncrementID() + if err != nil { + return nil, err + } + autoDatum.SetAutoID(id, col.Flag) + + // Handle the bad null error. + if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[i][e.colIdx] = autoDatum + continue + } + + var err error + var recordID int64 + if !hasValue { + autoDatum.SetNull() + } + if !autoDatum.IsNull() { + recordID, err = getAutoRecordID(autoDatum, &col.FieldType, true) + if err != nil { + return nil, err + } + } + // Use the value if it's not null and not 0. + if recordID != 0 { + err = e.Table.RebaseAutoID(e.ctx, recordID, true) + if err != nil { + return nil, err + } + e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) + retryInfo.AddAutoIncrementID(recordID) + + // Handle the bad null error. + if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[i][e.colIdx] = autoDatum + continue + } + + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if autoDatum.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + // Find consecutive num. + start := i + cnt := 1 + for i+1 < length && e.isAutoNull(ctx, rows[i+1][e.colIdx], col, e.cacheHasValue[i+1]) { + i++ + cnt++ + } + // Alloc batch N consecutive autoIDs. + recordIDs, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) + if e.filterErr(err) != nil { + return nil, err + } + // Assign autoIDs to rows. + for j := 0; j < cnt; j++ { + offset := j + start + d := rows[offset][e.colIdx] + + // It's compatible with mysql setting the first allocated autoID to lastInsertID. + // Cause autoID may be specified by user, judge only the first row is not suitable. + if e.lastInsertID == 0 { + e.lastInsertID = uint64(recordIDs[0]) + } + + d.SetAutoID(recordIDs[j], col.Flag) + retryInfo.AddAutoIncrementID(recordIDs[j]) + + // The value of d is adjusted by auto ID, so we need to cast it again. + d, err := table.CastValue(e.ctx, d, col.ToInfo()) + if err != nil { + return nil, err + } + // Handle the bad null error. + if d, err = col.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[offset][e.colIdx] = d + } + continue + } + + autoDatum.SetAutoID(recordID, col.Flag) + retryInfo.AddAutoIncrementID(recordID) + + // the value of d is adjusted by auto ID, so we need to cast it again. + autoDatum, err = table.CastValue(e.ctx, autoDatum, col.ToInfo()) + if err != nil { + return nil, err + } + // Handle the bad null error. 
+ if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + rows[i][e.colIdx] = autoDatum + } + return rows, nil +} func (e *InsertValues) adjustAutoIncrementDatum(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (types.Datum, error) { retryInfo := e.ctx.GetSessionVars().RetryInfo @@ -902,26 +1024,3 @@ func (e *InsertValues) addRecord(ctx context.Context, row []types.Datum) (int64, } return h, nil } - -type datumAutoIDType int - -const ( - // AutoIDNull stands for auto increment datum that need get a autoID from allocator. - AutoIDNull datumAutoIDType = iota - // AutoIDRebase stands for auto increment datum that has a specified value from user, - // so allocator need to rebase the value. - AutoIDRebase - // AutoIDZero stands for 0 value of auto increment datum, it won't replaced by autoID - // when sql-model NoAutoValueOnZero is set. - AutoIDZero -) - -type datumAutoIDLazy struct { - isInAutoIncrement bool - kind datumAutoIDType - datum types.Datum - // recordID is for AutoIdRebase type - recordID int64 - rowIdx int - colIdx int -} From 5ff4c80cb678603bd7d93b4e48fe17a7abf00b72 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 25 Sep 2019 10:17:05 +0800 Subject: [PATCH 27/49] fix format --- executor/insert_common.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 33d6a6fdfa028..7870d94ceb7d8 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -607,13 +607,12 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx e.cacheHasValue = append(e.cacheHasValue, hasValue) // Store the plain datum of autoIncrement column directly for lazy handle. return datum, nil - } else { - d, err := e.adjustAutoIncrementDatum(ctx, datum, hasValue, column) - if err != nil { - return types.Datum{}, err - } - return d, nil } + d, err := e.adjustAutoIncrementDatum(ctx, datum, hasValue, column) + if err != nil { + return types.Datum{}, err + } + return d, nil } if !hasValue { d, err := e.getColDefaultValue(idx, column) From 697d6403661655e20078e6af299f3a7ad68c8302 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 25 Sep 2019 10:57:33 +0800 Subject: [PATCH 28/49] remove commnet --- executor/insert_common.go | 148 -------------------------------------- 1 file changed, 148 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 7870d94ceb7d8..a40f080d6006a 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -249,109 +249,6 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { return base.exec(ctx, rows) } -/* -func (e *InsertValues) autoIDAllocN(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { - retryInfo := e.ctx.GetSessionVars().RetryInfo - length := len(e.cache) - for i := 0; i < length; i++ { - switch e.cache[i].kind { - case AutoIDNull: - // Find consecutive num. - start := i - cnt := 1 - for i+1 < length && e.cache[i+1].kind == AutoIDNull { - i++ - cnt++ - } - // Alloc batch N consecutive autoIDs. - recordIDs, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) - if e.filterErr(err) != nil { - return nil, err - } - // Assign autoIDs to rows. 
- var d types.Datum - var c *table.Column - for j := 0; j < cnt; j++ { - offset := j + start - rowIdx := e.cache[offset].rowIdx - colIdx := e.cache[offset].colIdx - - d = e.cache[offset].datum - c = e.Table.Cols()[colIdx] - - // It's compatible with mysql. So it sets the first allocated autoID to the lastInsertId. - if e.lastInsertID == 0 { - e.lastInsertID = uint64(recordIDs[0]) - } - - d.SetAutoID(recordIDs[j], c.Flag) - retryInfo.AddAutoIncrementID(recordIDs[j]) - - // The value of d is adjusted by auto ID, so we need to cast it again. - d, err := table.CastValue(e.ctx, d, c.ToInfo()) - if err != nil { - return nil, err - } - // Handle the bad null error. - if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - rows[rowIdx][colIdx] = d - } - case AutoIDRebase: - // Rebase action. - rowIdx := e.cache[i].rowIdx - colIdx := e.cache[i].colIdx - - // recordID has been casted in evalRow. - recordID := e.cache[i].recordID - d := e.cache[i].datum - c := e.Table.Cols()[colIdx] - - err := e.Table.RebaseAutoID(e.ctx, recordID, true) - if err != nil { - return nil, err - } - e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) - retryInfo.AddAutoIncrementID(recordID) - - // Handle the bad null error. - if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - // Cause d may changed in HandleBadNull, do assignment here. - rows[rowIdx][colIdx] = d - case AutoIDZero: - // Won't change value 0 to auto id, if NO_AUTO_VALUE_ON_ZERO SQL mode is set. - rowIdx := e.cache[i].rowIdx - colIdx := e.cache[i].colIdx - - // recordID has been casted in evalRow. - recordID := e.cache[i].recordID - d := e.cache[i].datum - c := e.Table.Cols()[colIdx] - - d.SetAutoID(recordID, c.Flag) - retryInfo.AddAutoIncrementID(recordID) - - // The value of d is adjusted by auto ID, so we need to cast it again. - d, err := table.CastValue(e.ctx, d, c.ToInfo()) - if err != nil { - return nil, err - } - - // Handle the bad null error. - if d, err = c.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } - rows[rowIdx][colIdx] = d - } - } - return rows, nil -} - -*/ - func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int, err error) error { if err == nil { return nil @@ -666,51 +563,6 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return row, nil } -/* -// adjustAutoIncrementDatumLazy is quite same to adjustAutoIncrementDatum() -// except it will cache auto increment datum for lazy batch allocation of autoID. -func (e *InsertValues) adjustAutoIncrementDatumLazy(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (datumAutoIDLazy, error) { - retryInfo := e.ctx.GetSessionVars().RetryInfo - if retryInfo.Retrying { - id, err := retryInfo.GetCurrAutoIncrementID() - if err != nil { - return datumAutoIDLazy{}, err - } - d.SetAutoID(id, c.Flag) - return datumAutoIDLazy{isInAutoIncrement: false, datum: d}, nil - } - - var err error - var recordID int64 - if !hasValue { - d.SetNull() - } - if !d.IsNull() { - recordID, err = getAutoRecordID(d, &c.FieldType, true) - if err != nil { - return datumAutoIDLazy{}, err - } - } - // Use the value if it's not null and not 0. - if recordID != 0 { - // Do the rebase action lazily. - return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDRebase, recordID: recordID, datum: d}, nil - } - - // Change NULL to auto id. - // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. 
- if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { - // Do the alloc action lazily. - return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDNull, datum: d}, nil - } - - // Use the 0 value as auto id directly - // Do the action lazily. - return datumAutoIDLazy{isInAutoIncrement: true, kind: AutoIDZero, recordID: recordID, datum: d}, nil - -} -*/ - // isAutoNull can help judge whether a datum is AutoIncrement Null quickly. // This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column, hasValue bool) bool { From f6567fb38422246c601ad0a106891023fc81fde9 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 25 Sep 2019 14:46:44 +0800 Subject: [PATCH 29/49] use new kind of logutil.logger(ctx) instead of logutil.BgLogger() --- meta/autoid/autoid.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index d60a846d1e9d3..c4bbb3eb166ac 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -14,6 +14,7 @@ package autoid import ( + "context" "math" "sync" "sync/atomic" @@ -254,7 +255,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64) (int64, error) { return 0, ErrAutoincReadFailed } alloc.base = int64(uint64(alloc.base) + 1) - logutil.BgLogger().Debug("alloc unsigned ID", + logutil.Logger(context.TODO()).Debug("alloc unsigned ID", zap.Uint64("ID", uint64(alloc.base)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) @@ -293,7 +294,7 @@ func (alloc *allocator) alloc4Signed(tableID int64) (int64, error) { return 0, ErrAutoincReadFailed } alloc.base++ - logutil.BgLogger().Debug("alloc signed ID", + logutil.Logger(context.TODO()).Debug("alloc signed ID", zap.Uint64("ID", uint64(alloc.base)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) @@ -412,7 +413,7 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) } alloc.base, alloc.end = newBase, newEnd } - logutil.BgLogger().Debug("alloc N signed ID", + logutil.Logger(context.TODO()).Debug("alloc N signed ID", zap.Uint64("from ID", uint64(alloc.base)), zap.Uint64("to ID", uint64(alloc.base+N1)), zap.Int64("table ID", tableID), @@ -473,7 +474,7 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error } alloc.base, alloc.end = newBase, newEnd } - logutil.BgLogger().Debug("alloc unsigned ID", + logutil.Logger(context.TODO()).Debug("alloc unsigned ID", zap.Uint64(" from ID", uint64(alloc.base)), zap.Uint64("to ID", uint64(alloc.base+N1)), zap.Int64("table ID", tableID), From cf986ba5ac08e948a9cc07be4f143ed16081ceb5 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 26 Sep 2019 09:45:06 +0800 Subject: [PATCH 30/49] combine alloc() and allocN together --- meta/autoid/autoid.go | 101 ++------------------------------ meta/autoid/autoid_test.go | 117 +++++++++++++++++++------------------ table/table.go | 8 ++- table/tables/tables.go | 3 +- 4 files changed, 71 insertions(+), 158 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index c4bbb3eb166ac..adc356ba9a541 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -45,9 +45,10 @@ var errInvalidTableID = terror.ClassAutoid.New(codeInvalidTableID, "invalid Tabl // Allocator is an auto increment id generator. // Just keep id unique actually. 
type Allocator interface { - // Alloc allocs the next autoID for table with tableID. + // Alloc allocs N consecutive autoID for table with tableID. // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. - Alloc(tableID int64) (int64, error) + // The consecutive feature is used to insert multiple rows in a statement, cause JDBC will presume the id of these rows are consecutive. + Alloc(tableID int64, N uint64) ([]int64, error) // Rebase rebases the autoID base for table with tableID and the new base value. // If allocIDs is true, it will allocate some IDs and save to the cache. // If allocIDs is false, it will not allocate IDs. @@ -58,9 +59,6 @@ type Allocator interface { End() int64 // NextGlobalAutoID returns the next global autoID. NextGlobalAutoID(tableID int64) (int64, error) - // AllocN allocates N consecutive autoID for table with tableID. It is used to insert multiple - // rows in a statement, cause JDBC will presume the id of these rows are consecutive. - AllocN(tableID int64, N uint64) ([]int64, error) } type allocator struct { @@ -223,97 +221,6 @@ func (alloc *allocator) Rebase(tableID, requiredBase int64, allocIDs bool) error return alloc.rebase4Signed(tableID, requiredBase, allocIDs) } -func (alloc *allocator) alloc4Unsigned(tableID int64) (int64, error) { - if alloc.base == alloc.end { // step - var newBase, newEnd int64 - startTime := time.Now() - consumeDur := startTime.Sub(alloc.lastAllocTime) - alloc.step = NextStep(alloc.step, consumeDur) - err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { - m := meta.NewMeta(txn) - var err1 error - newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) - if err1 != nil { - return err1 - } - tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) - newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) - return err1 - }) - metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) - if err != nil { - return 0, err - } - alloc.lastAllocTime = time.Now() - if uint64(newBase) == math.MaxUint64 { - return 0, ErrAutoincReadFailed - } - alloc.base, alloc.end = newBase, newEnd - } - - if uint64(alloc.base)+uint64(1) == math.MaxUint64 { - return 0, ErrAutoincReadFailed - } - alloc.base = int64(uint64(alloc.base) + 1) - logutil.Logger(context.TODO()).Debug("alloc unsigned ID", - zap.Uint64("ID", uint64(alloc.base)), - zap.Int64("table ID", tableID), - zap.Int64("database ID", alloc.dbID)) - return alloc.base, nil -} - -func (alloc *allocator) alloc4Signed(tableID int64) (int64, error) { - if alloc.base == alloc.end { // step - var newBase, newEnd int64 - startTime := time.Now() - consumeDur := startTime.Sub(alloc.lastAllocTime) - alloc.step = NextStep(alloc.step, consumeDur) - err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { - m := meta.NewMeta(txn) - var err1 error - newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) - if err1 != nil { - return err1 - } - tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) - newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) - return err1 - }) - metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) - if err != nil { - return 0, err - } - alloc.lastAllocTime = time.Now() - if newBase == math.MaxInt64 { - return 0, ErrAutoincReadFailed - } - alloc.base, alloc.end = newBase, newEnd - } - - if alloc.base+1 == math.MaxInt64 
{ - return 0, ErrAutoincReadFailed - } - alloc.base++ - logutil.Logger(context.TODO()).Debug("alloc signed ID", - zap.Uint64("ID", uint64(alloc.base)), - zap.Int64("table ID", tableID), - zap.Int64("database ID", alloc.dbID)) - return alloc.base, nil -} - -// Alloc implements autoid.Allocator Alloc interface. -func (alloc *allocator) Alloc(tableID int64) (int64, error) { - if tableID == 0 { - return 0, errInvalidTableID.GenWithStack("Invalid tableID") - } - alloc.mu.Lock() - defer alloc.mu.Unlock() - if alloc.isUnsigned { - return alloc.alloc4Unsigned(tableID) - } - return alloc.alloc4Signed(tableID) -} - // NextStep return new auto id step according to previous step and consuming time. func NextStep(curStep int64, consumeDur time.Duration) int64 { failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) { @@ -354,7 +261,7 @@ func GenLocalSchemaID() int64 { } // AllocN implements autoid.Allocator Alloc interface. -func (alloc *allocator) AllocN(tableID int64, N uint64) ([]int64, error) { +func (alloc *allocator) Alloc(tableID int64, N uint64) ([]int64, error) { if tableID == 0 { return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") } diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index f24cd8fd8537e..9feb2d1d458c2 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -73,13 +73,13 @@ func (*testSuite) TestT(c *C) { globalAutoID, err := alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - id, err := alloc.Alloc(1) + id, err := alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(1)) - id, err = alloc.Alloc(1) + c.Assert(id[0], Equals, int64(1)) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(2)) - _, err = alloc.Alloc(0) + c.Assert(id[0], Equals, int64(2)) + _, err = alloc.Alloc(0, 1) c.Assert(err, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) @@ -88,38 +88,38 @@ func (*testSuite) TestT(c *C) { // rebase err = alloc.Rebase(1, int64(1), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3)) + c.Assert(id[0], Equals, int64(3)) err = alloc.Rebase(1, int64(3), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(4)) + c.Assert(id[0], Equals, int64(4)) err = alloc.Rebase(1, int64(10), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(11)) + c.Assert(id[0], Equals, int64(11)) err = alloc.Rebase(1, int64(3010), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3011)) + c.Assert(id[0], Equals, int64(3011)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(autoid.GetStep()+1)) + c.Assert(id[0], Equals, int64(autoid.GetStep()+1)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) err = alloc.Rebase(2, int64(1), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(2) + id, err = alloc.Alloc(2, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(2)) + c.Assert(id[0], Equals, int64(2)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) @@ -129,41 +129,41 @@ func (*testSuite) TestT(c *C) { c.Assert(alloc, NotNil) err = alloc.Rebase(3, 
int64(3000), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3) + id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3211)) + c.Assert(id[0], Equals, int64(3211)) err = alloc.Rebase(3, int64(6543), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3) + id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(6544)) + c.Assert(id[0], Equals, int64(6544)) // Test the MaxInt64 is the upper bound of `alloc` function but not `rebase`. err = alloc.Rebase(3, int64(math.MaxInt64-1), true) c.Assert(err, IsNil) - _, err = alloc.Alloc(3) + _, err = alloc.Alloc(3, 1) c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(math.MaxInt64), true) c.Assert(err, IsNil) - // allocN for signed + // alloc N for signed alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(4) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - idN, err := alloc.AllocN(4, 1) + idN, err := alloc.Alloc(4, 1) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 1) c.Assert(idN[0], Equals, int64(1)) - idN, err = alloc.AllocN(4, 2) + idN, err = alloc.Alloc(4, 2) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) c.Assert(idN[0], Equals, int64(2)) c.Assert(idN[1], Equals, int64(3)) - idN, err = alloc.AllocN(4, 100) + idN, err = alloc.Alloc(4, 100) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 100) for i := 0; i < 100; i++ { @@ -172,7 +172,7 @@ func (*testSuite) TestT(c *C) { err = alloc.Rebase(4, int64(1000), false) c.Assert(err, IsNil) - idN, err = alloc.AllocN(4, 3) + idN, err = alloc.Alloc(4, 3) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 3) c.Assert(idN[0], Equals, int64(1001)) @@ -182,7 +182,7 @@ func (*testSuite) TestT(c *C) { lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) c.Assert(err, IsNil) - idN, err = alloc.AllocN(4, 5) + idN, err = alloc.Alloc(4, 5) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 5) c.Assert(idN[0], Greater, lastRemainOne) @@ -225,13 +225,13 @@ func (*testSuite) TestUnsignedAutoid(c *C) { globalAutoID, err := alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - id, err := alloc.Alloc(1) + id, err := alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(1)) - id, err = alloc.Alloc(1) + c.Assert(id[0], Equals, int64(1)) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(2)) - _, err = alloc.Alloc(0) + c.Assert(id[0], Equals, int64(2)) + _, err = alloc.Alloc(0, 1) c.Assert(err, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) @@ -240,38 +240,38 @@ func (*testSuite) TestUnsignedAutoid(c *C) { // rebase err = alloc.Rebase(1, int64(1), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3)) + c.Assert(id[0], Equals, int64(3)) err = alloc.Rebase(1, int64(3), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(4)) + c.Assert(id[0], Equals, int64(4)) err = alloc.Rebase(1, int64(10), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(11)) + c.Assert(id[0], Equals, int64(11)) err = alloc.Rebase(1, int64(3010), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3011)) + c.Assert(id[0], Equals, int64(3011)) alloc = autoid.NewAllocator(store, 1, 
true) c.Assert(alloc, NotNil) - id, err = alloc.Alloc(1) + id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(autoid.GetStep()+1)) + c.Assert(id[0], Equals, int64(autoid.GetStep()+1)) alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) err = alloc.Rebase(2, int64(1), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(2) + id, err = alloc.Alloc(2, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(2)) + c.Assert(id[0], Equals, int64(2)) alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) @@ -281,34 +281,34 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(3000), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3) + id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(3211)) + c.Assert(id[0], Equals, int64(3211)) err = alloc.Rebase(3, int64(6543), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3) + id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id, Equals, int64(6544)) + c.Assert(id[0], Equals, int64(6544)) // Test the MaxUint64 is the upper bound of `alloc` func but not `rebase`. var n uint64 = math.MaxUint64 - 1 un := int64(n) err = alloc.Rebase(3, un, true) c.Assert(err, IsNil) - _, err = alloc.Alloc(3) + _, err = alloc.Alloc(3, 1) c.Assert(err, NotNil) un = int64(n + 1) err = alloc.Rebase(3, un, true) c.Assert(err, IsNil) - // allocN for unsigned + // alloc N for unsigned alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(4) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - idN, err := alloc.AllocN(4, 2) + idN, err := alloc.Alloc(4, 2) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) c.Assert(idN[0], Equals, int64(1)) @@ -316,7 +316,7 @@ func (*testSuite) TestUnsignedAutoid(c *C) { err = alloc.Rebase(4, int64(500), true) c.Assert(err, IsNil) - idN, err = alloc.AllocN(4, 2) + idN, err = alloc.Alloc(4, 2) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 2) c.Assert(idN[0], Equals, int64(501)) @@ -325,7 +325,7 @@ func (*testSuite) TestUnsignedAutoid(c *C) { lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) c.Assert(err, IsNil) - idN, err = alloc.AllocN(4, 5) + idN, err = alloc.Alloc(4, 5) c.Assert(err, IsNil) c.Assert(len(idN), Equals, 5) c.Assert(idN[0], Greater, lastRemainOne) @@ -368,7 +368,8 @@ func (*testSuite) TestConcurrentAlloc(c *C) { allocIDs := func() { alloc := autoid.NewAllocator(store, dbID, false) for j := 0; j < int(autoid.GetStep())+5; j++ { - id, err1 := alloc.Alloc(tblID) + ids, err1 := alloc.Alloc(tblID, 1) + id := ids[0] if err1 != nil { errCh <- err1 break @@ -383,9 +384,9 @@ func (*testSuite) TestConcurrentAlloc(c *C) { m[id] = struct{}{} mu.Unlock() - //test AllocN + //test Alloc N N := rand.Uint64() % 100 - idN, err1 := alloc.AllocN(tblID, N) + idN, err1 := alloc.Alloc(tblID, N) if err1 != nil { errCh <- err1 break @@ -445,7 +446,7 @@ func (*testSuite) TestRollbackAlloc(c *C) { injectConf.SetCommitError(errors.New("injected")) injectedStore := kv.NewInjectedStore(store, injectConf) alloc := autoid.NewAllocator(injectedStore, 1, false) - _, err = alloc.Alloc(2) + _, err = alloc.Alloc(2, 1) c.Assert(err, NotNil) c.Assert(alloc.Base(), Equals, int64(0)) c.Assert(alloc.End(), Equals, int64(0)) diff --git a/table/table.go b/table/table.go index 85ff9b15cdd4d..cbb82ea794c55 100644 --- a/table/table.go +++ b/table/table.go @@ -192,7 +192,11 @@ func AllocAutoIncrementValue(ctx context.Context, t Table, sctx 
sessionctx.Conte span1 := span.Tracer().StartSpan("table.AllocAutoIncrementValue", opentracing.ChildOf(span.Context())) defer span1.Finish() } - return t.Allocator(sctx).Alloc(t.Meta().ID) + ids, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(1)) + if err != nil { + return 0, err + } + return ids[0], err } // AllocBatchAutoIncrementValue allocates batch auto_increment value for rows. @@ -201,7 +205,7 @@ func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx. span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValue", opentracing.ChildOf(span.Context())) defer span1.Finish() } - return t.Allocator(sctx).AllocN(t.Meta().ID, uint64(N)) + return t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N)) } // PhysicalTable is an abstraction for two kinds of table representation: partition or non-partitioned table. diff --git a/table/tables/tables.go b/table/tables/tables.go index e0e9315cd4a85..44f39c4c9fa4e 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -943,10 +943,11 @@ func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals [ // AllocHandle implements table.Table AllocHandle interface. func (t *tableCommon) AllocHandle(ctx sessionctx.Context) (int64, error) { - rowID, err := t.Allocator(ctx).Alloc(t.tableID) + rowIDs, err := t.Allocator(ctx).Alloc(t.tableID, 1) if err != nil { return 0, err } + rowID := rowIDs[0] if t.meta.ShardRowIDBits > 0 { // Use max record ShardRowIDBits to check overflow. if OverflowShardBits(rowID, t.meta.MaxShardRowIDBits) { From 3d7b966b13a1f843298ff62a76aa534f56989eef Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 26 Sep 2019 10:42:44 +0800 Subject: [PATCH 31/49] remove cache of hasValue --- executor/insert_common.go | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index a40f080d6006a..82b9ca9798965 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -61,8 +61,6 @@ type InsertValues struct { evalBuffer chunk.MutRow evalBufferTypes []*types.FieldType - // cacheHasValue cache haveValue for consecutive autoid batch alloc. - cacheHasValue []bool // Fill the autoID lazily to datum. // This is used for being compatible with JDBC using getGeneratedKeys(). // By now in insert multiple values, TiDB can guarantee consecutive autoID in a batch. @@ -216,7 +214,6 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows := make([][]types.Datum, 0, len(e.Lists)) - e.cacheHasValue = make([]bool, 0, len(e.Lists)) for i, list := range e.Lists { e.rowCount++ var row []types.Datum @@ -231,7 +228,6 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { if err != nil { return err } - e.cacheHasValue = e.cacheHasValue[:0] if err = base.exec(ctx, rows); err != nil { return err } @@ -500,8 +496,10 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx if e.colIdx == -1 { e.colIdx = idx } - // cache the hasValue of autoIncrement column for lazy handle. - e.cacheHasValue = append(e.cacheHasValue, hasValue) + // Handle hasValue info in autoIncrement column previously for lazy handle. + if !hasValue { + datum.SetNull() + } // Store the plain datum of autoIncrement column directly for lazy handle. return datum, nil } @@ -565,16 +563,13 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue // isAutoNull can help judge whether a datum is AutoIncrement Null quickly. 
// This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. -func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column, hasValue bool) bool { +func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column) bool { // autoID can find in RetryInfo. if e.ctx.GetSessionVars().RetryInfo.Retrying { return false } var err error var recordID int64 - if !hasValue { - d.SetNull() - } if !d.IsNull() { recordID, err = getAutoRecordID(d, &col.FieldType, true) if err != nil { @@ -606,7 +601,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] length := len(rows) for i := 0; i < length; i++ { autoDatum := rows[i][e.colIdx] - hasValue := e.cacheHasValue[i] // autoID can find in RetryInfo. retryInfo := e.ctx.GetSessionVars().RetryInfo @@ -627,9 +621,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] var err error var recordID int64 - if !hasValue { - autoDatum.SetNull() - } if !autoDatum.IsNull() { recordID, err = getAutoRecordID(autoDatum, &col.FieldType, true) if err != nil { @@ -659,7 +650,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] // Find consecutive num. start := i cnt := 1 - for i+1 < length && e.isAutoNull(ctx, rows[i+1][e.colIdx], col, e.cacheHasValue[i+1]) { + for i+1 < length && e.isAutoNull(ctx, rows[i+1][e.colIdx], col) { i++ cnt++ } From ae72865f3db89f0689dba974e7b953af8beb1e4b Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 26 Sep 2019 10:42:44 +0800 Subject: [PATCH 32/49] remove cache of hasValue --- executor/insert_common.go | 53 ++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 82b9ca9798965..7917e35461bd1 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -61,12 +61,11 @@ type InsertValues struct { evalBuffer chunk.MutRow evalBufferTypes []*types.FieldType - // Fill the autoID lazily to datum. - // This is used for being compatible with JDBC using getGeneratedKeys(). - // By now in insert multiple values, TiDB can guarantee consecutive autoID in a batch. + // Fill the autoID lazily to datum. This is used for being compatible with JDBC using getGeneratedKeys(). + // `insert|replace values` can guarantee consecutive autoID in a batch. + // Other statements like `insert select from` don't guarantee consecutive autoID. + // https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html lazyFillAutoID bool - // colIdx will cache the index of autoIncrement column in a row. - colIdx int } type defaultVal struct { @@ -207,7 +206,6 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { batchSize := sessVars.DMLBatchSize e.lazyFillAutoID = true - e.colIdx = -1 evalRowFunc := e.fastEvalRow if !e.allAssignmentsAreConstant { evalRowFunc = e.evalRow @@ -492,10 +490,6 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx error) { if mysql.HasAutoIncrementFlag(column.Flag) { if e.lazyFillAutoID { - // cache the colIdx of autoIncrement column for lazy handle. - if e.colIdx == -1 { - e.colIdx = idx - } // Handle hasValue info in autoIncrement column previously for lazy handle. 
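			// Folding "no value supplied" into an explicit NULL at this point lets the later
			// lazyAdjustAutoIncrementDatum pass treat omitted columns and user-written NULLs
			// uniformly when it searches for consecutive rows to batch-allocate.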
if !hasValue { datum.SetNull() @@ -588,19 +582,38 @@ func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table return false } +func (e *InsertValues) hasAutoIncrementColumn() (int, bool) { + colIdx := -1 + for i, c := range e.Table.Cols() { + if mysql.HasAutoIncrementFlag(c.Flag) { + colIdx = i + break + } + } + if colIdx == -1 { + return -1, false + } + return colIdx, true +} + // lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() // except it will cache auto increment datum previously for lazy batch allocation of autoID. func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { - // Not in lazyFillAutoID mode or no autoIncrement column means no need to fill. - if !e.lazyFillAutoID || e.colIdx == -1 { + // Not in lazyFillAutoID mode means no need to fill. + if !e.lazyFillAutoID { + return rows, nil + } + // No autoIncrement column means no need to fill. + colIdx, ok := e.hasAutoIncrementColumn() + if !ok { return rows, nil } // Get the autoIncrement column. - col := e.Table.Cols()[e.colIdx] + col := e.Table.Cols()[colIdx] // Consider the colIdx of autoIncrement in row are same. length := len(rows) for i := 0; i < length; i++ { - autoDatum := rows[i][e.colIdx] + autoDatum := rows[i][colIdx] // autoID can find in RetryInfo. retryInfo := e.ctx.GetSessionVars().RetryInfo @@ -615,7 +628,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } - rows[i][e.colIdx] = autoDatum + rows[i][colIdx] = autoDatum continue } @@ -640,7 +653,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } - rows[i][e.colIdx] = autoDatum + rows[i][colIdx] = autoDatum continue } @@ -650,7 +663,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] // Find consecutive num. start := i cnt := 1 - for i+1 < length && e.isAutoNull(ctx, rows[i+1][e.colIdx], col) { + for i+1 < length && e.isAutoNull(ctx, rows[i+1][colIdx], col) { i++ cnt++ } @@ -662,7 +675,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] // Assign autoIDs to rows. for j := 0; j < cnt; j++ { offset := j + start - d := rows[offset][e.colIdx] + d := rows[offset][colIdx] // It's compatible with mysql setting the first allocated autoID to lastInsertID. // Cause autoID may be specified by user, judge only the first row is not suitable. 
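The hunks above and below carry the heart of the lazy allocation: walk forward over the rows whose auto-increment datum is still NULL, allocate exactly that many consecutive IDs in a single allocator call, then write them back in order. A minimal standalone sketch of the same pattern follows; the types are simplified (a zero value marks a row that still needs an ID) and allocBatch is only a stand-in for table.AllocBatchAutoIncrementValue, not the real signature.

    // fillConsecutive assigns consecutive IDs to every run of rows whose
    // auto-increment column is still unset (zero in this simplified model).
    func fillConsecutive(rows [][]int64, colIdx int, allocBatch func(n int) ([]int64, error)) error {
        for i := 0; i < len(rows); i++ {
            if rows[i][colIdx] != 0 {
                continue // value supplied by the user, leave it alone
            }
            // Measure the run of rows that still need an auto ID.
            start, cnt := i, 1
            for i+1 < len(rows) && rows[i+1][colIdx] == 0 {
                i++
                cnt++
            }
            // One allocator round-trip per run keeps the IDs consecutive.
            ids, err := allocBatch(cnt)
            if err != nil {
                return err
            }
            for j := 0; j < cnt; j++ {
                rows[start+j][colIdx] = ids[j]
            }
        }
        return nil
    }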
@@ -682,7 +695,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if d, err = col.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } - rows[offset][e.colIdx] = d + rows[offset][colIdx] = d } continue } @@ -699,7 +712,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } - rows[i][e.colIdx] = autoDatum + rows[i][colIdx] = autoDatum } return rows, nil } From 188b1ec7e205eb500fe3442088780ae14fbafbdb Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 26 Sep 2019 10:42:44 +0800 Subject: [PATCH 33/49] remove cache of hasValue --- executor/insert_common.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 7917e35461bd1..1eab069b88cde 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -515,8 +515,10 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx // fillRow fills generated columns, auto_increment column and empty column. // For NOT NULL column, it will return error or use zero value based on sql_mode. -// When lazyFillAutoID is true, fill row will cache auto increment datum for lazy -// batch allocation. This case is used in insert|replace into values (row),(row),(row)... +// When lazyFillAutoID is true, fill row will lazy handle auto increment datum for lazy batch allocation. +// `insert|replace values` can guarantee consecutive autoID in a batch. +// Other statements like `insert select from` don't guarantee consecutive autoID. +// https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue []bool) ([]types.Datum, error) { gCols := make([]*table.Column, 0) for i, c := range e.Table.Cols() { From 976bfccbec784b40b22bed5a4f2b380af1bbe643 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Thu, 26 Sep 2019 10:42:44 +0800 Subject: [PATCH 34/49] remove cache of hasValue --- executor/insert_common.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 1eab069b88cde..7d2b2d725027d 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -592,10 +592,7 @@ func (e *InsertValues) hasAutoIncrementColumn() (int, bool) { break } } - if colIdx == -1 { - return -1, false - } - return colIdx, true + return colIdx, colIdx != -1 } // lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() From 324459a077395fb8f3efb4b8a2068c9c7e71f772 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 09:38:14 +0800 Subject: [PATCH 35/49] add test : autoID can find in retryInfo --- executor/insert_test.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/executor/insert_test.go b/executor/insert_test.go index 3dd4db534e1ce..edc625bd3ef09 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -17,9 +17,11 @@ import ( "fmt" . 
"github.com/pingcap/check" "github.com/pingcap/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testkit" + "strings" ) func (s *testSuite3) TestInsertOnDuplicateKey(c *C) { @@ -675,10 +677,37 @@ func (s *testSuite3) TestInsertWithAutoidSchema(c *C) { `select last_insert_id()`, testkit.Rows(`104`), }, + // last test : auto increment allocation can find in retryInfo. + { + `retry : insert into t8 values (null, 16), (null, 17)`, + `select * from t8 where id = 1000`, + testkit.Rows(`1000 16`), + }, + { + `;`, + `select * from t8 where id = 1001`, + testkit.Rows(`1001 17`), + }, + { + `;`, + `select last_insert_id()`, + // this insert doesn't has the last_insert_id, should be same as the last insert case. + testkit.Rows(`104`), + }, } for _, tt := range tests { - tk.MustExec(tt.insert) + if strings.HasPrefix(tt.insert, "retry : ") { + // it's the last retry insert case, change the sessionVars. + retryInfo := &variable.RetryInfo{Retrying: true} + retryInfo.AddAutoIncrementID(1000) + retryInfo.AddAutoIncrementID(1001) + tk.Se.GetSessionVars().RetryInfo = retryInfo + tk.MustExec(tt.insert[8:]) + tk.Se.GetSessionVars().RetryInfo = &variable.RetryInfo{} + } else { + tk.MustExec(tt.insert) + } if tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO`" || tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`" { tk.MustExec(tt.query) From 4c03cdcbc57a81a1a96c9ed53d9c0f9a86fccfe7 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 09:38:14 +0800 Subject: [PATCH 36/49] add test : autoID can find in retryInfo --- executor/insert_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/executor/insert_test.go b/executor/insert_test.go index edc625bd3ef09..ea8d47742db69 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -15,6 +15,7 @@ package executor_test import ( "fmt" + . "github.com/pingcap/check" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/sessionctx/variable" From ff4f9ec0f75bdd1402fb5894b81572f6efda6fb0 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 14:53:03 +0800 Subject: [PATCH 37/49] extract retry logic --- executor/insert_common.go | 49 +++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 7d2b2d725027d..9c0fa573d73b3 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -560,10 +560,6 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue // isAutoNull can help judge whether a datum is AutoIncrement Null quickly. // This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column) bool { - // autoID can find in RetryInfo. 
- if e.ctx.GetSessionVars().RetryInfo.Retrying { - return false - } var err error var recordID int64 if !d.IsNull() { @@ -595,18 +591,7 @@ func (e *InsertValues) hasAutoIncrementColumn() (int, bool) { return colIdx, colIdx != -1 } -// lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() -// except it will cache auto increment datum previously for lazy batch allocation of autoID. -func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { - // Not in lazyFillAutoID mode means no need to fill. - if !e.lazyFillAutoID { - return rows, nil - } - // No autoIncrement column means no need to fill. - colIdx, ok := e.hasAutoIncrementColumn() - if !ok { - return rows, nil - } +func (e *InsertValues) lazyAdjustAutoIncrementDatumInRetry(ctx context.Context, rows [][]types.Datum, colIdx int) ([][]types.Datum, error) { // Get the autoIncrement column. col := e.Table.Cols()[colIdx] // Consider the colIdx of autoIncrement in row are same. @@ -628,8 +613,33 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] return nil, err } rows[i][colIdx] = autoDatum - continue } + } +} + +// lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() +// except it will cache auto increment datum previously for lazy batch allocation of autoID. +func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { + // Not in lazyFillAutoID mode means no need to fill. + if !e.lazyFillAutoID { + return rows, nil + } + // No autoIncrement column means no need to fill. + colIdx, ok := e.hasAutoIncrementColumn() + if !ok { + return rows, nil + } + // autoID can find in RetryInfo. + retryInfo := e.ctx.GetSessionVars().RetryInfo + if retryInfo.Retrying { + return e.lazyAdjustAutoIncrementDatumInRetry(ctx, rows, colIdx) + } + // Get the autoIncrement column. + col := e.Table.Cols()[colIdx] + // Consider the colIdx of autoIncrement in row are same. + length := len(rows) + for i := 0; i < length; i++ { + autoDatum := rows[i][colIdx] var err error var recordID int64 @@ -647,11 +657,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] } e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) retryInfo.AddAutoIncrementID(recordID) - - // Handle the bad null error. 
- if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } rows[i][colIdx] = autoDatum continue } From 7508baeab2b061917b02e880f6236ef285b87682 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 14:53:03 +0800 Subject: [PATCH 38/49] extract retry logic --- executor/insert_common.go | 1 + 1 file changed, 1 insertion(+) diff --git a/executor/insert_common.go b/executor/insert_common.go index 9c0fa573d73b3..e1a7e7d14aad9 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -615,6 +615,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatumInRetry(ctx context.Context, rows[i][colIdx] = autoDatum } } + return rows, nil } // lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() From ce89e847bdd0b9a1ca20e12edcb0372b8142798b Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Sun, 29 Sep 2019 02:02:13 -0500 Subject: [PATCH 39/49] Update executor/insert_common.go Co-Authored-By: bb7133 --- executor/insert_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index e1a7e7d14aad9..c83598960c654 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -297,7 +297,7 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression row[offset], hasValue[offset] = *val1.Copy(), true e.evalBuffer.SetDatum(offset, val1) } - // Row may lack of generated column、autoIncrement column、empty column here. + // Row may lack of generated column, autoIncrement column, empty column here. return e.fillRow(ctx, row, hasValue) } From 16151bfde18c4a97df047be0dc783e9f60fcef4f Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Sun, 29 Sep 2019 02:04:16 -0500 Subject: [PATCH 40/49] Update executor/insert_common.go Co-Authored-By: bb7133 --- executor/insert_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index c83598960c654..58cc55acc28f9 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -515,7 +515,7 @@ func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx // fillRow fills generated columns, auto_increment column and empty column. // For NOT NULL column, it will return error or use zero value based on sql_mode. -// When lazyFillAutoID is true, fill row will lazy handle auto increment datum for lazy batch allocation. +// When lazyFillAutoID is true, fill row will lazily handle auto increment datum for lazy batch allocation. // `insert|replace values` can guarantee consecutive autoID in a batch. // Other statements like `insert select from` don't guarantee consecutive autoID. 
// https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html From 67233c554ea2ddf49ffaddcdb6925021f289d607 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 15:19:52 +0800 Subject: [PATCH 41/49] fix comment --- executor/insert_common.go | 7 +++--- executor/insert_test.go | 2 +- meta/autoid/autoid.go | 48 +++++++++++++++++++-------------------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 58cc55acc28f9..2b1a943847ca4 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -221,7 +221,7 @@ func insertRows(ctx context.Context, base insertCommon) (err error) { } rows = append(rows, row) if batchInsert && e.rowCount%uint64(batchSize) == 0 { - // Before batch insert, should fill the batch allocated autoIDs. + // Before batch insert, fill the batch allocated autoIDs. rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) if err != nil { return err @@ -532,7 +532,6 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue return nil, err } if !e.lazyFillAutoID || (e.lazyFillAutoID && !mysql.HasAutoIncrementFlag(c.Flag)) { - // Handle the bad null error. if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } @@ -618,7 +617,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatumInRetry(ctx context.Context, return rows, nil } -// lazyAdjustAutoIncrementDatum is quite same to adjustAutoIncrementDatum() +// lazyAdjustAutoIncrementDatum is quite similar to adjustAutoIncrementDatum // except it will cache auto increment datum previously for lazy batch allocation of autoID. func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { // Not in lazyFillAutoID mode means no need to fill. @@ -637,7 +636,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] } // Get the autoIncrement column. col := e.Table.Cols()[colIdx] - // Consider the colIdx of autoIncrement in row are same. + // Consider the colIdx of autoIncrement in row are the same. length := len(rows) for i := 0; i < length; i++ { autoDatum := rows[i][colIdx] diff --git a/executor/insert_test.go b/executor/insert_test.go index ea8d47742db69..1a8e9c7de99eb 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -15,6 +15,7 @@ package executor_test import ( "fmt" + "strings" . "github.com/pingcap/check" "github.com/pingcap/parser/terror" @@ -22,7 +23,6 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testkit" - "strings" ) func (s *testSuite3) TestInsertOnDuplicateKey(c *C) { diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index adc356ba9a541..84c9786c30d8e 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -271,27 +271,27 @@ func (alloc *allocator) Alloc(tableID int64, N uint64) ([]int64, error) { alloc.mu.Lock() defer alloc.mu.Unlock() if alloc.isUnsigned { - return alloc.allocN4Unsigned(tableID, N) + return alloc.alloc4Unsigned(tableID, N) } - return alloc.allocN4Signed(tableID, N) + return alloc.alloc4Signed(tableID, N) } -func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) { - N1 := int64(N) +func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { + n1 := int64(n) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. 
- if math.MaxInt64-alloc.base < N1 { + if math.MaxInt64-alloc.base < n1 { return nil, ErrAutoincReadFailed } // The local rest is not enough for allocN, skip it. - if alloc.base+N1 > alloc.end { + if alloc.base+n1 > alloc.end { var newBase, newEnd int64 startTime := time.Now() // Although it may skip a segment here, we still think it is consumed. consumeDur := startTime.Sub(alloc.lastAllocTime) nextStep := NextStep(alloc.step, consumeDur) // Make sure nextStep is big enough. - if nextStep <= N1 { - alloc.step = mathutil.MinInt64(N1*2, maxStep) + if nextStep <= n1 { + alloc.step = mathutil.MinInt64(n1*2, maxStep) } else { alloc.step = nextStep } @@ -304,7 +304,7 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) } tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) // The global rest is not enough for allocN. - if tmpStep < N1 { + if tmpStep < n1 { return ErrAutoincReadFailed } newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) @@ -322,37 +322,37 @@ func (alloc *allocator) allocN4Signed(tableID int64, N uint64) ([]int64, error) } logutil.Logger(context.TODO()).Debug("alloc N signed ID", zap.Uint64("from ID", uint64(alloc.base)), - zap.Uint64("to ID", uint64(alloc.base+N1)), + zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) - resN := make([]int64, 0, N1) - for i := alloc.base + 1; i <= alloc.base+N1; i++ { + resN := make([]int64, 0, n1) + for i := alloc.base + 1; i <= alloc.base+n1; i++ { // fix bug : maxInt64 will be allocated if i == math.MaxInt64 { return nil, ErrAutoincReadFailed } resN = append(resN, i) } - alloc.base += N1 + alloc.base += n1 return resN, nil } -func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error) { - N1 := int64(N) +func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) { + n1 := int64(n) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. - if math.MaxUint64-uint64(alloc.base) < N { + if math.MaxUint64-uint64(alloc.base) < n { return nil, ErrAutoincReadFailed } // The local rest is not enough for allocN, skip it. - if uint64(alloc.base)+N > uint64(alloc.end) { + if uint64(alloc.base)+n > uint64(alloc.end) { var newBase, newEnd int64 startTime := time.Now() // Although it may skip a segment here, we still think it is consumed. consumeDur := startTime.Sub(alloc.lastAllocTime) nextStep := NextStep(alloc.step, consumeDur) // Make sure nextStep is big enough. - if nextStep <= N1 { - alloc.step = mathutil.MinInt64(N1*2, maxStep) + if nextStep <= n1 { + alloc.step = mathutil.MinInt64(n1*2, maxStep) } else { alloc.step = nextStep } @@ -365,7 +365,7 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error } tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) // The global rest is not enough for allocN. 
- if tmpStep < N1 { + if tmpStep < n1 { return ErrAutoincReadFailed } newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) @@ -383,11 +383,11 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error } logutil.Logger(context.TODO()).Debug("alloc unsigned ID", zap.Uint64(" from ID", uint64(alloc.base)), - zap.Uint64("to ID", uint64(alloc.base+N1)), + zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) - resN := make([]int64, 0, N1) - for i := uint64(alloc.base) + 1; i <= uint64(alloc.base)+N; i++ { + resN := make([]int64, 0, n1) + for i := uint64(alloc.base) + 1; i <= uint64(alloc.base)+n; i++ { // fix bug : maxUint64 will be allocated if i == math.MaxUint64 { return nil, ErrAutoincReadFailed @@ -395,6 +395,6 @@ func (alloc *allocator) allocN4Unsigned(tableID int64, N uint64) ([]int64, error resN = append(resN, int64(i)) } // Use uint64 N directly. - alloc.base = int64(uint64(alloc.base) + N) + alloc.base = int64(uint64(alloc.base) + n) return resN, nil } From 0c1b603d4644048d239040df2571fe1e9b98dfda Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 15:19:52 +0800 Subject: [PATCH 42/49] fix comment --- executor/insert_common.go | 18 +++++++----------- go.mod | 1 - 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 2b1a943847ca4..5cf04ed7987a9 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -593,12 +593,12 @@ func (e *InsertValues) hasAutoIncrementColumn() (int, bool) { func (e *InsertValues) lazyAdjustAutoIncrementDatumInRetry(ctx context.Context, rows [][]types.Datum, colIdx int) ([][]types.Datum, error) { // Get the autoIncrement column. col := e.Table.Cols()[colIdx] - // Consider the colIdx of autoIncrement in row are same. + // Consider the colIdx of autoIncrement in row are the same. length := len(rows) for i := 0; i < length; i++ { autoDatum := rows[i][colIdx] - // autoID can find in RetryInfo. + // autoID can be found in RetryInfo. retryInfo := e.ctx.GetSessionVars().RetryInfo if retryInfo.Retrying { id, err := retryInfo.GetCurrAutoIncrementID() @@ -607,7 +607,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatumInRetry(ctx context.Context, } autoDatum.SetAutoID(id, col.Flag) - // Handle the bad null error. if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } @@ -676,17 +675,16 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if e.filterErr(err) != nil { return nil, err } + // It's compatible with mysql setting the first allocated autoID to lastInsertID. + // Cause autoID may be specified by user, judge only the first row is not suitable. + if e.lastInsertID == 0 { + e.lastInsertID = uint64(recordIDs[0]) + } // Assign autoIDs to rows. for j := 0; j < cnt; j++ { offset := j + start d := rows[offset][colIdx] - // It's compatible with mysql setting the first allocated autoID to lastInsertID. - // Cause autoID may be specified by user, judge only the first row is not suitable. - if e.lastInsertID == 0 { - e.lastInsertID = uint64(recordIDs[0]) - } - d.SetAutoID(recordIDs[j], col.Flag) retryInfo.AddAutoIncrementID(recordIDs[j]) @@ -695,7 +693,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if err != nil { return nil, err } - // Handle the bad null error. 
if d, err = col.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } @@ -712,7 +709,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if err != nil { return nil, err } - // Handle the bad null error. if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { return nil, err } diff --git a/go.mod b/go.mod index 22987a370bd12..e1ae085082f7b 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,6 @@ require ( golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 - google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect google.golang.org/grpc v1.23.0 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect From 66038c736781f4d413dfa4b1b60b90598769b08e Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 15:19:52 +0800 Subject: [PATCH 43/49] fix comment --- executor/insert_common.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 5cf04ed7987a9..b2a235d87a6b9 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -693,9 +693,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if err != nil { return nil, err } - if d, err = col.HandleBadNull(d, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } rows[offset][colIdx] = d } continue @@ -709,9 +706,6 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if err != nil { return nil, err } - if autoDatum, err = col.HandleBadNull(autoDatum, e.ctx.GetSessionVars().StmtCtx); err != nil { - return nil, err - } rows[i][colIdx] = autoDatum } return rows, nil From fd19f99a5022399ff5df41a7decf5e919a1a1186 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 15:19:52 +0800 Subject: [PATCH 44/49] fix comment --- executor/insert_common.go | 2 +- meta/autoid/autoid.go | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index b2a235d87a6b9..3a616d8cad8a6 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -628,7 +628,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] if !ok { return rows, nil } - // autoID can find in RetryInfo. + // autoID can be found in RetryInfo. retryInfo := e.ctx.GetSessionVars().RetryInfo if retryInfo.Retrying { return e.lazyAdjustAutoIncrementDatumInRetry(ctx, rows, colIdx) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 84c9786c30d8e..cb1a8f7326428 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -47,8 +47,8 @@ var errInvalidTableID = terror.ClassAutoid.New(codeInvalidTableID, "invalid Tabl type Allocator interface { // Alloc allocs N consecutive autoID for table with tableID. // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. - // The consecutive feature is used to insert multiple rows in a statement, cause JDBC will presume the id of these rows are consecutive. - Alloc(tableID int64, N uint64) ([]int64, error) + // The consecutive feature is used to insert multiple rows in a statement. + Alloc(tableID int64, n uint64) ([]int64, error) // Rebase rebases the autoID base for table with tableID and the new base value. 
// If allocIDs is true, it will allocate some IDs and save to the cache. // If allocIDs is false, it will not allocate IDs. @@ -250,7 +250,7 @@ func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator { } } -//autoid error codes. +//codeInvalidTableID is the code of autoid error. const codeInvalidTableID terror.ErrCode = 1 var localSchemaID = int64(math.MaxInt64) @@ -260,7 +260,7 @@ func GenLocalSchemaID() int64 { return atomic.AddInt64(&localSchemaID, -1) } -// AllocN implements autoid.Allocator Alloc interface. +// Alloc implements autoid.Allocator Alloc interface. func (alloc *allocator) Alloc(tableID int64, N uint64) ([]int64, error) { if tableID == 0 { return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") @@ -303,7 +303,7 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { return err1 } tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) - // The global rest is not enough for allocN. + // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed } @@ -339,15 +339,15 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) { n1 := int64(n) - // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. + // Condition alloc.base+n1 > alloc.end will overflow when alloc.base + n1 > MaxInt64. So need this. if math.MaxUint64-uint64(alloc.base) < n { return nil, ErrAutoincReadFailed } - // The local rest is not enough for allocN, skip it. + // The local rest is not enough for alloc, skip it. if uint64(alloc.base)+n > uint64(alloc.end) { var newBase, newEnd int64 startTime := time.Now() - // Although it may skip a segment here, we still think it is consumed. + // Although it may skip a segment here, we still treat it as consumed. consumeDur := startTime.Sub(alloc.lastAllocTime) nextStep := NextStep(alloc.step, consumeDur) // Make sure nextStep is big enough. @@ -364,7 +364,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) return err1 } tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) - // The global rest is not enough for allocN. + // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed } @@ -394,7 +394,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) } resN = append(resN, int64(i)) } - // Use uint64 N directly. + // Use uint64 n directly. alloc.base = int64(uint64(alloc.base) + n) return resN, nil } From b9cc54430032724f9f43c293810804d1b97c8305 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 29 Sep 2019 15:19:52 +0800 Subject: [PATCH 45/49] fix comment --- meta/autoid/autoid.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index cb1a8f7326428..43a3a4fb8338f 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -261,19 +261,19 @@ func GenLocalSchemaID() int64 { } // Alloc implements autoid.Allocator Alloc interface. 
-func (alloc *allocator) Alloc(tableID int64, N uint64) ([]int64, error) { +func (alloc *allocator) Alloc(tableID int64, n uint64) ([]int64, error) { if tableID == 0 { return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") } - if N == 0 { + if n == 0 { return []int64{}, nil } alloc.mu.Lock() defer alloc.mu.Unlock() if alloc.isUnsigned { - return alloc.alloc4Unsigned(tableID, N) + return alloc.alloc4Unsigned(tableID, n) } - return alloc.alloc4Signed(tableID, N) + return alloc.alloc4Signed(tableID, n) } func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { From 4be6639329336f88b86b9255421ca07a843b75cd Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 6 Oct 2019 11:26:45 +0900 Subject: [PATCH 46/49] return min and max instead of slice in alloc autoid --- meta/autoid/autoid.go | 51 ++++----- meta/autoid/autoid_test.go | 206 ++++++++++++++++++++----------------- table/table.go | 14 ++- table/tables/tables.go | 3 +- 4 files changed, 145 insertions(+), 129 deletions(-) mode change 100644 => 100755 meta/autoid/autoid.go diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go old mode 100644 new mode 100755 index 43a3a4fb8338f..8551064081ae0 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -48,7 +48,7 @@ type Allocator interface { // Alloc allocs N consecutive autoID for table with tableID. // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. // The consecutive feature is used to insert multiple rows in a statement. - Alloc(tableID int64, n uint64) ([]int64, error) + Alloc(tableID int64, n uint64) (int64, int64, error) // Rebase rebases the autoID base for table with tableID and the new base value. // If allocIDs is true, it will allocate some IDs and save to the cache. // If allocIDs is false, it will not allocate IDs. @@ -261,12 +261,13 @@ func GenLocalSchemaID() int64 { } // Alloc implements autoid.Allocator Alloc interface. -func (alloc *allocator) Alloc(tableID int64, n uint64) ([]int64, error) { +func (alloc *allocator) Alloc(tableID int64, n uint64) (int64, int64, error) { if tableID == 0 { - return nil, errInvalidTableID.GenWithStackByArgs("Invalid tableID") + return 0, 0, errInvalidTableID.GenWithStackByArgs("Invalid tableID") } if n == 0 { - return []int64{}, nil + //todo: alloc(tableID,0) will cause duplicate allocation in extreme test case, return err may be better. + return 0, 0, nil } alloc.mu.Lock() defer alloc.mu.Unlock() @@ -276,11 +277,11 @@ func (alloc *allocator) Alloc(tableID int64, n uint64) ([]int64, error) { return alloc.alloc4Signed(tableID, n) } -func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { +func (alloc *allocator) alloc4Signed(tableID int64, n uint64) (int64, int64, error) { n1 := int64(n) // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. - if math.MaxInt64-alloc.base < n1 { - return nil, ErrAutoincReadFailed + if math.MaxInt64-alloc.base <= n1 { + return 0, 0, ErrAutoincReadFailed } // The local rest is not enough for allocN, skip it. 
if alloc.base+n1 > alloc.end { @@ -312,11 +313,11 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { }) metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) if err != nil { - return nil, err + return 0, 0, err } alloc.lastAllocTime = time.Now() if newBase == math.MaxInt64 { - return nil, ErrAutoincReadFailed + return 0, 0, ErrAutoincReadFailed } alloc.base, alloc.end = newBase, newEnd } @@ -325,23 +326,16 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) ([]int64, error) { zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) - resN := make([]int64, 0, n1) - for i := alloc.base + 1; i <= alloc.base+n1; i++ { - // fix bug : maxInt64 will be allocated - if i == math.MaxInt64 { - return nil, ErrAutoincReadFailed - } - resN = append(resN, i) - } + min := alloc.base alloc.base += n1 - return resN, nil + return min, alloc.base, nil } -func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) { +func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) (int64, int64, error) { n1 := int64(n) // Condition alloc.base+n1 > alloc.end will overflow when alloc.base + n1 > MaxInt64. So need this. - if math.MaxUint64-uint64(alloc.base) < n { - return nil, ErrAutoincReadFailed + if math.MaxUint64-uint64(alloc.base) <= n { + return 0, 0, ErrAutoincReadFailed } // The local rest is not enough for alloc, skip it. if uint64(alloc.base)+n > uint64(alloc.end) { @@ -373,11 +367,11 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) }) metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) if err != nil { - return nil, err + return 0, 0, err } alloc.lastAllocTime = time.Now() if uint64(newBase) == math.MaxUint64 { - return nil, ErrAutoincReadFailed + return 0, 0, ErrAutoincReadFailed } alloc.base, alloc.end = newBase, newEnd } @@ -386,15 +380,8 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) ([]int64, error) zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", tableID), zap.Int64("database ID", alloc.dbID)) - resN := make([]int64, 0, n1) - for i := uint64(alloc.base) + 1; i <= uint64(alloc.base)+n; i++ { - // fix bug : maxUint64 will be allocated - if i == math.MaxUint64 { - return nil, ErrAutoincReadFailed - } - resN = append(resN, int64(i)) - } + min := alloc.base // Use uint64 n directly. 
alloc.base = int64(uint64(alloc.base) + n) - return resN, nil + return min, alloc.base, nil } diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index 9feb2d1d458c2..9adacbc82d21b 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -73,13 +73,13 @@ func (*testSuite) TestT(c *C) { globalAutoID, err := alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - id, err := alloc.Alloc(1, 1) + _, id, err := alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(1)) - id, err = alloc.Alloc(1, 1) + c.Assert(id, Equals, int64(1)) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(2)) - _, err = alloc.Alloc(0, 1) + c.Assert(id, Equals, int64(2)) + _, _, err = alloc.Alloc(0, 1) c.Assert(err, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) @@ -88,38 +88,38 @@ func (*testSuite) TestT(c *C) { // rebase err = alloc.Rebase(1, int64(1), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3)) + c.Assert(id, Equals, int64(3)) err = alloc.Rebase(1, int64(3), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(4)) + c.Assert(id, Equals, int64(4)) err = alloc.Rebase(1, int64(10), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(11)) + c.Assert(id, Equals, int64(11)) err = alloc.Rebase(1, int64(3010), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3011)) + c.Assert(id, Equals, int64(3011)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(autoid.GetStep()+1)) + c.Assert(id, Equals, int64(autoid.GetStep()+1)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) err = alloc.Rebase(2, int64(1), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(2, 1) + _, id, err = alloc.Alloc(2, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(2)) + c.Assert(id, Equals, int64(2)) alloc = autoid.NewAllocator(store, 1, false) c.Assert(alloc, NotNil) @@ -129,19 +129,19 @@ func (*testSuite) TestT(c *C) { c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(3000), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3, 1) + _, id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3211)) + c.Assert(id, Equals, int64(3211)) err = alloc.Rebase(3, int64(6543), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3, 1) + _, id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(6544)) + c.Assert(id, Equals, int64(6544)) // Test the MaxInt64 is the upper bound of `alloc` function but not `rebase`. 
err = alloc.Rebase(3, int64(math.MaxInt64-1), true) c.Assert(err, IsNil) - _, err = alloc.Alloc(3, 1) + _, _, err = alloc.Alloc(3, 1) c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(math.MaxInt64), true) c.Assert(err, IsNil) @@ -152,45 +152,42 @@ func (*testSuite) TestT(c *C) { globalAutoID, err = alloc.NextGlobalAutoID(4) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - idN, err := alloc.Alloc(4, 1) + min, max, err := alloc.Alloc(4, 1) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 1) - c.Assert(idN[0], Equals, int64(1)) + c.Assert(max-min, Equals, int64(1)) + c.Assert(min+1, Equals, int64(1)) - idN, err = alloc.Alloc(4, 2) + min, max, err = alloc.Alloc(4, 2) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 2) - c.Assert(idN[0], Equals, int64(2)) - c.Assert(idN[1], Equals, int64(3)) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(2)) + c.Assert(max, Equals, int64(3)) - idN, err = alloc.Alloc(4, 100) + min, max, err = alloc.Alloc(4, 100) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 100) - for i := 0; i < 100; i++ { - c.Assert(idN[i], Equals, int64(i+4)) + c.Assert(max-min, Equals, int64(100)) + expected := int64(4) + for i := min + 1; i <= max; i++ { + c.Assert(i, Equals, expected) + expected++ } err = alloc.Rebase(4, int64(1000), false) c.Assert(err, IsNil) - idN, err = alloc.Alloc(4, 3) + min, max, err = alloc.Alloc(4, 3) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 3) - c.Assert(idN[0], Equals, int64(1001)) - c.Assert(idN[1], Equals, int64(1002)) - c.Assert(idN[2], Equals, int64(1003)) + c.Assert(max-min, Equals, int64(3)) + c.Assert(min+1, Equals, int64(1001)) + c.Assert(min+2, Equals, int64(1002)) + c.Assert(max, Equals, int64(1003)) lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) c.Assert(err, IsNil) - idN, err = alloc.Alloc(4, 5) + min, max, err = alloc.Alloc(4, 5) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 5) - c.Assert(idN[0], Greater, lastRemainOne) - consecutive := idN[0] - for i := 1; i < 5; i++ { - consecutive++ - c.Assert(idN[i], Equals, consecutive) - } + c.Assert(max-min, Equals, int64(5)) + c.Assert(min+1, Greater, lastRemainOne) } func (*testSuite) TestUnsignedAutoid(c *C) { @@ -225,13 +222,13 @@ func (*testSuite) TestUnsignedAutoid(c *C) { globalAutoID, err := alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - id, err := alloc.Alloc(1, 1) + _, id, err := alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(1)) - id, err = alloc.Alloc(1, 1) + c.Assert(id, Equals, int64(1)) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(2)) - _, err = alloc.Alloc(0, 1) + c.Assert(id, Equals, int64(2)) + _, _, err = alloc.Alloc(0, 1) c.Assert(err, NotNil) globalAutoID, err = alloc.NextGlobalAutoID(1) c.Assert(err, IsNil) @@ -240,38 +237,38 @@ func (*testSuite) TestUnsignedAutoid(c *C) { // rebase err = alloc.Rebase(1, int64(1), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3)) + c.Assert(id, Equals, int64(3)) err = alloc.Rebase(1, int64(3), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(4)) + c.Assert(id, Equals, int64(4)) err = alloc.Rebase(1, int64(10), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(11)) + c.Assert(id, 
Equals, int64(11)) err = alloc.Rebase(1, int64(3010), true) c.Assert(err, IsNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3011)) + c.Assert(id, Equals, int64(3011)) alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) - id, err = alloc.Alloc(1, 1) + _, id, err = alloc.Alloc(1, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(autoid.GetStep()+1)) + c.Assert(id, Equals, int64(autoid.GetStep()+1)) alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) err = alloc.Rebase(2, int64(1), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(2, 1) + _, id, err = alloc.Alloc(2, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(2)) + c.Assert(id, Equals, int64(2)) alloc = autoid.NewAllocator(store, 1, true) c.Assert(alloc, NotNil) @@ -281,21 +278,21 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(alloc, NotNil) err = alloc.Rebase(3, int64(3000), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3, 1) + _, id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(3211)) + c.Assert(id, Equals, int64(3211)) err = alloc.Rebase(3, int64(6543), false) c.Assert(err, IsNil) - id, err = alloc.Alloc(3, 1) + _, id, err = alloc.Alloc(3, 1) c.Assert(err, IsNil) - c.Assert(id[0], Equals, int64(6544)) + c.Assert(id, Equals, int64(6544)) // Test the MaxUint64 is the upper bound of `alloc` func but not `rebase`. var n uint64 = math.MaxUint64 - 1 un := int64(n) err = alloc.Rebase(3, un, true) c.Assert(err, IsNil) - _, err = alloc.Alloc(3, 1) + _, _, err = alloc.Alloc(3, 1) c.Assert(err, NotNil) un = int64(n + 1) err = alloc.Rebase(3, un, true) @@ -308,32 +305,27 @@ func (*testSuite) TestUnsignedAutoid(c *C) { c.Assert(err, IsNil) c.Assert(globalAutoID, Equals, int64(1)) - idN, err := alloc.Alloc(4, 2) + min, max, err := alloc.Alloc(4, 2) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 2) - c.Assert(idN[0], Equals, int64(1)) - c.Assert(idN[1], Equals, int64(2)) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(1)) + c.Assert(max, Equals, int64(2)) err = alloc.Rebase(4, int64(500), true) c.Assert(err, IsNil) - idN, err = alloc.Alloc(4, 2) + min, max, err = alloc.Alloc(4, 2) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 2) - c.Assert(idN[0], Equals, int64(501)) - c.Assert(idN[1], Equals, int64(502)) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(501)) + c.Assert(max, Equals, int64(502)) lastRemainOne := alloc.End() err = alloc.Rebase(4, alloc.End()-2, false) c.Assert(err, IsNil) - idN, err = alloc.Alloc(4, 5) + min, max, err = alloc.Alloc(4, 5) c.Assert(err, IsNil) - c.Assert(len(idN), Equals, 5) - c.Assert(idN[0], Greater, lastRemainOne) - consecutive := idN[0] - for i := 1; i < 5; i++ { - consecutive++ - c.Assert(idN[i], Equals, consecutive) - } + c.Assert(max-min, Equals, int64(5)) + c.Assert(min+1, Greater, lastRemainOne) } // TestConcurrentAlloc is used for the test that @@ -368,8 +360,7 @@ func (*testSuite) TestConcurrentAlloc(c *C) { allocIDs := func() { alloc := autoid.NewAllocator(store, dbID, false) for j := 0; j < int(autoid.GetStep())+5; j++ { - ids, err1 := alloc.Alloc(tblID, 1) - id := ids[0] + _, id, err1 := alloc.Alloc(tblID, 1) if err1 != nil { errCh <- err1 break @@ -386,7 +377,7 @@ func (*testSuite) TestConcurrentAlloc(c *C) { //test Alloc N N := rand.Uint64() % 100 - idN, err1 := alloc.Alloc(tblID, N) + min, max, err1 := alloc.Alloc(tblID, N) if err1 != nil { errCh <- err1 break @@ -394,14 +385,14 @@ func 
(*testSuite) TestConcurrentAlloc(c *C) { errFlag := false mu.Lock() - for i := uint64(0); i < N; i++ { - if _, ok := m[idN[i]]; ok { - errCh <- fmt.Errorf("duplicate id:%v", idN[i]) + for i := min + 1; i <= max; i++ { + if _, ok := m[i]; ok { + errCh <- fmt.Errorf("duplicate id:%v", i) errFlag = true mu.Unlock() break } - m[idN[i]] = struct{}{} + m[i] = struct{}{} } if errFlag { break @@ -446,7 +437,7 @@ func (*testSuite) TestRollbackAlloc(c *C) { injectConf.SetCommitError(errors.New("injected")) injectedStore := kv.NewInjectedStore(store, injectConf) alloc := autoid.NewAllocator(injectedStore, 1, false) - _, err = alloc.Alloc(2, 1) + _, _, err = alloc.Alloc(2, 1) c.Assert(err, NotNil) c.Assert(alloc.Base(), Equals, int64(0)) c.Assert(alloc.End(), Equals, int64(0)) @@ -466,3 +457,34 @@ func (*testSuite) TestNextStep(c *C) { nextStep = autoid.NextStep(50000, 10*time.Minute) c.Assert(nextStep, Equals, int64(1000)) } + +func BenchmarkAllocator_Alloc(b *testing.B) { + b.StopTimer() + store, err := mockstore.NewMockTikvStore() + if err != nil { + return + } + defer store.Close() + dbID := int64(1) + tblID := int64(2) + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + if err != nil { + return err + } + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + if err != nil { + return err + } + return nil + }) + if err != nil { + return + } + alloc := autoid.NewAllocator(store, 1, false) + b.StartTimer() + for i := 0; i < b.N; i++ { + alloc.Alloc(2, 1) + } +} diff --git a/table/table.go b/table/table.go index cbb82ea794c55..2f281afbcf030 100644 --- a/table/table.go +++ b/table/table.go @@ -192,11 +192,11 @@ func AllocAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Conte span1 := span.Tracer().StartSpan("table.AllocAutoIncrementValue", opentracing.ChildOf(span.Context())) defer span1.Finish() } - ids, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(1)) + _, max, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(1)) if err != nil { return 0, err } - return ids[0], err + return max, err } // AllocBatchAutoIncrementValue allocates batch auto_increment value for rows. @@ -205,7 +205,15 @@ func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx. span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValue", opentracing.ChildOf(span.Context())) defer span1.Finish() } - return t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N)) + min, max, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N)) + if err != nil { + return nil, err + } + resN := make([]int64, 0, N) + for i := min + 1; i <= max; i++ { + resN = append(resN, i) + } + return resN, nil } // PhysicalTable is an abstraction for two kinds of table representation: partition or non-partitioned table. diff --git a/table/tables/tables.go b/table/tables/tables.go index 44f39c4c9fa4e..7044fd68fba02 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -943,11 +943,10 @@ func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals [ // AllocHandle implements table.Table AllocHandle interface. func (t *tableCommon) AllocHandle(ctx sessionctx.Context) (int64, error) { - rowIDs, err := t.Allocator(ctx).Alloc(t.tableID, 1) + _, rowID, err := t.Allocator(ctx).Alloc(t.tableID, 1) if err != nil { return 0, err } - rowID := rowIDs[0] if t.meta.ShardRowIDBits > 0 { // Use max record ShardRowIDBits to check overflow. 
if OverflowShardBits(rowID, t.meta.MaxShardRowIDBits) { From a29fe3e21102e65a9f16bd7d61f7c36a0c8ff27e Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Sun, 6 Oct 2019 11:26:45 +0900 Subject: [PATCH 47/49] return min and max instead of slice in alloc autoid --- executor/insert_common.go | 12 +++++++----- meta/autoid/autoid.go | 1 - table/table.go | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 3a616d8cad8a6..e4a54fdd33aaa 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -670,23 +670,25 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [] i++ cnt++ } - // Alloc batch N consecutive autoIDs. - recordIDs, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) + // Alloc batch N consecutive (min, max] autoIDs. + // max value can be derived from adding one for cnt times. + min, _, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) if e.filterErr(err) != nil { return nil, err } // It's compatible with mysql setting the first allocated autoID to lastInsertID. // Cause autoID may be specified by user, judge only the first row is not suitable. if e.lastInsertID == 0 { - e.lastInsertID = uint64(recordIDs[0]) + e.lastInsertID = uint64(min) + 1 } // Assign autoIDs to rows. for j := 0; j < cnt; j++ { offset := j + start d := rows[offset][colIdx] - d.SetAutoID(recordIDs[j], col.Flag) - retryInfo.AddAutoIncrementID(recordIDs[j]) + id := int64(uint64(min) + uint64(j) + 1) + d.SetAutoID(id, col.Flag) + retryInfo.AddAutoIncrementID(id) // The value of d is adjusted by auto ID, so we need to cast it again. d, err := table.CastValue(e.ctx, d, col.ToInfo()) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 8551064081ae0..3e0b90c7bda75 100755 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -266,7 +266,6 @@ func (alloc *allocator) Alloc(tableID int64, n uint64) (int64, int64, error) { return 0, 0, errInvalidTableID.GenWithStackByArgs("Invalid tableID") } if n == 0 { - //todo: alloc(tableID,0) will cause duplicate allocation in extreme test case, return err may be better. return 0, 0, nil } alloc.mu.Lock() diff --git a/table/table.go b/table/table.go index 2f281afbcf030..921544d46951c 100644 --- a/table/table.go +++ b/table/table.go @@ -199,8 +199,8 @@ func AllocAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Conte return max, err } -// AllocBatchAutoIncrementValue allocates batch auto_increment value for rows. -func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context, N int) ([]int64, error) { +// AllocBatchAutoIncrementValue allocates batch auto_increment value (min and max] for rows. 
+func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context, N int) (int64, int64, error) {
 	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
 		span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValue", opentracing.ChildOf(span.Context()))
 		defer span1.Finish()
 	}

From 3bd377e5f055adc544c180a482fcc445f7d5f8e1 Mon Sep 17 00:00:00 2001
From: AilinKid <314806019@qq.com>
Date: Sun, 6 Oct 2019 11:26:45 +0900
Subject: [PATCH 48/49] return min and max instead of slice in alloc autoid

---
 table/table.go | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/table/table.go b/table/table.go
index 921544d46951c..41c77bf9a4ac6 100644
--- a/table/table.go
+++ b/table/table.go
@@ -205,15 +205,7 @@ func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.
 		span1 := span.Tracer().StartSpan("table.AllocBatchAutoIncrementValue", opentracing.ChildOf(span.Context()))
 		defer span1.Finish()
 	}
-	min, max, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N))
-	if err != nil {
-		return nil, err
-	}
-	resN := make([]int64, 0, N)
-	for i := min + 1; i <= max; i++ {
-		resN = append(resN, i)
-	}
-	return resN, nil
+	return t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N))
 }
 
 // PhysicalTable is an abstraction for two kinds of table representation: partition or non-partitioned table.

From 5eebe61728e23cdfa9f40ad6ff44b1855ae8066e Mon Sep 17 00:00:00 2001
From: AilinKid <314806019@qq.com>
Date: Thu, 10 Oct 2019 10:15:06 +0800
Subject: [PATCH 49/49] add comment for alloc N

---
 meta/autoid/autoid.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go
index 3e0b90c7bda75..abd52dde335d9 100755
--- a/meta/autoid/autoid.go
+++ b/meta/autoid/autoid.go
@@ -45,7 +45,7 @@ var errInvalidTableID = terror.ClassAutoid.New(codeInvalidTableID, "invalid Tabl
 // Allocator is an auto increment id generator.
 // Just keep id unique actually.
 type Allocator interface {
-	// Alloc allocs N consecutive autoID for table with tableID.
+	// Alloc allocates n consecutive autoIDs for the table with tableID, returning the half-open range (min, max] of the allocated batch.
 	// It gets a batch of autoIDs at a time. So it does not need to access storage for each call.
 	// The consecutive feature is used to insert multiple rows in a statement.
 	Alloc(tableID int64, n uint64) (int64, int64, error)
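
For reference, after patches 46-48 the allocator hands back only the two bounds of a half-open range instead of materializing a slice of IDs; callers derive the individual consecutive IDs themselves. Below is a minimal, self-contained Go sketch (not part of the patch series; the helper name expandIDRange is hypothetical) showing how a caller can expand (min, max] the same way the removed loop in table.AllocBatchAutoIncrementValue did.

// Sketch only: expandIDRange and its usage are illustrative, not part of the patches above.
package main

import "fmt"

// expandIDRange expands the half-open range (min, max] returned by Allocator.Alloc
// into the individual consecutive IDs, mirroring the slice-building loop that
// patch 48 removes from table.AllocBatchAutoIncrementValue.
func expandIDRange(min, max int64) []int64 {
	ids := make([]int64, 0, max-min)
	for id := min + 1; id <= max; id++ {
		ids = append(ids, id)
	}
	return ids
}

func main() {
	// Suppose Alloc(tblID, 3) returned min=1000, max=1003, i.e. the IDs 1001..1003.
	fmt.Println(expandIDRange(1000, 1003)) // [1001 1002 1003]
}

Returning just the bounds avoids building a potentially large slice for every batch insert, which appears to be the motivation behind the (min, max] interface change and the added allocator benchmark.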