
Commit 8faac5e: merge master

jackysp committed Oct 9, 2018
2 parents: 3114517 + 75d6a3e

Showing 29 changed files with 1,933 additions and 124 deletions.
9 changes: 9 additions & 0 deletions Gopkg.lock

(Generated file; diff not rendered.)

17 changes: 8 additions & 9 deletions cmd/explaintest/r/explain_easy.result
@@ -96,15 +96,14 @@ TableReader_11 2.00 root data:TableScan_10
└─TableScan_10 2.00 cop table:t1, range:[0,0], [1,1], keep order:false, stats:pseudo
explain select (select count(1) k from t1 s where s.c1 = t1.c1 having k != 0) from t1;
id count task operator info
Projection_13 10000.00 root k
└─Projection_14 10000.00 root test.t1.c1, ifnull(5_col_0, 0)
└─MergeJoin_15 10000.00 root left outer join, left key:test.t1.c1, right key:s.c1
├─TableReader_18 10000.00 root data:TableScan_17
│ └─TableScan_17 10000.00 cop table:t1, range:[-inf,+inf], keep order:true, stats:pseudo
└─Selection_20 8000.00 root ne(k, 0)
└─Projection_21 10000.00 root 1, s.c1
└─TableReader_23 10000.00 root data:TableScan_22
└─TableScan_22 10000.00 cop table:s, range:[-inf,+inf], keep order:true, stats:pseudo
Projection_12 10000.00 root k
└─Projection_13 10000.00 root test.t1.c1, ifnull(5_col_0, 0)
└─MergeJoin_14 10000.00 root left outer join, left key:test.t1.c1, right key:s.c1
├─TableReader_17 10000.00 root data:TableScan_16
│ └─TableScan_16 10000.00 cop table:t1, range:[-inf,+inf], keep order:true, stats:pseudo
└─Projection_19 8000.00 root 1, s.c1
└─TableReader_21 10000.00 root data:TableScan_20
└─TableScan_20 10000.00 cop table:s, range:[-inf,+inf], keep order:true, stats:pseudo
explain select * from information_schema.columns;
id count task operator info
MemTableScan_4 10000.00 root
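Note: compared with the old plan, Selection_20 (ne(k, 0)) is gone and the remaining operators are renumbered. Since the count(1) aggregation was already eliminated down to the constant 1 in the projection (1, s.c1), the always-true ne(1, 0) filter can now be folded away as well, presumably because the split-out aggregationEliminator rule (see planner/core/optimizer.go below) runs earlier in the optimization pipeline.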
4 changes: 4 additions & 0 deletions cmd/explaintest/r/select.result
@@ -328,3 +328,7 @@ Point_Get_1 1.00 root table:t, handle:1
desc select * from t where a = '1';
id count task operator info
Point_Get_1 1.00 root table:t, handle:1
desc select sysdate(), sleep(1), sysdate();
id count task operator info
Projection_3 1.00 root sysdate(), sleep(1), sysdate()
└─TableDual_4 1.00 root rows:1
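Note: the expected plan keeps both sysdate() calls alive in Projection_3 instead of folding them to plan-time constants; the unFoldableFunctions change in expression/function_traits.go below guarantees this.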
2 changes: 2 additions & 0 deletions cmd/explaintest/t/select.test
@@ -163,3 +163,5 @@ drop table if exists t;
create table t(a bigint primary key, b bigint);
desc select * from t where a = 1;
desc select * from t where a = '1';

desc select sysdate(), sleep(1), sysdate();
25 changes: 25 additions & 0 deletions executor/join.go
@@ -14,6 +14,7 @@
package executor

import (
"math"
"sync"
"sync/atomic"
"unsafe"
@@ -71,6 +72,12 @@ type HashJoinExec struct {
hashTableValBufs [][][]byte

memTracker *memory.Tracker // track memory usage.

// radixBits indicates the number of bits used for radix partitioning. The
// inner relation will be split into 2^radixBits sub-relations before the
// hash tables are built. If the complete inner relation fits in the L2
// cache, radixBits will be 0 and the partition phase can be skipped.
radixBits int
}

// outerChkResource stores the result of the join outer fetch worker,
@@ -269,6 +276,7 @@ func (e *HashJoinExec) fetchInnerRows(ctx context.Context, chkCh chan<- *chunk.C
return
}
if chk.NumRows() == 0 {
e.evalRadixBitNum()
return
}
chkCh <- chk
@@ -277,6 +285,23 @@ func (e *HashJoinExec) fetchInnerRows(ctx context.Context, chkCh chan<- *chunk.C
}
}

// evalRadixBitNum computes the number of radix bits.
func (e *HashJoinExec) evalRadixBitNum() {
sv := e.ctx.GetSessionVars()
// Calculate the number of bits needed for radix partitioning.
if !sv.EnableRadixJoin {
return
}
innerResultSize := float64(e.innerResult.GetMemTracker().BytesConsumed())
// To ensure that one partition of the inner relation, one hash
// table, and one partition of the outer relation all fit into the
// L2 cache when the input data is uniformly distributed, we assume
// each sub-partition of the inner relation uses three quarters of
// the L2 cache size.
l2CacheSize := float64(sv.L2CacheSize) * 3 / 4
e.radixBits = int(math.Log2(innerResultSize / l2CacheSize))
}

func (e *HashJoinExec) initializeForProbe() {
// e.outerResultChs is for transmitting the chunks which store the data of outerExec,
// it'll be written by outer worker goroutine, and read by join workers.
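A minimal, standalone sketch of the arithmetic in evalRadixBitNum, with hypothetical sizes (the real code reads the inner size from the memory tracker and the cache size from the L2CacheSize session variable):

```go
package main

import (
	"fmt"
	"math"
)

// radixBits mirrors evalRadixBitNum: choose enough bits that each of the
// 2^bits inner sub-relations targets about 3/4 of the L2 cache.
func radixBits(innerBytes, l2CacheBytes float64) int {
	subRelationTarget := l2CacheBytes * 3 / 4
	return int(math.Log2(innerBytes / subRelationTarget))
}

func main() {
	// Hypothetical sizes: a 64 MiB inner relation and a 1 MiB L2 cache.
	fmt.Println(radixBits(64<<20, 1<<20)) // 6, i.e. 2^6 = 64 sub-relations
}
```

Note the truncation toward zero: 64 MiB over a 0.75 MiB target gives log2(85.3) ≈ 6.4, so 6 bits, and each of the 64 sub-relations ends up slightly above the three-quarters-of-L2 target.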
12 changes: 10 additions & 2 deletions expression/aggregation/descriptor.go
@@ -298,10 +298,14 @@ func (a *AggFuncDesc) typeInfer4Sum(ctx sessionctx.Context) {
a.RetTp.Decimal = mysql.MaxDecimalScale
}
// TODO: a.Args[0] = expression.WrapWithCastAsDecimal(ctx, a.Args[0])
default:
case mysql.TypeDouble, mysql.TypeFloat:
a.RetTp = types.NewFieldType(mysql.TypeDouble)
a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal
// TODO: a.Args[0] = expression.WrapWithCastAsReal(ctx, a.Args[0])
default:
a.RetTp = types.NewFieldType(mysql.TypeDouble)
a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength
// TODO: a.Args[0] = expression.WrapWithCastAsReal(ctx, a.Args[0])
}
types.SetBinChsClnFlag(a.RetTp)
}
@@ -319,10 +323,14 @@ func (a *AggFuncDesc) typeInfer4Avg(ctx sessionctx.Context) {
}
a.RetTp.Flen = mysql.MaxDecimalWidth
// TODO: a.Args[0] = expression.WrapWithCastAsDecimal(ctx, a.Args[0])
default:
case mysql.TypeDouble, mysql.TypeFloat:
a.RetTp = types.NewFieldType(mysql.TypeDouble)
a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal
// TODO: a.Args[0] = expression.WrapWithCastAsReal(ctx, a.Args[0])
default:
a.RetTp = types.NewFieldType(mysql.TypeDouble)
a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength
// TODO: a.Args[0] = expression.WrapWithCastAsReal(ctx, a.Args[0])
}
types.SetBinChsClnFlag(a.RetTp)
}
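Both hunks share one shape: the return type's Flen stays MaxRealWidth either way, but float and double arguments now keep their own decimal, while everything else that falls to the default branch (e.g. a string column) gets types.UnspecifiedLength, which is exactly what the typeinfer_test.go rows for sum(c_char) and avg(c_char) below now assert. A toy model of the new split, not TiDB's real types:

```go
package main

import "fmt"

// unspecifiedLength stands in for types.UnspecifiedLength (an assumption
// for this sketch; TiDB defines it as -1).
const unspecifiedLength = -1

// inferRetDecimal models the float/double vs. default split above: the
// return type is a double either way, but only float/double arguments
// propagate their decimal (fractional-digit count) to it.
func inferRetDecimal(argType string, argDecimal int) int {
	switch argType {
	case "double", "float":
		return argDecimal
	default: // e.g. a char column aggregated as a double
		return unspecifiedLength
	}
}

func main() {
	fmt.Println(inferRetDecimal("double", 4)) // 4
	fmt.Println(inferRetDecimal("char", 0))   // -1, i.e. unspecified
}
```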
5 changes: 5 additions & 0 deletions expression/builtin_string.go
@@ -1210,6 +1210,11 @@ func (b *builtinSubstringIndexSig) evalString(row chunk.Row) (d string, isNull b
} else {
// If count is negative, everything to the right of the final delimiter (counting from the right) is returned.
count = -count
if count < 0 {
// Negating math.MinInt64 overflows int64, leaving count negative; return an empty string.
return "", false, nil
}

if count < end {
start = end - count
}
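Why the new guard is needed: in Go, negating math.MinInt64 wraps back to math.MinInt64, so `count = -count` can leave count negative. The integration test below passes 9223372036854775808 (2^63) as the count, which presumably reaches this branch as math.MinInt64 after conversion to a signed integer. A standalone demonstration of the wrap-around:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	var count int64 = math.MinInt64
	count = -count                // -(-2^63) does not fit in int64 and wraps
	fmt.Println(count, count < 0) // -9223372036854775808 true
}
```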
1 change: 1 addition & 0 deletions expression/function_traits.go
@@ -40,6 +40,7 @@ var UnCacheableFunctions = map[string]struct{}{

// unFoldableFunctions stores functions which cannot be folded during the constant folding stage.
var unFoldableFunctions = map[string]struct{}{
ast.Sysdate: {},
ast.FoundRows: {},
ast.Rand: {},
ast.UUID: {},
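Note: SYSDATE() returns the time at which the function itself executes (unlike NOW(), which is fixed at statement start), so folding it to a constant during planning would be wrong. Listing ast.Sysdate in unFoldableFunctions is what lets the explaintest above observe the effect of sleep(1) between two sysdate() calls.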
26 changes: 26 additions & 0 deletions expression/function_traits_test.go
@@ -0,0 +1,26 @@
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package expression

import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/util/testleak"
)

func (s *testEvaluatorSuite) TestUnfoldableFuncs(c *C) {
defer testleak.AfterTest(c)()
_, ok := unFoldableFunctions[ast.Sysdate]
c.Assert(ok, IsTrue)
}
1 change: 1 addition & 0 deletions expression/integration_test.go
@@ -736,6 +736,7 @@ func (s *testIntegrationSuite) TestStringBuiltin(c *C) {
result.Check(testkit.Rows("www.pingcap 12345 45 2017 01:01"))
result = tk.MustQuery(`select substring_index('www.pingcap.com', '.', 0), substring_index('www.pingcap.com', '.', 100), substring_index('www.pingcap.com', '.', -100)`)
result.Check(testkit.Rows(" www.pingcap.com www.pingcap.com"))
tk.MustQuery(`select substring_index('xyz', 'abc', 9223372036854775808)`).Check(testkit.Rows(``))
result = tk.MustQuery(`select substring_index('www.pingcap.com', 'd', 1), substring_index('www.pingcap.com', '', 1), substring_index('', '.', 1)`)
result.Check(testutil.RowsWithSep(",", "www.pingcap.com,,"))
result = tk.MustQuery(`select substring_index(null, '.', 1), substring_index('www.pingcap.com', null, 1), substring_index('www.pingcap.com', '.', null)`)
4 changes: 2 additions & 2 deletions expression/typeinfer_test.go
@@ -822,14 +822,14 @@ func (s *testInferTypeSuite) createTestCase4Aggregations() []typeInferTestCase {
{"sum(c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDecimalWidth, 3},
{"sum(1.0)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDecimalWidth, 1},
{"sum(1.2e2)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"sum(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"sum(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_int_d)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDecimalWidth, 4},
{"avg(c_float_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_double_d)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_decimal)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDecimalWidth, 7},
{"avg(1.0)", mysql.TypeNewDecimal, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxDecimalWidth, 5},
{"avg(1.2e2)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"avg(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, 0},
{"avg(c_char)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
{"group_concat(c_int_d)", mysql.TypeVarString, charset.CharsetUTF8, 0, mysql.MaxBlobWidth, 0},
}
}
5 changes: 3 additions & 2 deletions planner/core/logical_plan_builder.go
@@ -68,13 +68,14 @@ func (la *LogicalAggregation) collectGroupByColumns() {

func (b *planBuilder) buildAggregation(p LogicalPlan, aggFuncList []*ast.AggregateFuncExpr, gbyItems []expression.Expression) (LogicalPlan, map[int]int, error) {
b.optFlag = b.optFlag | flagBuildKeyInfo
b.optFlag = b.optFlag | flagAggregationOptimize
b.optFlag = b.optFlag | flagPushDownAgg
// We may apply the aggregation elimination optimization, so we add
// flagMaxMinEliminate to try to convert max/min to topn, and flagPushDownTopN
// to handle the newly added topn operator.
b.optFlag = b.optFlag | flagMaxMinEliminate
b.optFlag = b.optFlag | flagPushDownTopN
// When we eliminate max and min, we may add an `is not null` filter.
b.optFlag = b.optFlag | flagPredicatePushDown
b.optFlag = b.optFlag | flagEliminateAgg

plan4Agg := LogicalAggregation{AggFuncs: make([]*aggregation.AggFuncDesc, 0, len(aggFuncList))}.init(b.ctx)
schema4Agg := expression.NewSchema(make([]*expression.Column, 0, len(aggFuncList)+p.Schema().Len())...)
@@ -605,7 +606,7 @@ func (b *planBuilder) buildProjection(p LogicalPlan, fields []*ast.SelectField,

func (b *planBuilder) buildDistinct(child LogicalPlan, length int) *LogicalAggregation {
b.optFlag = b.optFlag | flagBuildKeyInfo
b.optFlag = b.optFlag | flagAggregationOptimize
b.optFlag = b.optFlag | flagPushDownAgg
plan4Agg := LogicalAggregation{
AggFuncs: make([]*aggregation.AggFuncDesc, 0, child.Schema().Len()),
GroupByItems: expression.Column2Exprs(child.Schema().Clone().Columns[:length]),
14 changes: 6 additions & 8 deletions planner/core/logical_plan_test.go
@@ -1090,6 +1090,10 @@ func (s *testPlanSuite) TestEagerAggregation(c *C) {
sql: "select max(a.c) from t a join t b on a.a=b.a and a.b=b.b group by a.b",
best: "Join{DataScan(a)->DataScan(b)}(a.a,b.a)(a.b,b.b)->Aggr(max(a.c))->Projection",
},
{
sql: "select t1.a, count(t2.b) from t t1, t t2 where t1.a = t2.a group by t1.a",
best: "Join{DataScan(t1)->DataScan(t2)}(t1.a,t2.a)->Projection->Projection",
},
}
s.ctx.GetSessionVars().AllowAggPushDown = true
for _, tt := range tests {
@@ -1099,7 +1103,7 @@

p, err := BuildLogicalPlan(s.ctx, stmt, s.is)
c.Assert(err, IsNil)
p, err = logicalOptimize(flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagAggregationOptimize, p.(LogicalPlan))
p, err = logicalOptimize(flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagPushDownAgg, p.(LogicalPlan))
c.Assert(err, IsNil)
c.Assert(ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
}
@@ -1530,10 +1534,6 @@ func (s *testPlanSuite) TestAggPrune(c *C) {
sql: "select sum(b) from t group by c, d, e",
best: "DataScan(t)->Aggr(sum(test.t.b))->Projection",
},
{
sql: "select t1.a, count(t2.b) from t t1, t t2 where t1.a = t2.a group by t1.a",
best: "Join{DataScan(t1)->DataScan(t2)}(t1.a,t2.a)->Projection->Projection",
},
{
sql: "select tt.a, sum(tt.b) from (select a, b from t) tt group by tt.a",
best: "DataScan(t)->Projection->Projection->Projection",
@@ -1543,7 +1543,6 @@
best: "DataScan(t)->Projection->Projection->Projection->Projection",
},
}
s.ctx.GetSessionVars().AllowAggPushDown = true
for _, tt := range tests {
comment := Commentf("for %s", tt.sql)
stmt, err := s.ParseOneStmt(tt.sql, "", "")
@@ -1552,11 +1551,10 @@
p, err := BuildLogicalPlan(s.ctx, stmt, s.is)
c.Assert(err, IsNil)

p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo|flagAggregationOptimize, p.(LogicalPlan))
p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo|flagEliminateAgg, p.(LogicalPlan))
c.Assert(err, IsNil)
c.Assert(ToString(p), Equals, tt.best, comment)
}
s.ctx.GetSessionVars().AllowAggPushDown = false
}

func (s *testPlanSuite) TestVisitInfo(c *C) {
6 changes: 4 additions & 2 deletions planner/core/optimizer.go
@@ -33,10 +33,11 @@
flagEliminateProjection
flagBuildKeyInfo
flagDecorrelate
flagEliminateAgg
flagMaxMinEliminate
flagPredicatePushDown
flagPartitionProcessor
flagAggregationOptimize
flagPushDownAgg
flagPushDownTopN
)

@@ -45,10 +46,11 @@ var optRuleList = []logicalOptRule{
&projectionEliminater{},
&buildKeySolver{},
&decorrelateSolver{},
&aggregationEliminator{},
&maxMinEliminator{},
&ppdSolver{},
&partitionProcessor{},
&aggregationOptimizer{},
&aggregationPushDownSolver{},
&pushDownTopNOptimizer{},
}

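The old aggregationOptimizer is split into two rules with matching flag renames: aggregationEliminator (flagEliminateAgg) now runs early, right after decorrelation, while aggregationPushDownSolver (flagPushDownAgg) keeps the old rule's slot. The flag constants are declared with iota in the same order as optRuleList, and logicalOptimize runs rule i only when bit i of the flag word is set, which is why the two test files above can now enable elimination and push-down independently. A toy sketch of that gating scheme (not the real rule interface):

```go
package main

import "fmt"

// Toy sketch of the bitmask gating in logicalOptimize: flags are declared
// with iota in the same order as the rule list, and a rule runs only when
// its bit is set in the flag word assembled by the plan builder.
const (
	flagEliminateAgg uint64 = 1 << iota // would gate aggregationEliminator
	flagPushDownAgg                     // would gate aggregationPushDownSolver
)

var ruleNames = []string{"aggregationEliminator", "aggregationPushDownSolver"}

func rulesToRun(flagWord uint64) (run []string) {
	for i, name := range ruleNames {
		if flagWord&(1<<uint(i)) != 0 {
			run = append(run, name)
		}
	}
	return run
}

func main() {
	// TestAggPrune-style flag word: elimination only, no push-down.
	fmt.Println(rulesToRun(flagEliminateAgg)) // [aggregationEliminator]
}
```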
2 changes: 1 addition & 1 deletion planner/core/physical_plan_test.go
@@ -851,7 +851,7 @@ func (s *testPlanSuite) TestDAGPlanBuilderAgg(c *C) {
},
{
sql: "select (select count(1) k from t s where s.a = t.a having k != 0) from t",
best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t)->StreamAgg)->StreamAgg->Sel([ne(k, 0)])}(test.t.a,s.a)->Projection->Projection",
best: "MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))->Projection}(test.t.a,s.a)->Projection->Projection",
},
// Test stream agg with multi group by columns.
{
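Note: this is the physical counterpart of the explain_easy.result change at the top of this diff. With the subquery's count(1) aggregation and the ne(k, 0) filter eliminated during logical optimization, the inner side reduces to a bare Projection, and the planner now chooses a merge left outer join over a hash join for this query.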
(Diff truncated; the remaining changed files are not shown here.)

