infoschema, session: support for events_statements_summary_by_digest (#…
djshow832 authored and sre-bot committed Sep 12, 2019
1 parent 7d53f0f commit 0f55274
Showing 21 changed files with 931 additions and 14 deletions.
19 changes: 16 additions & 3 deletions config/config.go
@@ -93,9 +93,10 @@ type Config struct {
TreatOldVersionUTF8AsUTF8MB4 bool `toml:"treat-old-version-utf8-as-utf8mb4" json:"treat-old-version-utf8-as-utf8mb4"`
// EnableTableLock indicates whether to enable table lock.
// TODO: remove this after table lock features stable.
EnableTableLock bool `toml:"enable-table-lock" json:"enable-table-lock"`
DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"`
SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"`
EnableTableLock bool `toml:"enable-table-lock" json:"enable-table-lock"`
DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"`
SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"`
StmtSummary StmtSummary `toml:"stmt-summary" json:"stmt-summary"`
}

// Log is the log section of config.
@@ -316,6 +317,14 @@ type PessimisticTxn struct {
TTL string `toml:"ttl" json:"ttl"`
}

// StmtSummary is the config for statement summary.
type StmtSummary struct {
// The maximum number of statements kept in memory.
MaxStmtCount uint `toml:"max-stmt-count" json:"max-stmt-count"`
// The maximum length of displayed normalized SQL and sample SQL.
MaxSQLLength uint `toml:"max-sql-length" json:"max-sql-length"`
}

var defaultConf = Config{
Host: "0.0.0.0",
AdvertiseAddress: "",
@@ -410,6 +419,10 @@ var defaultConf = Config{
MaxRetryCount: 256,
TTL: "40s",
},
StmtSummary: StmtSummary{
MaxStmtCount: 100,
MaxSQLLength: 4096,
},
}

var (
7 changes: 7 additions & 0 deletions config/config.toml.example
@@ -315,3 +315,10 @@ max-retry-count = 256
# default TTL in milliseconds for pessimistic lock.
# The value must be between "15s" and "120s".
ttl = "40s"

[stmt-summary]
# max number of statements kept in memory.
max-stmt-count = 100

# max length of displayed normalized sql and sample sql.
max-sql-length = 4096
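
The [stmt-summary] section above maps onto the new StmtSummary struct in config/config.go. For orientation, a hedged sketch of how the length limit might be consumed by the collector; the actual consumer lives in util/stmtsummary, which is not among the files shown here, and config.GetGlobalConfig is TiDB's existing accessor for the global configuration (import of the config package assumed):

func truncateSampleSQL(sql string) string {
	// Sketch only: assumes the collector truncates sample SQL to the configured
	// limit; the real logic in util/stmtsummary may differ.
	maxLen := int(config.GetGlobalConfig().StmtSummary.MaxSQLLength) // defaults to 4096
	if len(sql) > maxLen {
		return sql[:maxLen]
	}
	return sql
}
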
5 changes: 5 additions & 0 deletions config/config_test.go
@@ -70,6 +70,9 @@ txn-total-size-limit=2000
[tikv-client]
commit-timeout="41s"
max-batch-size=128
[stmt-summary]
max-stmt-count=1000
max-sql-length=1024
`)

c.Assert(err, IsNil)
@@ -90,6 +93,8 @@ max-batch-size=128
c.Assert(conf.EnableTableLock, IsTrue)
c.Assert(conf.DelayCleanTableLock, Equals, uint64(5))
c.Assert(conf.SplitRegionMaxNum, Equals, uint64(10000))
c.Assert(conf.StmtSummary.MaxStmtCount, Equals, uint(1000))
c.Assert(conf.StmtSummary.MaxSQLLength, Equals, uint(1024))
c.Assert(f.Close(), IsNil)
c.Assert(os.Remove(configFile), IsNil)

26 changes: 26 additions & 0 deletions domain/global_vars_cache.go
@@ -18,7 +18,9 @@ import (
"time"

"github.com/pingcap/parser/ast"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/stmtsummary"
)

// GlobalVariableCache caches global variables.
@@ -41,6 +43,8 @@ func (gvc *GlobalVariableCache) Update(rows []chunk.Row, fields []*ast.ResultFie
gvc.rows = rows
gvc.fields = fields
gvc.Unlock()

checkEnableStmtSummary(rows, fields)
}

// Get gets the global variables from cache.
@@ -63,6 +67,28 @@ func (gvc *GlobalVariableCache) Disable() {
return
}

// checkEnableStmtSummary looks for TiDBEnableStmtSummary and notifies StmtSummary
func checkEnableStmtSummary(rows []chunk.Row, fields []*ast.ResultField) {
for _, row := range rows {
varName := row.GetString(0)
if varName == variable.TiDBEnableStmtSummary {
varVal := row.GetDatum(1, &fields[1].Column.FieldType)

sVal := ""
if !varVal.IsNull() {
var err error
sVal, err = varVal.ToString()
if err != nil {
return
}
}

stmtsummary.OnEnableStmtSummaryModified(sVal)
break
}
}
}

// GetGlobalVarsCache gets the global variable cache.
func (do *Domain) GetGlobalVarsCache() *GlobalVariableCache {
return &do.gvc
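
stmtsummary.OnEnableStmtSummaryModified is defined in util/stmtsummary, which is not among the files shown here. Judging from TestCheckEnableStmtSummary in the next file, its effect is to parse the variable value and flip the atomic variable.EnableStmtSummary flag. A minimal sketch, assuming the existing variable.TiDBOptOn helper and the sync/atomic import (the real implementation may also clear or resize the in-memory summary map):

// Hedged sketch only; the real function lives in util/stmtsummary.
func OnEnableStmtSummaryModified(value string) {
	if variable.TiDBOptOn(value) {
		atomic.StoreInt32(&variable.EnableStmtSummary, 1)
	} else {
		atomic.StoreInt32(&variable.EnableStmtSummary, 0)
	}
}
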
46 changes: 46 additions & 0 deletions domain/global_vars_cache_test.go
@@ -14,13 +14,15 @@
package domain

import (
"sync/atomic"
"time"

. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
@@ -96,3 +98,47 @@ func getResultField(colName string, id, offset int) *ast.ResultField {
DBName: model.NewCIStr("test"),
}
}

func (gvcSuite *testGVCSuite) TestCheckEnableStmtSummary(c *C) {
defer testleak.AfterTest(c)()
testleak.BeforeTest()

store, err := mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
defer store.Close()
ddlLease := 50 * time.Millisecond
dom := NewDomain(store, ddlLease, 0, mockFactory)
err = dom.Init(ddlLease, sysMockFactory)
c.Assert(err, IsNil)
defer dom.Close()

gvc := dom.GetGlobalVarsCache()

rf := getResultField("c", 1, 0)
rf1 := getResultField("c1", 2, 1)
ft := &types.FieldType{
Tp: mysql.TypeString,
Charset: charset.CharsetBin,
Collate: charset.CollationBin,
}
ft1 := &types.FieldType{
Tp: mysql.TypeString,
Charset: charset.CharsetBin,
Collate: charset.CollationBin,
}

atomic.StoreInt32(&variable.EnableStmtSummary, 0)
ck := chunk.NewChunkWithCapacity([]*types.FieldType{ft, ft1}, 1024)
ck.AppendString(0, variable.TiDBEnableStmtSummary)
ck.AppendString(1, "1")
row := ck.GetRow(0)
gvc.Update([]chunk.Row{row}, []*ast.ResultField{rf, rf1})
c.Assert(atomic.LoadInt32(&variable.EnableStmtSummary), Equals, int32(1))

ck = chunk.NewChunkWithCapacity([]*types.FieldType{ft, ft1}, 1024)
ck.AppendString(0, variable.TiDBEnableStmtSummary)
ck.AppendString(1, "0")
row = ck.GetRow(0)
gvc.Update([]chunk.Row{row}, []*ast.ResultField{rf, rf1})
c.Assert(atomic.LoadInt32(&variable.EnableStmtSummary), Equals, int32(0))
}
23 changes: 23 additions & 0 deletions executor/adapter.go
@@ -46,6 +46,7 @@ import (
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/stmtsummary"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -180,6 +181,7 @@ func (a *recordSet) Close() error {
a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil)
a.stmt.Ctx.GetSessionVars().PrevStmt = a.stmt.OriginText()
a.stmt.logAudit()
a.stmt.SummaryStmt()
return err
}

@@ -779,6 +781,27 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
}
}

// SummaryStmt collects statements for performance_schema.events_statements_summary_by_digest
func (a *ExecStmt) SummaryStmt() {
sessVars := a.Ctx.GetSessionVars()
if sessVars.InRestrictedSQL || atomic.LoadInt32(&variable.EnableStmtSummary) == 0 {
return
}
stmtCtx := sessVars.StmtCtx
normalizedSQL, digest := stmtCtx.SQLDigest()
costTime := time.Since(sessVars.StartTime)
stmtsummary.StmtSummaryByDigestMap.AddStatement(&stmtsummary.StmtExecInfo{
SchemaName: sessVars.CurrentDB,
OriginalSQL: a.Text,
NormalizedSQL: normalizedSQL,
Digest: digest,
TotalLatency: uint64(costTime.Nanoseconds()),
AffectedRows: stmtCtx.AffectedRows(),
SentRows: 0,
StartTime: sessVars.StartTime,
})
}

// IsPointGetWithPKOrUniqueKeyByAutoCommit returns true when meets following conditions:
// 1. ctx is auto commit tagged
// 2. txn is not valid
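
stmtsummary.StmtExecInfo is likewise defined in util/stmtsummary and is not part of this excerpt; its shape can be inferred from the fields SummaryStmt populates above. A sketch with field types inferred from the call site rather than taken from the real definition (time import assumed):

// Inferred sketch of the payload handed to StmtSummaryByDigestMap.AddStatement;
// the real struct in util/stmtsummary may carry additional fields.
type StmtExecInfo struct {
	SchemaName    string    // sessVars.CurrentDB
	OriginalSQL   string    // a.Text, used as the sample SQL
	NormalizedSQL string    // from stmtCtx.SQLDigest()
	Digest        string    // from stmtCtx.SQLDigest()
	TotalLatency  uint64    // nanoseconds since sessVars.StartTime
	AffectedRows  uint64    // stmtCtx.AffectedRows()
	SentRows      uint64    // always 0 in this commit
	StartTime     time.Time // sessVars.StartTime
}
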
17 changes: 17 additions & 0 deletions infoschema/perfschema/const.go
@@ -39,6 +39,7 @@ var perfSchemaTables = []string{
tableStagesCurrent,
tableStagesHistory,
tableStagesHistoryLong,
tableEventsStatementsSummaryByDigest,
}

// tableGlobalStatus contains the column name definitions for table global_status, same as MySQL.
@@ -374,3 +375,19 @@ const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema.ev
"WORK_ESTIMATED BIGINT(20) UNSIGNED," +
"NESTING_EVENT_ID BIGINT(20) UNSIGNED," +
"NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));"

// tableEventsStatementsSummaryByDigest contains the column name definitions for table
// events_statements_summary_by_digest, same as MySQL.
const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists events_statements_summary_by_digest (" +
"SCHEMA_NAME VARCHAR(64) DEFAULT NULL," +
"DIGEST VARCHAR(64) DEFAULT NULL," +
"DIGEST_TEXT LONGTEXT DEFAULT NULL," +
"EXEC_COUNT BIGINT(20) UNSIGNED NOT NULL," +
"SUM_LATENCY BIGINT(20) UNSIGNED NOT NULL," +
"MAX_LATENCY BIGINT(20) UNSIGNED NOT NULL," +
"MIN_LATENCY BIGINT(20) UNSIGNED NOT NULL," +
"AVG_LATENCY BIGINT(20) UNSIGNED NOT NULL," +
"SUM_ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," +
"FIRST_SEEN TIMESTAMP(6) NOT NULL," +
"LAST_SEEN TIMESTAMP(6) NOT NULL," +
"QUERY_SAMPLE_TEXT LONGTEXT DEFAULT NULL);"
49 changes: 49 additions & 0 deletions infoschema/perfschema/tables.go
@@ -16,8 +16,16 @@ package perfschema
import (
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/stmtsummary"
)

const (
tableNameEventsStatementsSummaryByDigest = "events_statements_summary_by_digest"
)

// perfSchemaTable stands for a fake table whose data is kept entirely in memory.
@@ -77,3 +85,44 @@ func (vt *perfSchemaTable) GetPhysicalID() int64 {
func (vt *perfSchemaTable) Meta() *model.TableInfo {
return vt.meta
}

func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) (fullRows [][]types.Datum, err error) {
switch vt.meta.Name.O {
case tableNameEventsStatementsSummaryByDigest:
fullRows = stmtsummary.StmtSummaryByDigestMap.ToDatum()
}
if len(cols) == len(vt.cols) {
return
}
rows := make([][]types.Datum, len(fullRows))
for i, fullRow := range fullRows {
row := make([]types.Datum, len(cols))
for j, col := range cols {
row[j] = fullRow[col.Offset]
}
rows[i] = row
}
return rows, nil
}

// IterRecords implements table.Table IterRecords interface.
func (vt *perfSchemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column,
fn table.RecordIterFunc) error {
if len(startKey) != 0 {
return table.ErrUnsupportedOp
}
rows, err := vt.getRows(ctx, cols)
if err != nil {
return err
}
for i, row := range rows {
more, err := fn(int64(i), row, cols)
if err != nil {
return err
}
if !more {
break
}
}
return nil
}
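
Putting the pieces together, a minimal end-to-end sketch in the testkit style used across TiDB's test suites. The table name comes from the DDL in perfschema/const.go above; the system-variable name tidb_enable_stmt_summary is assumed from variable.TiDBEnableStmtSummary, and the surrounding scaffolding (s.store, testkit, the check framework) is assumed rather than taken from this commit:

// Sketch only: enable the collector, run some statements, then read them back.
// A global-variable change reaches the collector via the GlobalVariableCache
// update shown in domain/global_vars_cache.go, so a real test may need to wait
// for, or force, that refresh before querying the table.
func (s *testSuite) TestStmtSummaryTable(c *C) {
	tk := testkit.NewTestKit(c, s.store)
	tk.MustExec("set global tidb_enable_stmt_summary = 1")
	tk.MustExec("use test")
	tk.MustExec("create table if not exists t (a int)")
	tk.MustExec("insert into t values (1)")
	rows := tk.MustQuery("select schema_name, digest_text, exec_count, query_sample_text " +
		"from performance_schema.events_statements_summary_by_digest").Rows()
	c.Assert(len(rows) > 0, IsTrue)
}
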