diff --git a/executor/adapter.go b/executor/adapter.go index 5b63fa99e0428..b4ff8d4cfe85c 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -775,15 +775,29 @@ func (a *ExecStmt) SummaryStmt() { stmtCtx := sessVars.StmtCtx normalizedSQL, digest := stmtCtx.SQLDigest() costTime := time.Since(sessVars.StartTime) + + execDetail := stmtCtx.GetExecDetails() + copTaskInfo := stmtCtx.CopTasksDetails() + memMax := stmtCtx.MemTracker.MaxConsumed() + var userString string + if sessVars.User != nil { + userString = sessVars.User.String() + } + stmtsummary.StmtSummaryByDigestMap.AddStatement(&stmtsummary.StmtExecInfo{ - SchemaName: sessVars.CurrentDB, - OriginalSQL: a.Text, - NormalizedSQL: normalizedSQL, - Digest: digest, - TotalLatency: uint64(costTime.Nanoseconds()), - AffectedRows: stmtCtx.AffectedRows(), - SentRows: 0, - StartTime: sessVars.StartTime, + SchemaName: strings.ToLower(sessVars.CurrentDB), + OriginalSQL: a.Text, + NormalizedSQL: normalizedSQL, + Digest: digest, + User: userString, + TotalLatency: costTime, + ParseLatency: sessVars.DurationParse, + CompileLatency: sessVars.DurationCompile, + StmtCtx: stmtCtx, + CopTasks: copTaskInfo, + ExecDetail: &execDetail, + MemMax: memMax, + StartTime: sessVars.StartTime, }) } diff --git a/infoschema/perfschema/const.go b/infoschema/perfschema/const.go index b72226e954d36..77d0953e74165 100644 --- a/infoschema/perfschema/const.go +++ b/infoschema/perfschema/const.go @@ -382,15 +382,63 @@ const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema.ev // tableEventsStatementsSummaryByDigest contains the column name definitions for table // events_statements_summary_by_digest, same as MySQL. const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists events_statements_summary_by_digest (" + + "STMT_TYPE VARCHAR(64) NOT NULL," + "SCHEMA_NAME VARCHAR(64) DEFAULT NULL," + - "DIGEST VARCHAR(64) DEFAULT NULL," + - "DIGEST_TEXT LONGTEXT DEFAULT NULL," + + "DIGEST VARCHAR(64) NOT NULL," + + "DIGEST_TEXT LONGTEXT NOT NULL," + + "TABLE_NAMES TEXT DEFAULT NULL," + + "INDEX_NAMES TEXT DEFAULT NULL," + + "SAMPLE_USER VARCHAR(64) DEFAULT NULL," + "EXEC_COUNT BIGINT(20) UNSIGNED NOT NULL," + "SUM_LATENCY BIGINT(20) UNSIGNED NOT NULL," + "MAX_LATENCY BIGINT(20) UNSIGNED NOT NULL," + "MIN_LATENCY BIGINT(20) UNSIGNED NOT NULL," + "AVG_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "SUM_ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," + + "AVG_PARSE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + + "MAX_PARSE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + + "AVG_COMPILE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COMPILE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + + "COP_TASK_NUM BIGINT(20) UNSIGNED NOT NULL," + + "AVG_COP_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COP_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COP_PROCESS_ADDRESS VARCHAR(256) DEFAULT NULL," + + "AVG_COP_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COP_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COP_WAIT_ADDRESS VARCHAR(256) DEFAULT NULL," + + "AVG_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_TOTAL_KEYS BIGINT(20) UNSIGNED NOT NULL," + + "MAX_TOTAL_KEYS BIGINT(20) UNSIGNED NOT NULL," + + "AVG_PROCESSED_KEYS BIGINT(20) UNSIGNED NOT NULL," + + "MAX_PROCESSED_KEYS BIGINT(20) UNSIGNED NOT 
NULL," + + "AVG_PREWRITE_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_PREWRITE_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_COMMIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COMMIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_GET_COMMIT_TS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_GET_COMMIT_TS_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_COMMIT_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_COMMIT_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_RESOLVE_LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_RESOLVE_LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_LOCAL_LATCH_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "MAX_LOCAL_LATCH_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + + "AVG_WRITE_KEYS DOUBLE UNSIGNED NOT NULL," + + "MAX_WRITE_KEYS BIGINT(20) UNSIGNED NOT NULL," + + "AVG_WRITE_SIZE DOUBLE NOT NULL," + + "MAX_WRITE_SIZE BIGINT(20) UNSIGNED NOT NULL," + + "AVG_PREWRITE_REGIONS DOUBLE NOT NULL," + + "MAX_PREWRITE_REGIONS INT(11) UNSIGNED NOT NULL," + + "AVG_TXN_RETRY DOUBLE NOT NULL," + + "MAX_TXN_RETRY INT(11) UNSIGNED NOT NULL," + + "BACKOFF_TYPES VARCHAR(1024) DEFAULT NULL," + + "AVG_MEM BIGINT(20) UNSIGNED NOT NULL," + + "MAX_MEM BIGINT(20) UNSIGNED NOT NULL," + + "AVG_AFFECTED_ROWS DOUBLE UNSIGNED NOT NULL," + "FIRST_SEEN TIMESTAMP(6) NOT NULL," + "LAST_SEEN TIMESTAMP(6) NOT NULL," + "QUERY_SAMPLE_TEXT LONGTEXT DEFAULT NULL);" diff --git a/infoschema/perfschema/tables_test.go b/infoschema/perfschema/tables_test.go index b344a1fad9ec3..8fbd0a416bc9a 100644 --- a/infoschema/perfschema/tables_test.go +++ b/infoschema/perfschema/tables_test.go @@ -69,7 +69,7 @@ func (s *testTableSuite) TestStmtSummaryTable(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b varchar(10))") + tk.MustExec("create table t(a int, b varchar(10), key k(a))") // Statement summary is disabled by default tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("0")) @@ -90,23 +90,29 @@ func (s *testTableSuite) TestStmtSummaryTable(c *C) { tk.MustExec("insert into t values(2, 'b')") tk.MustExec("insert into t VALUES(3, 'c')") tk.MustExec("/**/insert into t values(4, 'd')") - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest where digest_text like 'insert into t%'`, - ).Check(testkit.Rows("test 4 4 insert into t values(1, 'a')")) + ).Check(testkit.Rows("insert test test.t 4 0 0 0 0 0 2 2 1 1 1 /**/insert into t values(4, 'd')")) // Test SELECT tk.MustQuery("select * from t where a=2") - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest where digest_text like 'select * from t%'`, - ).Check(testkit.Rows("test 1 0 select * from t where a=2")) + ).Check(testkit.Rows("select test test.t t:k 1 2 0 0 0 0 0 0 0 0 0 select * from t where a=2")) // select ... 
order by - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest order by exec_count desc limit 1`, - ).Check(testkit.Rows("test 4 4 insert into t values(1, 'a')")) + ).Check(testkit.Rows("insert test test.t 4 0 0 0 0 0 2 2 1 1 1 /**/insert into t values(4, 'd')")) // Disable it again tk.MustExec("set global tidb_enable_stmt_summary = false") @@ -119,18 +125,38 @@ func (s *testTableSuite) TestStmtSummaryTable(c *C) { tk.MustQuery("select * from t where a=2") // The table should be cleared - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest`, ).Check(testkit.Rows()) // Enable it in session scope tk.MustExec("set session tidb_enable_stmt_summary = on") // It should work immediately + tk.MustExec("begin") + tk.MustExec("insert into t values(1, 'a')") + tk.MustExec("commit") + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text + from performance_schema.events_statements_summary_by_digest + where digest_text like 'insert into t%'`, + ).Check(testkit.Rows("insert test test.t 1 0 0 0 0 0 0 0 0 0 1 insert into t values(1, 'a')")) + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text + from performance_schema.events_statements_summary_by_digest + where digest_text='commit'`, + ).Check(testkit.Rows("commit test 1 0 0 0 0 0 2 2 1 1 0 commit")) + tk.MustQuery("select * from t where a=2") - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest where digest_text like 'select * from t%'`, - ).Check(testkit.Rows("test 1 0 select * from t where a=2")) + ).Check(testkit.Rows("select test test.t t:k 1 2 0 0 0 0 0 0 0 0 0 select * from t where a=2")) // Disable it in global scope tk.MustExec("set global tidb_enable_stmt_summary = off") @@ -141,17 +167,21 @@ func (s *testTableSuite) TestStmtSummaryTable(c *C) { tk.MustQuery("select * from t where a=2") // Statement summary is still enabled - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, 
index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest where digest_text like 'select * from t%'`, - ).Check(testkit.Rows("test 2 0 select * from t where a=2")) + ).Check(testkit.Rows("select test test.t t:k 2 4 0 0 0 0 0 0 0 0 0 select * from t where a=2")) // Unset session variable tk.MustExec("set session tidb_enable_stmt_summary = ''") tk.MustQuery("select * from t where a=2") // Statement summary is disabled - tk.MustQuery(`select schema_name, exec_count, sum_rows_affected, query_sample_text + tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, + max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, + max_prewrite_regions, avg_affected_rows, query_sample_text from performance_schema.events_statements_summary_by_digest`, ).Check(testkit.Rows()) } diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go index c00b42fe9be05..3b04936901c04 100644 --- a/util/stmtsummary/statement_summary.go +++ b/util/stmtsummary/statement_summary.go @@ -14,13 +14,19 @@ package stmtsummary import ( + "bytes" + "fmt" + "sort" "strings" "sync" + "sync/atomic" "time" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/kvcache" ) @@ -28,22 +34,21 @@ import ( // There're many types of statement summary tables in MySQL, but we have // only implemented events_statement_summary_by_digest for now. -// stmtSummaryByDigestKey defines key for stmtSummaryByDigestMap.summaryMap +// stmtSummaryByDigestKey defines key for stmtSummaryByDigestMap.summaryMap. type stmtSummaryByDigestKey struct { // Same statements may appear in different schema, but they refer to different tables. schemaName string digest string - // TODO: add plan digest - // `hash` is the hash value of this object + // `hash` is the hash value of this object. hash []byte } -// Hash implements SimpleLRUCache.Key +// Hash implements SimpleLRUCache.Key. func (key *stmtSummaryByDigestKey) Hash() []byte { if len(key.hash) == 0 { key.hash = make([]byte, 0, len(key.schemaName)+len(key.digest)) key.hash = append(key.hash, hack.Slice(key.digest)...) - key.hash = append(key.hash, hack.Slice(strings.ToLower(key.schemaName))...) + key.hash = append(key.hash, hack.Slice(key.schemaName)...) } return key.hash } @@ -71,17 +76,70 @@ var StmtSummaryByDigestMap = newStmtSummaryByDigestMap() type stmtSummaryByDigest struct { // It's rare to read concurrently, so RWMutex is not needed. 
sync.Mutex - schemaName string - digest string - normalizedSQL string - sampleSQL string - execCount uint64 - sumLatency uint64 - maxLatency uint64 - minLatency uint64 + // basic + schemaName string + digest string + stmtType string + normalizedSQL string + sampleSQL string + tableNames string + indexNames string + sampleUser string + execCount int64 + // latency + sumLatency time.Duration + maxLatency time.Duration + minLatency time.Duration + sumParseLatency time.Duration + maxParseLatency time.Duration + sumCompileLatency time.Duration + maxCompileLatency time.Duration + // coprocessor + numCopTasks int64 + sumCopProcessTime int64 + maxCopProcessTime time.Duration + maxCopProcessAddress string + sumCopWaitTime int64 + maxCopWaitTime time.Duration + maxCopWaitAddress string + // TiKV + sumProcessTime time.Duration + maxProcessTime time.Duration + sumWaitTime time.Duration + maxWaitTime time.Duration + sumBackoffTime time.Duration + maxBackoffTime time.Duration + sumTotalKeys int64 + maxTotalKeys int64 + sumProcessedKeys int64 + maxProcessedKeys int64 + // txn + commitCount int64 + sumGetCommitTsTime time.Duration + maxGetCommitTsTime time.Duration + sumPrewriteTime time.Duration + maxPrewriteTime time.Duration + sumCommitTime time.Duration + maxCommitTime time.Duration + sumLocalLatchTime time.Duration + maxLocalLatchTime time.Duration + sumCommitBackoffTime int64 + maxCommitBackoffTime int64 + sumResolveLockTime int64 + maxResolveLockTime int64 + sumWriteKeys int64 + maxWriteKeys int + sumWriteSize int64 + maxWriteSize int + sumPrewriteRegionNum int64 + maxPrewriteRegionNum int32 + sumTxnRetry int64 + maxTxnRetry int + backoffTypes map[fmt.Stringer]int + // other + sumMem int64 + maxMem int64 sumAffectedRows uint64 - // Number of rows sent to client. - sumSentRows uint64 // The first time this type of SQL executes. firstSeen time.Time // The last time this type of SQL executes. @@ -90,15 +148,19 @@ type stmtSummaryByDigest struct { // StmtExecInfo records execution information of each statement. type StmtExecInfo struct { - SchemaName string - OriginalSQL string - NormalizedSQL string - Digest string - TotalLatency uint64 - AffectedRows uint64 - // Number of rows sent to client. - SentRows uint64 - StartTime time.Time + SchemaName string + OriginalSQL string + NormalizedSQL string + Digest string + User string + TotalLatency time.Duration + ParseLatency time.Duration + CompileLatency time.Duration + StmtCtx *stmtctx.StatementContext + CopTasks *stmtctx.CopTasksDetails + ExecDetail *execdetails.ExecDetails + MemMax int64 + StartTime time.Time } // newStmtSummaryByDigestMap creates an empty stmtSummaryByDigestMap. 
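Note on the data flow: `StmtExecInfo` is the per-execution record that the executor assembles once a statement finishes, and `AddStatement` below folds it into the per-digest summary. A minimal sketch of the producer side, mirroring `ExecStmt.SummaryStmt` in the adapter.go hunk above (the wrapper function name `summarize` is illustrative only, not part of this change):

```go
// Sketch: assembling a StmtExecInfo after a statement finishes, mirroring
// ExecStmt.SummaryStmt above. sessVars is the session's variable.SessionVars;
// a.Text is the original statement text. Illustrative, not part of the diff.
func summarize(a *ExecStmt, sessVars *variable.SessionVars) {
	stmtCtx := sessVars.StmtCtx
	normalizedSQL, digest := stmtCtx.SQLDigest()
	execDetail := stmtCtx.GetExecDetails()
	stmtsummary.StmtSummaryByDigestMap.AddStatement(&stmtsummary.StmtExecInfo{
		SchemaName:     strings.ToLower(sessVars.CurrentDB), // lower-cased to match the digest key
		OriginalSQL:    a.Text,
		NormalizedSQL:  normalizedSQL,
		Digest:         digest,
		TotalLatency:   time.Since(sessVars.StartTime),
		ParseLatency:   sessVars.DurationParse,
		CompileLatency: sessVars.DurationCompile,
		StmtCtx:        stmtCtx,
		CopTasks:       stmtCtx.CopTasksDetails(),
		ExecDetail:     &execDetail,
		MemMax:         stmtCtx.MemTracker.MaxConsumed(),
		StartTime:      sessVars.StartTime,
	})
}
```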
@@ -113,59 +175,6 @@ func newStmtSummaryByDigestMap() *stmtSummaryByDigestMap { return ssMap } -// newStmtSummaryByDigest creates a stmtSummaryByDigest from StmtExecInfo -func newStmtSummaryByDigest(sei *StmtExecInfo) *stmtSummaryByDigest { - // Trim SQL to size MaxSQLLength - maxSQLLength := config.GetGlobalConfig().StmtSummary.MaxSQLLength - normalizedSQL := sei.NormalizedSQL - if len(normalizedSQL) > int(maxSQLLength) { - normalizedSQL = normalizedSQL[:maxSQLLength] - } - sampleSQL := sei.OriginalSQL - if len(sampleSQL) > int(maxSQLLength) { - sampleSQL = sampleSQL[:maxSQLLength] - } - - return &stmtSummaryByDigest{ - schemaName: sei.SchemaName, - digest: sei.Digest, - normalizedSQL: normalizedSQL, - sampleSQL: sampleSQL, - execCount: 1, - sumLatency: sei.TotalLatency, - maxLatency: sei.TotalLatency, - minLatency: sei.TotalLatency, - sumAffectedRows: sei.AffectedRows, - sumSentRows: sei.SentRows, - firstSeen: sei.StartTime, - lastSeen: sei.StartTime, - } -} - -// Add a StmtExecInfo to stmtSummary -func (ssbd *stmtSummaryByDigest) add(sei *StmtExecInfo) { - ssbd.Lock() - - ssbd.sumLatency += sei.TotalLatency - ssbd.execCount++ - if sei.TotalLatency > ssbd.maxLatency { - ssbd.maxLatency = sei.TotalLatency - } - if sei.TotalLatency < ssbd.minLatency { - ssbd.minLatency = sei.TotalLatency - } - ssbd.sumAffectedRows += sei.AffectedRows - ssbd.sumSentRows += sei.SentRows - if sei.StartTime.Before(ssbd.firstSeen) { - ssbd.firstSeen = sei.StartTime - } - if ssbd.lastSeen.Before(sei.StartTime) { - ssbd.lastSeen = sei.StartTime - } - - ssbd.Unlock() -} - // AddStatement adds a statement to StmtSummaryByDigestMap. func (ssMap *stmtSummaryByDigestMap) AddStatement(sei *StmtExecInfo) { key := &stmtSummaryByDigestKey{ @@ -173,18 +182,23 @@ func (ssMap *stmtSummaryByDigestMap) AddStatement(sei *StmtExecInfo) { digest: sei.Digest, } - ssMap.Lock() - // Check again. Statements could be added before disabling the flag and after Clear() - if !ssMap.Enabled() { - ssMap.Unlock() - return - } - value, ok := ssMap.summaryMap.Get(key) - if !ok { - newSummary := newStmtSummaryByDigest(sei) - ssMap.summaryMap.Put(key, newSummary) - } - ssMap.Unlock() + // Enclose the block in a function to ensure the lock will always be released. + value, ok := func() (kvcache.Value, bool) { + ssMap.Lock() + defer ssMap.Unlock() + + // Check again. Statements could be added before disabling the flag and after Clear(). + if !ssMap.Enabled() { + return nil, false + } + + value, ok := ssMap.summaryMap.Get(key) + if !ok { + newSummary := newStmtSummaryByDigest(sei) + ssMap.summaryMap.Put(key, newSummary) + } + return value, ok + }() // Lock a single entry, not the whole cache. if ok { @@ -195,11 +209,12 @@ func (ssMap *stmtSummaryByDigestMap) AddStatement(sei *StmtExecInfo) { // Clear removes all statement summaries. func (ssMap *stmtSummaryByDigestMap) Clear() { ssMap.Lock() + defer ssMap.Unlock() + ssMap.summaryMap.DeleteAll() - ssMap.Unlock() } -// ToDatum converts statement summary to Datum +// ToDatum converts statement summary to datum. 
func (ssMap *stmtSummaryByDigestMap) ToDatum() [][]types.Datum { ssMap.Lock() values := ssMap.summaryMap.Values() @@ -207,29 +222,36 @@ func (ssMap *stmtSummaryByDigestMap) ToDatum() [][]types.Datum { rows := make([][]types.Datum, 0, len(values)) for _, value := range values { - summary := value.(*stmtSummaryByDigest) - summary.Lock() - record := types.MakeDatums( - summary.schemaName, - summary.digest, - summary.normalizedSQL, - summary.execCount, - summary.sumLatency, - summary.maxLatency, - summary.minLatency, - summary.sumLatency/summary.execCount, // AVG_LATENCY - summary.sumAffectedRows, - types.Time{Time: types.FromGoTime(summary.firstSeen), Type: mysql.TypeTimestamp}, - types.Time{Time: types.FromGoTime(summary.lastSeen), Type: mysql.TypeTimestamp}, - summary.sampleSQL, - ) - summary.Unlock() + ssbd := value.(*stmtSummaryByDigest) + record := ssbd.toDatum() rows = append(rows, record) } - return rows } +// GetMoreThanOnceSelect gets select SQLs that occurred more than once. +func (ssMap *stmtSummaryByDigestMap) GetMoreThanOnceSelect() ([]string, []string) { + ssMap.Lock() + values := ssMap.summaryMap.Values() + ssMap.Unlock() + + schemas := make([]string, 0, len(values)) + sqls := make([]string, 0, len(values)) + for _, value := range values { + ssbd := value.(*stmtSummaryByDigest) + // `stmtType` won't change once created, so locking is not needed. + if ssbd.stmtType == "select" { + ssbd.Lock() + if ssbd.execCount > 1 { + schemas = append(schemas, ssbd.schemaName) + sqls = append(sqls, ssbd.sampleSQL) + } + ssbd.Unlock() + } + } + return schemas, sqls +} + // SetEnabled enables or disables statement summary in global(cluster) or session(server) scope. func (ssMap *stmtSummaryByDigestMap) SetEnabled(value string, inSession bool) { value = ssMap.normalizeEnableValue(value) @@ -259,17 +281,18 @@ func (ssMap *stmtSummaryByDigestMap) SetEnabled(value string, inSession bool) { // Enabled returns whether statement summary is enabled. func (ssMap *stmtSummaryByDigestMap) Enabled() bool { ssMap.enabledWrapper.RLock() + defer ssMap.enabledWrapper.RUnlock() + var enabled bool if ssMap.isSet(ssMap.enabledWrapper.sessionEnabled) { enabled = ssMap.isEnabled(ssMap.enabledWrapper.sessionEnabled) } else { enabled = ssMap.isEnabled(ssMap.enabledWrapper.globalEnabled) } - ssMap.enabledWrapper.RUnlock() return enabled } -// normalizeEnableValue converts 'ON' to '1' and 'OFF' to '0' +// normalizeEnableValue converts 'ON' to '1' and 'OFF' to '0'. func (ssMap *stmtSummaryByDigestMap) normalizeEnableValue(value string) string { switch { case strings.EqualFold(value, "ON"): @@ -291,3 +314,301 @@ func (ssMap *stmtSummaryByDigestMap) isEnabled(value string) bool { func (ssMap *stmtSummaryByDigestMap) isSet(value string) bool { return value != "" } + +// newStmtSummaryByDigest creates a stmtSummaryByDigest from StmtExecInfo. +func newStmtSummaryByDigest(sei *StmtExecInfo) *stmtSummaryByDigest { + // Trim SQL to size MaxSQLLength. + maxSQLLength := config.GetGlobalConfig().StmtSummary.MaxSQLLength + normalizedSQL := sei.NormalizedSQL + if len(normalizedSQL) > int(maxSQLLength) { + normalizedSQL = normalizedSQL[:maxSQLLength] + } + + // Use "," to separate table names and index names to support FIND_IN_SET. 
+ var buffer bytes.Buffer + for i, value := range sei.StmtCtx.Tables { + buffer.WriteString(strings.ToLower(value.DB)) + buffer.WriteString(".") + buffer.WriteString(strings.ToLower(value.Table)) + if i < len(sei.StmtCtx.Tables)-1 { + buffer.WriteString(",") + } + } + tableNames := buffer.String() + + ssbd := &stmtSummaryByDigest{ + schemaName: sei.SchemaName, + digest: sei.Digest, + stmtType: strings.ToLower(sei.StmtCtx.StmtType), + normalizedSQL: normalizedSQL, + tableNames: tableNames, + indexNames: strings.Join(sei.StmtCtx.IndexNames, ","), + minLatency: sei.TotalLatency, + backoffTypes: make(map[fmt.Stringer]int), + firstSeen: sei.StartTime, + lastSeen: sei.StartTime, + } + ssbd.add(sei) + return ssbd +} + +func (ssbd *stmtSummaryByDigest) add(sei *StmtExecInfo) { + ssbd.Lock() + defer ssbd.Unlock() + + if sei.User != "" { + ssbd.sampleUser = sei.User + } + + maxSQLLength := config.GetGlobalConfig().StmtSummary.MaxSQLLength + sampleSQL := sei.OriginalSQL + if len(sampleSQL) > int(maxSQLLength) { + sampleSQL = sampleSQL[:maxSQLLength] + } + ssbd.sampleSQL = sampleSQL + ssbd.execCount++ + + // latency + ssbd.sumLatency += sei.TotalLatency + if sei.TotalLatency > ssbd.maxLatency { + ssbd.maxLatency = sei.TotalLatency + } + if sei.TotalLatency < ssbd.minLatency { + ssbd.minLatency = sei.TotalLatency + } + ssbd.sumParseLatency += sei.ParseLatency + if sei.ParseLatency > ssbd.maxParseLatency { + ssbd.maxParseLatency = sei.ParseLatency + } + ssbd.sumCompileLatency += sei.CompileLatency + if sei.CompileLatency > ssbd.maxCompileLatency { + ssbd.maxCompileLatency = sei.CompileLatency + } + + // coprocessor + numCopTasks := int64(sei.CopTasks.NumCopTasks) + ssbd.numCopTasks += numCopTasks + ssbd.sumCopProcessTime += sei.CopTasks.AvgProcessTime.Nanoseconds() * numCopTasks + if sei.CopTasks.MaxProcessTime > ssbd.maxCopProcessTime { + ssbd.maxCopProcessTime = sei.CopTasks.MaxProcessTime + ssbd.maxCopProcessAddress = sei.CopTasks.MaxProcessAddress + } + ssbd.sumCopWaitTime += sei.CopTasks.AvgWaitTime.Nanoseconds() * numCopTasks + if sei.CopTasks.MaxWaitTime > ssbd.maxCopWaitTime { + ssbd.maxCopWaitTime = sei.CopTasks.MaxWaitTime + ssbd.maxCopWaitAddress = sei.CopTasks.MaxWaitAddress + } + + // TiKV + ssbd.sumProcessTime += sei.ExecDetail.ProcessTime + if sei.ExecDetail.ProcessTime > ssbd.maxProcessTime { + ssbd.maxProcessTime = sei.ExecDetail.ProcessTime + } + ssbd.sumWaitTime += sei.ExecDetail.WaitTime + if sei.ExecDetail.WaitTime > ssbd.maxWaitTime { + ssbd.maxWaitTime = sei.ExecDetail.WaitTime + } + ssbd.sumBackoffTime += sei.ExecDetail.BackoffTime + if sei.ExecDetail.BackoffTime > ssbd.maxBackoffTime { + ssbd.maxBackoffTime = sei.ExecDetail.BackoffTime + } + ssbd.sumTotalKeys += sei.ExecDetail.TotalKeys + if sei.ExecDetail.TotalKeys > ssbd.maxTotalKeys { + ssbd.maxTotalKeys = sei.ExecDetail.TotalKeys + } + ssbd.sumProcessedKeys += sei.ExecDetail.ProcessedKeys + if sei.ExecDetail.ProcessedKeys > ssbd.maxProcessedKeys { + ssbd.maxProcessedKeys = sei.ExecDetail.ProcessedKeys + } + + // txn + commitDetails := sei.ExecDetail.CommitDetail + if commitDetails != nil { + ssbd.commitCount++ + ssbd.sumPrewriteTime += commitDetails.PrewriteTime + if commitDetails.PrewriteTime > ssbd.maxPrewriteTime { + ssbd.maxPrewriteTime = commitDetails.PrewriteTime + } + ssbd.sumCommitTime += commitDetails.CommitTime + if commitDetails.CommitTime > ssbd.maxCommitTime { + ssbd.maxCommitTime = commitDetails.CommitTime + } + ssbd.sumGetCommitTsTime += commitDetails.GetCommitTsTime + if commitDetails.GetCommitTsTime > 
ssbd.maxGetCommitTsTime { + ssbd.maxGetCommitTsTime = commitDetails.GetCommitTsTime + } + ssbd.sumCommitBackoffTime += commitDetails.CommitBackoffTime + if commitDetails.CommitBackoffTime > ssbd.maxCommitBackoffTime { + ssbd.maxCommitBackoffTime = commitDetails.CommitBackoffTime + } + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + ssbd.sumResolveLockTime += resolveLockTime + if resolveLockTime > ssbd.maxResolveLockTime { + ssbd.maxResolveLockTime = resolveLockTime + } + ssbd.sumLocalLatchTime += commitDetails.LocalLatchTime + if commitDetails.LocalLatchTime > ssbd.maxLocalLatchTime { + ssbd.maxLocalLatchTime = commitDetails.LocalLatchTime + } + ssbd.sumWriteKeys += int64(commitDetails.WriteKeys) + if commitDetails.WriteKeys > ssbd.maxWriteKeys { + ssbd.maxWriteKeys = commitDetails.WriteKeys + } + ssbd.sumWriteSize += int64(commitDetails.WriteSize) + if commitDetails.WriteSize > ssbd.maxWriteSize { + ssbd.maxWriteSize = commitDetails.WriteSize + } + prewriteRegionNum := atomic.LoadInt32(&commitDetails.PrewriteRegionNum) + ssbd.sumPrewriteRegionNum += int64(prewriteRegionNum) + if prewriteRegionNum > ssbd.maxPrewriteRegionNum { + ssbd.maxPrewriteRegionNum = prewriteRegionNum + } + ssbd.sumTxnRetry += int64(commitDetails.TxnRetry) + if commitDetails.TxnRetry > ssbd.maxTxnRetry { + ssbd.maxTxnRetry = commitDetails.TxnRetry + } + commitDetails.Mu.Lock() + for _, backoffType := range commitDetails.Mu.BackoffTypes { + if _, ok := ssbd.backoffTypes[backoffType]; ok { + ssbd.backoffTypes[backoffType] += 1 + } else { + ssbd.backoffTypes[backoffType] = 1 + } + } + commitDetails.Mu.Unlock() + } + + // other + ssbd.sumAffectedRows += sei.StmtCtx.AffectedRows() + ssbd.sumMem += sei.MemMax + if sei.MemMax > ssbd.maxMem { + ssbd.maxMem = sei.MemMax + } + if sei.StartTime.Before(ssbd.firstSeen) { + ssbd.firstSeen = sei.StartTime + } + if ssbd.lastSeen.Before(sei.StartTime) { + ssbd.lastSeen = sei.StartTime + } +} + +func (ssbd *stmtSummaryByDigest) toDatum() []types.Datum { + ssbd.Lock() + defer ssbd.Unlock() + + return types.MakeDatums( + ssbd.stmtType, + ssbd.schemaName, + ssbd.digest, + ssbd.normalizedSQL, + convertEmptyToNil(ssbd.tableNames), + convertEmptyToNil(ssbd.indexNames), + convertEmptyToNil(ssbd.sampleUser), + ssbd.execCount, + int64(ssbd.sumLatency), + int64(ssbd.maxLatency), + int64(ssbd.minLatency), + avgInt(int64(ssbd.sumLatency), ssbd.execCount), + avgInt(int64(ssbd.sumParseLatency), ssbd.execCount), + int64(ssbd.maxParseLatency), + avgInt(int64(ssbd.sumCompileLatency), ssbd.execCount), + int64(ssbd.maxCompileLatency), + ssbd.numCopTasks, + avgInt(ssbd.sumCopProcessTime, ssbd.numCopTasks), + int64(ssbd.maxCopProcessTime), + convertEmptyToNil(ssbd.maxCopProcessAddress), + avgInt(ssbd.sumCopWaitTime, ssbd.numCopTasks), + int64(ssbd.maxCopWaitTime), + convertEmptyToNil(ssbd.maxCopWaitAddress), + avgInt(int64(ssbd.sumProcessTime), ssbd.execCount), + int64(ssbd.maxProcessTime), + avgInt(int64(ssbd.sumWaitTime), ssbd.execCount), + int64(ssbd.maxWaitTime), + avgInt(int64(ssbd.sumBackoffTime), ssbd.execCount), + int64(ssbd.maxBackoffTime), + avgInt(ssbd.sumTotalKeys, ssbd.execCount), + ssbd.maxTotalKeys, + avgInt(ssbd.sumProcessedKeys, ssbd.execCount), + ssbd.maxProcessedKeys, + avgInt(int64(ssbd.sumPrewriteTime), ssbd.commitCount), + int64(ssbd.maxPrewriteTime), + avgInt(int64(ssbd.sumCommitTime), ssbd.commitCount), + int64(ssbd.maxCommitTime), + avgInt(int64(ssbd.sumGetCommitTsTime), ssbd.commitCount), + int64(ssbd.maxGetCommitTsTime), + avgInt(ssbd.sumCommitBackoffTime, 
ssbd.commitCount), + ssbd.maxCommitBackoffTime, + avgInt(ssbd.sumResolveLockTime, ssbd.commitCount), + ssbd.maxResolveLockTime, + avgInt(int64(ssbd.sumLocalLatchTime), ssbd.commitCount), + int64(ssbd.maxLocalLatchTime), + avgFloat(ssbd.sumWriteKeys, ssbd.commitCount), + ssbd.maxWriteKeys, + avgFloat(ssbd.sumWriteSize, ssbd.commitCount), + ssbd.maxWriteSize, + avgFloat(ssbd.sumPrewriteRegionNum, ssbd.commitCount), + int(ssbd.maxPrewriteRegionNum), + avgFloat(ssbd.sumTxnRetry, ssbd.commitCount), + ssbd.maxTxnRetry, + formatBackoffTypes(ssbd.backoffTypes), + avgInt(ssbd.sumMem, ssbd.execCount), + ssbd.maxMem, + avgFloat(int64(ssbd.sumAffectedRows), ssbd.execCount), + types.Time{Time: types.FromGoTime(ssbd.firstSeen), Type: mysql.TypeTimestamp}, + types.Time{Time: types.FromGoTime(ssbd.lastSeen), Type: mysql.TypeTimestamp}, + ssbd.sampleSQL, + ) +} + +// Format the backoffType map to a string or nil. +func formatBackoffTypes(backoffMap map[fmt.Stringer]int) interface{} { + type backoffStat struct { + backoffType fmt.Stringer + count int + } + + size := len(backoffMap) + if size == 0 { + return nil + } + + backoffArray := make([]backoffStat, 0, len(backoffMap)) + for backoffType, count := range backoffMap { + backoffArray = append(backoffArray, backoffStat{backoffType, count}) + } + sort.Slice(backoffArray, func(i, j int) bool { + return backoffArray[i].count > backoffArray[j].count + }) + + var buffer bytes.Buffer + for index, stat := range backoffArray { + if _, err := fmt.Fprintf(&buffer, "%v:%d", stat.backoffType, stat.count); err != nil { + return "FORMAT ERROR" + } + if index < len(backoffArray)-1 { + buffer.WriteString(",") + } + } + return buffer.String() +} + +func avgInt(sum int64, count int64) int64 { + if count > 0 { + return sum / count + } + return 0 +} + +func avgFloat(sum int64, count int64) float64 { + if count > 0 { + return float64(sum) / float64(count) + } + return 0 +} + +func convertEmptyToNil(str string) interface{} { + if str == "" { + return nil + } + return str +} diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index 5d742b1d08396..290c9b403ced3 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -23,7 +23,10 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/execdetails" ) var _ = Suite(&testStmtSummarySuite{}) @@ -42,134 +45,396 @@ func TestT(t *testing.T) { TestingT(t) } -// Test stmtSummaryByDigest.AddStatement +// Test stmtSummaryByDigest.AddStatement. 
func (s *testStmtSummarySuite) TestAddStatement(c *C) { s.ssMap.Clear() - // First statement - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } + tables := []stmtctx.TableEntry{{DB: "db1", Table: "tb1"}, {DB: "db2", Table: "tb2"}} + indexes := []string{"a"} + + // first statement + stmtExecInfo1 := generateAnyExecInfo() + stmtExecInfo1.ExecDetail.CommitDetail.Mu.BackoffTypes = make([]fmt.Stringer, 0) key := &stmtSummaryByDigestKey{ schemaName: stmtExecInfo1.SchemaName, digest: stmtExecInfo1.Digest, } expectedSummary := stmtSummaryByDigest{ - schemaName: stmtExecInfo1.SchemaName, - digest: stmtExecInfo1.Digest, - normalizedSQL: stmtExecInfo1.NormalizedSQL, - sampleSQL: stmtExecInfo1.OriginalSQL, - execCount: 1, - sumLatency: stmtExecInfo1.TotalLatency, - maxLatency: stmtExecInfo1.TotalLatency, - minLatency: stmtExecInfo1.TotalLatency, - sumAffectedRows: stmtExecInfo1.AffectedRows, - sumSentRows: stmtExecInfo1.SentRows, - firstSeen: stmtExecInfo1.StartTime, - lastSeen: stmtExecInfo1.StartTime, + schemaName: stmtExecInfo1.SchemaName, + stmtType: stmtExecInfo1.StmtCtx.StmtType, + digest: stmtExecInfo1.Digest, + normalizedSQL: stmtExecInfo1.NormalizedSQL, + sampleSQL: stmtExecInfo1.OriginalSQL, + tableNames: "db1.tb1,db2.tb2", + indexNames: "a", + sampleUser: stmtExecInfo1.User, + execCount: 1, + sumLatency: stmtExecInfo1.TotalLatency, + maxLatency: stmtExecInfo1.TotalLatency, + minLatency: stmtExecInfo1.TotalLatency, + sumParseLatency: stmtExecInfo1.ParseLatency, + maxParseLatency: stmtExecInfo1.ParseLatency, + sumCompileLatency: stmtExecInfo1.CompileLatency, + maxCompileLatency: stmtExecInfo1.CompileLatency, + numCopTasks: int64(stmtExecInfo1.CopTasks.NumCopTasks), + sumCopProcessTime: int64(stmtExecInfo1.CopTasks.AvgProcessTime) * int64(stmtExecInfo1.CopTasks.NumCopTasks), + maxCopProcessTime: stmtExecInfo1.CopTasks.MaxProcessTime, + maxCopProcessAddress: stmtExecInfo1.CopTasks.MaxProcessAddress, + sumCopWaitTime: int64(stmtExecInfo1.CopTasks.AvgWaitTime) * int64(stmtExecInfo1.CopTasks.NumCopTasks), + maxCopWaitTime: stmtExecInfo1.CopTasks.MaxWaitTime, + maxCopWaitAddress: stmtExecInfo1.CopTasks.MaxWaitAddress, + sumProcessTime: stmtExecInfo1.ExecDetail.ProcessTime, + maxProcessTime: stmtExecInfo1.ExecDetail.ProcessTime, + sumWaitTime: stmtExecInfo1.ExecDetail.WaitTime, + maxWaitTime: stmtExecInfo1.ExecDetail.WaitTime, + sumBackoffTime: stmtExecInfo1.ExecDetail.BackoffTime, + maxBackoffTime: stmtExecInfo1.ExecDetail.BackoffTime, + sumTotalKeys: stmtExecInfo1.ExecDetail.TotalKeys, + maxTotalKeys: stmtExecInfo1.ExecDetail.TotalKeys, + sumProcessedKeys: stmtExecInfo1.ExecDetail.ProcessedKeys, + maxProcessedKeys: stmtExecInfo1.ExecDetail.ProcessedKeys, + sumGetCommitTsTime: stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime, + maxGetCommitTsTime: stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime, + sumPrewriteTime: stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime, + maxPrewriteTime: stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime, + sumCommitTime: stmtExecInfo1.ExecDetail.CommitDetail.CommitTime, + maxCommitTime: stmtExecInfo1.ExecDetail.CommitDetail.CommitTime, + sumLocalLatchTime: stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime, + maxLocalLatchTime: stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime, + sumCommitBackoffTime: 
stmtExecInfo1.ExecDetail.CommitDetail.CommitBackoffTime, + maxCommitBackoffTime: stmtExecInfo1.ExecDetail.CommitDetail.CommitBackoffTime, + sumResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + maxResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + sumWriteKeys: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys), + maxWriteKeys: stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, + sumWriteSize: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteSize), + maxWriteSize: stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, + sumPrewriteRegionNum: int64(stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum), + maxPrewriteRegionNum: stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, + sumTxnRetry: int64(stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry), + maxTxnRetry: stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, + backoffTypes: make(map[fmt.Stringer]int), + sumMem: stmtExecInfo1.MemMax, + maxMem: stmtExecInfo1.MemMax, + sumAffectedRows: stmtExecInfo1.StmtCtx.AffectedRows(), + firstSeen: stmtExecInfo1.StartTime, + lastSeen: stmtExecInfo1.StartTime, } - s.ssMap.AddStatement(stmtExecInfo1) summary, ok := s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) - c.Assert(*summary.(*stmtSummaryByDigest) == expectedSummary, IsTrue) + c.Assert(matchStmtSummaryByDigest(summary.(*stmtSummaryByDigest), &expectedSummary), IsTrue) - // Second statement + // Second statement is similar to the first statement, and its values are + // greater than those of the first statement. stmtExecInfo2 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql2", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 50000, - AffectedRows: 500, - SentRows: 500, - StartTime: time.Date(2019, 1, 1, 10, 10, 20, 10, time.UTC), + SchemaName: "schema_name", + OriginalSQL: "original_sql2", + NormalizedSQL: "normalized_sql", + Digest: "digest", + User: "user2", + TotalLatency: 20000, + ParseLatency: 200, + CompileLatency: 2000, + CopTasks: &stmtctx.CopTasksDetails{ + NumCopTasks: 20, + AvgProcessTime: 2000, + P90ProcessTime: 20000, + MaxProcessAddress: "200", + MaxProcessTime: 25000, + AvgWaitTime: 200, + P90WaitTime: 2000, + MaxWaitAddress: "201", + MaxWaitTime: 2500, + }, + ExecDetail: &execdetails.ExecDetails{ + CalleeAddress: "202", + ProcessTime: 1500, + WaitTime: 150, + BackoffTime: 180, + RequestCount: 20, + TotalKeys: 6000, + ProcessedKeys: 1500, + CommitDetail: &execdetails.CommitDetails{ + GetCommitTsTime: 500, + PrewriteTime: 50000, + CommitTime: 5000, + LocalLatchTime: 50, + CommitBackoffTime: 1000, + Mu: struct { + sync.Mutex + BackoffTypes []fmt.Stringer + }{ + BackoffTypes: []fmt.Stringer{tikv.BoTxnLock}, + }, + ResolveLockTime: 10000, + WriteKeys: 100000, + WriteSize: 1000000, + PrewriteRegionNum: 100, + TxnRetry: 10, + }, + }, + StmtCtx: &stmtctx.StatementContext{ + StmtType: "Select", + Tables: tables, + IndexNames: indexes, + }, + MemMax: 20000, + StartTime: time.Date(2019, 1, 1, 10, 10, 20, 10, time.UTC), } + stmtExecInfo2.StmtCtx.AddAffectedRows(200) expectedSummary.execCount++ + expectedSummary.sampleSQL = stmtExecInfo2.OriginalSQL + expectedSummary.sampleUser = stmtExecInfo2.User expectedSummary.sumLatency += stmtExecInfo2.TotalLatency expectedSummary.maxLatency = stmtExecInfo2.TotalLatency - expectedSummary.sumAffectedRows += stmtExecInfo2.AffectedRows - expectedSummary.sumSentRows += stmtExecInfo2.SentRows + expectedSummary.sumParseLatency += stmtExecInfo2.ParseLatency + expectedSummary.maxParseLatency = stmtExecInfo2.ParseLatency
+ expectedSummary.sumCompileLatency += stmtExecInfo2.CompileLatency + expectedSummary.maxCompileLatency = stmtExecInfo2.CompileLatency + expectedSummary.numCopTasks += int64(stmtExecInfo2.CopTasks.NumCopTasks) + expectedSummary.sumCopProcessTime += int64(stmtExecInfo2.CopTasks.AvgProcessTime) * int64(stmtExecInfo2.CopTasks.NumCopTasks) + expectedSummary.maxCopProcessTime = stmtExecInfo2.CopTasks.MaxProcessTime + expectedSummary.maxCopProcessAddress = stmtExecInfo2.CopTasks.MaxProcessAddress + expectedSummary.sumCopWaitTime += int64(stmtExecInfo2.CopTasks.AvgWaitTime) * int64(stmtExecInfo2.CopTasks.NumCopTasks) + expectedSummary.maxCopWaitTime = stmtExecInfo2.CopTasks.MaxWaitTime + expectedSummary.maxCopWaitAddress = stmtExecInfo2.CopTasks.MaxWaitAddress + expectedSummary.sumProcessTime += stmtExecInfo2.ExecDetail.ProcessTime + expectedSummary.maxProcessTime = stmtExecInfo2.ExecDetail.ProcessTime + expectedSummary.sumWaitTime += stmtExecInfo2.ExecDetail.WaitTime + expectedSummary.maxWaitTime = stmtExecInfo2.ExecDetail.WaitTime + expectedSummary.sumBackoffTime += stmtExecInfo2.ExecDetail.BackoffTime + expectedSummary.maxBackoffTime = stmtExecInfo2.ExecDetail.BackoffTime + expectedSummary.sumTotalKeys += stmtExecInfo2.ExecDetail.TotalKeys + expectedSummary.maxTotalKeys = stmtExecInfo2.ExecDetail.TotalKeys + expectedSummary.sumProcessedKeys += stmtExecInfo2.ExecDetail.ProcessedKeys + expectedSummary.maxProcessedKeys = stmtExecInfo2.ExecDetail.ProcessedKeys + expectedSummary.sumGetCommitTsTime += stmtExecInfo2.ExecDetail.CommitDetail.GetCommitTsTime + expectedSummary.maxGetCommitTsTime = stmtExecInfo2.ExecDetail.CommitDetail.GetCommitTsTime + expectedSummary.sumPrewriteTime += stmtExecInfo2.ExecDetail.CommitDetail.PrewriteTime + expectedSummary.maxPrewriteTime = stmtExecInfo2.ExecDetail.CommitDetail.PrewriteTime + expectedSummary.sumCommitTime += stmtExecInfo2.ExecDetail.CommitDetail.CommitTime + expectedSummary.maxCommitTime = stmtExecInfo2.ExecDetail.CommitDetail.CommitTime + expectedSummary.sumLocalLatchTime += stmtExecInfo2.ExecDetail.CommitDetail.LocalLatchTime + expectedSummary.maxLocalLatchTime = stmtExecInfo2.ExecDetail.CommitDetail.LocalLatchTime + expectedSummary.sumCommitBackoffTime += stmtExecInfo2.ExecDetail.CommitDetail.CommitBackoffTime + expectedSummary.maxCommitBackoffTime = stmtExecInfo2.ExecDetail.CommitDetail.CommitBackoffTime + expectedSummary.sumResolveLockTime += stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime + expectedSummary.maxResolveLockTime = stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime + expectedSummary.sumWriteKeys += int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys) + expectedSummary.maxWriteKeys = stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys + expectedSummary.sumWriteSize += int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteSize) + expectedSummary.maxWriteSize = stmtExecInfo2.ExecDetail.CommitDetail.WriteSize + expectedSummary.sumPrewriteRegionNum += int64(stmtExecInfo2.ExecDetail.CommitDetail.PrewriteRegionNum) + expectedSummary.maxPrewriteRegionNum = stmtExecInfo2.ExecDetail.CommitDetail.PrewriteRegionNum + expectedSummary.sumTxnRetry += int64(stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry) + expectedSummary.maxTxnRetry = stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry + expectedSummary.backoffTypes[tikv.BoTxnLock] = 1 + expectedSummary.sumMem += stmtExecInfo2.MemMax + expectedSummary.maxMem = stmtExecInfo2.MemMax + expectedSummary.sumAffectedRows += stmtExecInfo2.StmtCtx.AffectedRows() expectedSummary.lastSeen = stmtExecInfo2.StartTime 
s.ssMap.AddStatement(stmtExecInfo2) summary, ok = s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) - c.Assert(*summary.(*stmtSummaryByDigest) == expectedSummary, IsTrue) + c.Assert(matchStmtSummaryByDigest(summary.(*stmtSummaryByDigest), &expectedSummary), IsTrue) - // Third statement + // Third statement is similar to the first statement, and its values are + // less than those of the first statement. stmtExecInfo3 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql3", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 1000, - AffectedRows: 10, - SentRows: 10, - StartTime: time.Date(2019, 1, 1, 10, 10, 0, 10, time.UTC), + SchemaName: "schema_name", + OriginalSQL: "original_sql3", + NormalizedSQL: "normalized_sql", + Digest: "digest", + User: "user3", + TotalLatency: 1000, + ParseLatency: 50, + CompileLatency: 500, + CopTasks: &stmtctx.CopTasksDetails{ + NumCopTasks: 2, + AvgProcessTime: 100, + P90ProcessTime: 300, + MaxProcessAddress: "300", + MaxProcessTime: 350, + AvgWaitTime: 20, + P90WaitTime: 200, + MaxWaitAddress: "301", + MaxWaitTime: 250, + }, + ExecDetail: &execdetails.ExecDetails{ + CalleeAddress: "302", + ProcessTime: 150, + WaitTime: 15, + BackoffTime: 18, + RequestCount: 2, + TotalKeys: 600, + ProcessedKeys: 150, + CommitDetail: &execdetails.CommitDetails{ + GetCommitTsTime: 50, + PrewriteTime: 5000, + CommitTime: 500, + LocalLatchTime: 5, + CommitBackoffTime: 100, + Mu: struct { + sync.Mutex + BackoffTypes []fmt.Stringer + }{ + BackoffTypes: []fmt.Stringer{tikv.BoTxnLock}, + }, + ResolveLockTime: 1000, + WriteKeys: 10000, + WriteSize: 100000, + PrewriteRegionNum: 10, + TxnRetry: 1, + }, + }, + StmtCtx: &stmtctx.StatementContext{ + StmtType: "Select", + Tables: tables, + IndexNames: indexes, + }, + MemMax: 200, + StartTime: time.Date(2019, 1, 1, 10, 10, 0, 10, time.UTC), } + stmtExecInfo3.StmtCtx.AddAffectedRows(20000) expectedSummary.execCount++ + expectedSummary.sampleUser = stmtExecInfo3.User + expectedSummary.sampleSQL = stmtExecInfo3.OriginalSQL expectedSummary.sumLatency += stmtExecInfo3.TotalLatency expectedSummary.minLatency = stmtExecInfo3.TotalLatency - expectedSummary.sumAffectedRows += stmtExecInfo3.AffectedRows - expectedSummary.sumSentRows += stmtExecInfo3.SentRows + expectedSummary.sumParseLatency += stmtExecInfo3.ParseLatency + expectedSummary.sumCompileLatency += stmtExecInfo3.CompileLatency + expectedSummary.numCopTasks += int64(stmtExecInfo3.CopTasks.NumCopTasks) + expectedSummary.sumCopProcessTime += int64(stmtExecInfo3.CopTasks.AvgProcessTime) * int64(stmtExecInfo3.CopTasks.NumCopTasks) + expectedSummary.sumCopWaitTime += int64(stmtExecInfo3.CopTasks.AvgWaitTime) * int64(stmtExecInfo3.CopTasks.NumCopTasks) + expectedSummary.sumProcessTime += stmtExecInfo3.ExecDetail.ProcessTime + expectedSummary.sumWaitTime += stmtExecInfo3.ExecDetail.WaitTime + expectedSummary.sumBackoffTime += stmtExecInfo3.ExecDetail.BackoffTime + expectedSummary.sumTotalKeys += stmtExecInfo3.ExecDetail.TotalKeys + expectedSummary.sumProcessedKeys += stmtExecInfo3.ExecDetail.ProcessedKeys + expectedSummary.sumGetCommitTsTime += stmtExecInfo3.ExecDetail.CommitDetail.GetCommitTsTime + expectedSummary.sumPrewriteTime += stmtExecInfo3.ExecDetail.CommitDetail.PrewriteTime + expectedSummary.sumCommitTime += stmtExecInfo3.ExecDetail.CommitDetail.CommitTime + expectedSummary.sumLocalLatchTime += stmtExecInfo3.ExecDetail.CommitDetail.LocalLatchTime + expectedSummary.sumCommitBackoffTime += stmtExecInfo3.ExecDetail.CommitDetail.CommitBackoffTime + 
expectedSummary.sumResolveLockTime += stmtExecInfo3.ExecDetail.CommitDetail.ResolveLockTime + expectedSummary.sumWriteKeys += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteKeys) + expectedSummary.sumWriteSize += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteSize) + expectedSummary.sumPrewriteRegionNum += int64(stmtExecInfo3.ExecDetail.CommitDetail.PrewriteRegionNum) + expectedSummary.sumTxnRetry += int64(stmtExecInfo3.ExecDetail.CommitDetail.TxnRetry) + expectedSummary.backoffTypes[tikv.BoTxnLock] = 2 + expectedSummary.sumMem += stmtExecInfo3.MemMax + expectedSummary.sumAffectedRows += stmtExecInfo3.StmtCtx.AffectedRows() expectedSummary.firstSeen = stmtExecInfo3.StartTime s.ssMap.AddStatement(stmtExecInfo3) summary, ok = s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) - c.Assert(*summary.(*stmtSummaryByDigest) == expectedSummary, IsTrue) - - // Fourth statement that in a different schema - stmtExecInfo4 := &StmtExecInfo{ - SchemaName: "schema_name2", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 1000, - AffectedRows: 10, - SentRows: 10, - StartTime: time.Date(2019, 1, 1, 10, 10, 0, 10, time.UTC), - } + c.Assert(matchStmtSummaryByDigest(summary.(*stmtSummaryByDigest), &expectedSummary), IsTrue) + + // Fourth statement is in a different schema. + stmtExecInfo4 := stmtExecInfo1 + stmtExecInfo4.SchemaName = "schema2" + stmtExecInfo4.ExecDetail.CommitDetail = nil key = &stmtSummaryByDigestKey{ schemaName: stmtExecInfo4.SchemaName, digest: stmtExecInfo4.Digest, } - s.ssMap.AddStatement(stmtExecInfo4) c.Assert(s.ssMap.summaryMap.Size(), Equals, 2) _, ok = s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) - // Fifth statement that has a different digest - stmtExecInfo5 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql2", - Digest: "digest2", - TotalLatency: 1000, - AffectedRows: 10, - SentRows: 10, - StartTime: time.Date(2019, 1, 1, 10, 10, 0, 10, time.UTC), - } + // Fifth statement has a different digest. 
+ stmtExecInfo5 := stmtExecInfo1 + stmtExecInfo5.Digest = "digest2" key = &stmtSummaryByDigestKey{ schemaName: stmtExecInfo5.SchemaName, digest: stmtExecInfo5.Digest, } - s.ssMap.AddStatement(stmtExecInfo5) c.Assert(s.ssMap.summaryMap.Size(), Equals, 3) _, ok = s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) } +func matchStmtSummaryByDigest(first, second *stmtSummaryByDigest) bool { + if first.schemaName != second.schemaName || + !strings.EqualFold(first.stmtType, second.stmtType) || + first.digest != second.digest || + first.normalizedSQL != second.normalizedSQL || + first.sampleSQL != second.sampleSQL || + first.sampleUser != second.sampleUser || + first.tableNames != second.tableNames || + first.indexNames != second.indexNames || + first.execCount != second.execCount || + first.sumLatency != second.sumLatency || + first.maxLatency != second.maxLatency || + first.minLatency != second.minLatency || + first.sumParseLatency != second.sumParseLatency || + first.maxParseLatency != second.maxParseLatency || + first.sumCompileLatency != second.sumCompileLatency || + first.maxCompileLatency != second.maxCompileLatency || + first.numCopTasks != second.numCopTasks || + first.sumCopProcessTime != second.sumCopProcessTime || + first.maxCopProcessTime != second.maxCopProcessTime || + first.maxCopProcessAddress != second.maxCopProcessAddress || + first.sumCopWaitTime != second.sumCopWaitTime || + first.maxCopWaitTime != second.maxCopWaitTime || + first.maxCopWaitAddress != second.maxCopWaitAddress || + first.sumProcessTime != second.sumProcessTime || + first.maxProcessTime != second.maxProcessTime || + first.sumWaitTime != second.sumWaitTime || + first.maxWaitTime != second.maxWaitTime || + first.sumBackoffTime != second.sumBackoffTime || + first.maxBackoffTime != second.maxBackoffTime || + first.sumTotalKeys != second.sumTotalKeys || + first.maxTotalKeys != second.maxTotalKeys || + first.sumProcessedKeys != second.sumProcessedKeys || + first.maxProcessedKeys != second.maxProcessedKeys || + first.sumGetCommitTsTime != second.sumGetCommitTsTime || + first.maxGetCommitTsTime != second.maxGetCommitTsTime || + first.sumPrewriteTime != second.sumPrewriteTime || + first.maxPrewriteTime != second.maxPrewriteTime || + first.sumCommitTime != second.sumCommitTime || + first.maxCommitTime != second.maxCommitTime || + first.sumLocalLatchTime != second.sumLocalLatchTime || + first.maxLocalLatchTime != second.maxLocalLatchTime || + first.sumCommitBackoffTime != second.sumCommitBackoffTime || + first.maxCommitBackoffTime != second.maxCommitBackoffTime || + first.sumResolveLockTime != second.sumResolveLockTime || + first.maxResolveLockTime != second.maxResolveLockTime || + first.sumWriteKeys != second.sumWriteKeys || + first.maxWriteKeys != second.maxWriteKeys || + first.sumWriteSize != second.sumWriteSize || + first.maxWriteSize != second.maxWriteSize || + first.sumPrewriteRegionNum != second.sumPrewriteRegionNum || + first.maxPrewriteRegionNum != second.maxPrewriteRegionNum || + first.sumTxnRetry != second.sumTxnRetry || + first.maxTxnRetry != second.maxTxnRetry || + first.sumMem != second.sumMem || + first.maxMem != second.maxMem || + first.sumAffectedRows != second.sumAffectedRows || + first.firstSeen != second.firstSeen || + first.lastSeen != second.lastSeen { + return false + } + if len(first.backoffTypes) != len(second.backoffTypes) { + return false + } + for key, value1 := range first.backoffTypes { + value2, ok := second.backoffTypes[key] + if !ok { + return false + } + if value1 != 
value2 { + return false + } + } + return true +} + func match(c *C, row []types.Datum, expected ...interface{}) { c.Assert(len(row), Equals, len(expected)) for i := range row { @@ -179,30 +444,103 @@ func match(c *C, row []types.Datum, expected ...interface{}) { } } -// Test stmtSummaryByDigest.ToDatum +func generateAnyExecInfo() *StmtExecInfo { + tables := []stmtctx.TableEntry{{DB: "db1", Table: "tb1"}, {DB: "db2", Table: "tb2"}} + indexes := []string{"a"} + stmtExecInfo := &StmtExecInfo{ + SchemaName: "schema_name", + OriginalSQL: "original_sql1", + NormalizedSQL: "normalized_sql", + Digest: "digest", + User: "user", + TotalLatency: 10000, + ParseLatency: 100, + CompileLatency: 1000, + CopTasks: &stmtctx.CopTasksDetails{ + NumCopTasks: 10, + AvgProcessTime: 1000, + P90ProcessTime: 10000, + MaxProcessAddress: "127", + MaxProcessTime: 15000, + AvgWaitTime: 100, + P90WaitTime: 1000, + MaxWaitAddress: "128", + MaxWaitTime: 1500, + }, + ExecDetail: &execdetails.ExecDetails{ + CalleeAddress: "129", + ProcessTime: 500, + WaitTime: 50, + BackoffTime: 80, + RequestCount: 10, + TotalKeys: 1000, + ProcessedKeys: 500, + CommitDetail: &execdetails.CommitDetails{ + GetCommitTsTime: 100, + PrewriteTime: 10000, + CommitTime: 1000, + LocalLatchTime: 10, + CommitBackoffTime: 200, + Mu: struct { + sync.Mutex + BackoffTypes []fmt.Stringer + }{ + BackoffTypes: []fmt.Stringer{tikv.BoTxnLock}, + }, + ResolveLockTime: 2000, + WriteKeys: 20000, + WriteSize: 200000, + PrewriteRegionNum: 20, + TxnRetry: 2, + }, + }, + StmtCtx: &stmtctx.StatementContext{ + StmtType: "Select", + Tables: tables, + IndexNames: indexes, + }, + MemMax: 10000, + StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), + } + stmtExecInfo.StmtCtx.AddAffectedRows(10000) + return stmtExecInfo +} + +// Test stmtSummaryByDigest.ToDatum. 
func (s *testStmtSummarySuite) TestToDatum(c *C) { s.ssMap.Clear() - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } + stmtExecInfo1 := generateAnyExecInfo() s.ssMap.AddStatement(stmtExecInfo1) datums := s.ssMap.ToDatum() c.Assert(len(datums), Equals, 1) t := types.Time{Time: types.FromGoTime(stmtExecInfo1.StartTime), Type: mysql.TypeTimestamp} - match(c, datums[0], stmtExecInfo1.SchemaName, stmtExecInfo1.Digest, stmtExecInfo1.NormalizedSQL, - 1, stmtExecInfo1.TotalLatency, stmtExecInfo1.TotalLatency, stmtExecInfo1.TotalLatency, stmtExecInfo1.TotalLatency, - stmtExecInfo1.AffectedRows, t, t, stmtExecInfo1.OriginalSQL) + match(c, datums[0], "select", stmtExecInfo1.SchemaName, stmtExecInfo1.Digest, stmtExecInfo1.NormalizedSQL, + "db1.tb1,db2.tb2", "a", stmtExecInfo1.User, 1, int64(stmtExecInfo1.TotalLatency), + int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), + int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.CompileLatency), + int64(stmtExecInfo1.CompileLatency), stmtExecInfo1.CopTasks.NumCopTasks, int64(stmtExecInfo1.CopTasks.AvgProcessTime), + int64(stmtExecInfo1.CopTasks.MaxProcessTime), stmtExecInfo1.CopTasks.MaxProcessAddress, + int64(stmtExecInfo1.CopTasks.AvgWaitTime), int64(stmtExecInfo1.CopTasks.MaxWaitTime), + stmtExecInfo1.CopTasks.MaxWaitAddress, int64(stmtExecInfo1.ExecDetail.ProcessTime), int64(stmtExecInfo1.ExecDetail.ProcessTime), + int64(stmtExecInfo1.ExecDetail.WaitTime), int64(stmtExecInfo1.ExecDetail.WaitTime), int64(stmtExecInfo1.ExecDetail.BackoffTime), + int64(stmtExecInfo1.ExecDetail.BackoffTime), stmtExecInfo1.ExecDetail.TotalKeys, stmtExecInfo1.ExecDetail.TotalKeys, + stmtExecInfo1.ExecDetail.ProcessedKeys, stmtExecInfo1.ExecDetail.ProcessedKeys, + int64(stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime), + int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), + int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), + stmtExecInfo1.ExecDetail.CommitDetail.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.CommitBackoffTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), + stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, + stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, + stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, + stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, + "txnLock:1", stmtExecInfo1.MemMax, stmtExecInfo1.MemMax, stmtExecInfo1.StmtCtx.AffectedRows(), + t, t, stmtExecInfo1.OriginalSQL) } -// Test AddStatement and ToDatum parallel +// Test AddStatement and ToDatum parallel. 
func (s *testStmtSummarySuite) TestAddStatementParallel(c *C) { s.ssMap.Clear() @@ -213,24 +551,15 @@ func (s *testStmtSummarySuite) TestAddStatementParallel(c *C) { addStmtFunc := func() { defer wg.Done() - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } + stmtExecInfo1 := generateAnyExecInfo() - // Add 32 times with different digest + // Add 32 times with different digest. for i := 0; i < loops; i++ { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) s.ssMap.AddStatement(stmtExecInfo1) } - // There would be 32 summaries + // There would be 32 summaries. datums := s.ssMap.ToDatum() c.Assert(len(datums), Equals, loops) } @@ -248,17 +577,7 @@ func (s *testStmtSummarySuite) TestAddStatementParallel(c *C) { func (s *testStmtSummarySuite) TestMaxStmtCount(c *C) { s.ssMap.Clear() - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } - + stmtExecInfo1 := generateAnyExecInfo() maxStmtCount := config.GetGlobalConfig().StmtSummary.MaxStmtCount // 1000 digests @@ -268,11 +587,11 @@ func (s *testStmtSummarySuite) TestMaxStmtCount(c *C) { s.ssMap.AddStatement(stmtExecInfo1) } - // Summary count should be MaxStmtCount + // Summary count should be MaxStmtCount. sm := s.ssMap.summaryMap c.Assert(sm.Size(), Equals, int(maxStmtCount)) - // LRU cache should work + // LRU cache should work. for i := loops - int(maxStmtCount); i < loops; i++ { key := &stmtSummaryByDigestKey{ schemaName: stmtExecInfo1.SchemaName, @@ -292,47 +611,31 @@ func (s *testStmtSummarySuite) TestMaxSQLLength(c *C) { length := int(maxSQLLength) * 10 str := strings.Repeat("a", length) - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: str, - NormalizedSQL: str, - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } - + stmtExecInfo1 := generateAnyExecInfo() + stmtExecInfo1.OriginalSQL = str + stmtExecInfo1.NormalizedSQL = str s.ssMap.AddStatement(stmtExecInfo1) + key := &stmtSummaryByDigestKey{ schemaName: stmtExecInfo1.SchemaName, digest: stmtExecInfo1.Digest, } value, ok := s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) - // Length of normalizedSQL and sampleSQL should be maxSQLLength + // Length of normalizedSQL and sampleSQL should be maxSQLLength. summary := value.(*stmtSummaryByDigest) c.Assert(len(summary.normalizedSQL), Equals, int(maxSQLLength)) c.Assert(len(summary.sampleSQL), Equals, int(maxSQLLength)) } -// Test setting EnableStmtSummary to 0 +// Test setting EnableStmtSummary to 0. func (s *testStmtSummarySuite) TestDisableStmtSummary(c *C) { s.ssMap.Clear() + // Set false in global scope, it should work. 
s.ssMap.SetEnabled("0", false) - stmtExecInfo1 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql1", - NormalizedSQL: "normalized_sql", - Digest: "digest", - TotalLatency: 10000, - AffectedRows: 100, - SentRows: 100, - StartTime: time.Date(2019, 1, 1, 10, 10, 10, 10, time.UTC), - } - + stmtExecInfo1 := generateAnyExecInfo() s.ssMap.AddStatement(stmtExecInfo1) datums := s.ssMap.ToDatum() c.Assert(len(datums), Equals, 0) @@ -347,32 +650,65 @@ func (s *testStmtSummarySuite) TestDisableStmtSummary(c *C) { // Set false in global scope, it shouldn't work. s.ssMap.SetEnabled("0", false) - stmtExecInfo2 := &StmtExecInfo{ - SchemaName: "schema_name", - OriginalSQL: "original_sql2", - NormalizedSQL: "normalized_sql2", - Digest: "digest2", - TotalLatency: 50000, - AffectedRows: 500, - SentRows: 500, - StartTime: time.Date(2019, 1, 1, 10, 10, 20, 10, time.UTC), - } + stmtExecInfo2 := stmtExecInfo1 + stmtExecInfo2.OriginalSQL = "original_sql2" + stmtExecInfo2.NormalizedSQL = "normalized_sql2" + stmtExecInfo2.Digest = "digest2" s.ssMap.AddStatement(stmtExecInfo2) datums = s.ssMap.ToDatum() c.Assert(len(datums), Equals, 2) - // Unset in session scope + // Unset in session scope. s.ssMap.SetEnabled("", true) s.ssMap.AddStatement(stmtExecInfo2) datums = s.ssMap.ToDatum() c.Assert(len(datums), Equals, 0) - // Unset in global scope + // Unset in global scope. s.ssMap.SetEnabled("", false) s.ssMap.AddStatement(stmtExecInfo1) datums = s.ssMap.ToDatum() c.Assert(len(datums), Equals, 0) - // Set back + // Set back. s.ssMap.SetEnabled("1", false) } + +// Test GetMoreThanOnceSelect. +func (s *testStmtSummarySuite) TestGetMoreThanOnceSelect(c *C) { + s.ssMap.Clear() + + stmtExecInfo1 := generateAnyExecInfo() + stmtExecInfo1.OriginalSQL = "insert 1" + stmtExecInfo1.NormalizedSQL = "insert ?" + stmtExecInfo1.StmtCtx.StmtType = "insert" + s.ssMap.AddStatement(stmtExecInfo1) + schemas, sqls := s.ssMap.GetMoreThanOnceSelect() + c.Assert(len(schemas), Equals, 0) + c.Assert(len(sqls), Equals, 0) + + stmtExecInfo1.NormalizedSQL = "select ?" + stmtExecInfo1.Digest = "digest1" + stmtExecInfo1.StmtCtx.StmtType = "select" + s.ssMap.AddStatement(stmtExecInfo1) + schemas, sqls = s.ssMap.GetMoreThanOnceSelect() + c.Assert(len(schemas), Equals, 0) + c.Assert(len(sqls), Equals, 0) + + s.ssMap.AddStatement(stmtExecInfo1) + schemas, sqls = s.ssMap.GetMoreThanOnceSelect() + c.Assert(len(schemas), Equals, 1) + c.Assert(len(sqls), Equals, 1) +} + +// Test `formatBackoffTypes`. +func (s *testStmtSummarySuite) TestFormatBackoffTypes(c *C) { + backoffMap := make(map[fmt.Stringer]int) + c.Assert(formatBackoffTypes(backoffMap), IsNil) + + backoffMap[tikv.BoPDRPC] = 1 + c.Assert(formatBackoffTypes(backoffMap), Equals, "pdRPC:1") + + backoffMap[tikv.BoTxnLock] = 2 + c.Assert(formatBackoffTypes(backoffMap), Equals, "txnLock:2,pdRPC:1") +}
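One usage note on the schema choice above: TABLE_NAMES and INDEX_NAMES are comma-joined precisely so the columns stay filterable with MySQL's FIND_IN_SET, as the comment in `newStmtSummaryByDigest` points out. A sketch of the intended query pattern, written in the same testkit style as tables_test.go (the table `test.t` and the expected row are illustrative assumptions, not assertions from this change):

```go
// Illustrative: select only the summaries whose statements touched test.t,
// relying on the comma-separated TABLE_NAMES column built above.
tk.MustQuery(`select digest_text from performance_schema.events_statements_summary_by_digest
	where find_in_set('test.t', table_names)`,
).Check(testkit.Rows("select * from t where a = ?"))
```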