tidb expensivequery source code
File path: /util/expensivequery/expensivequery.go
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expensivequery

import (
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/pingcap/log"
	"github.com/pingcap/tidb/parser"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Handle is the handler for expensive query.
type Handle struct {
	exitCh chan struct{}
	sm     atomic.Value
}

// NewExpensiveQueryHandle builds a new expensive query handler.
func NewExpensiveQueryHandle(exitCh chan struct{}) *Handle {
	return &Handle{exitCh: exitCh}
}

// SetSessionManager sets the SessionManager, which is used to fetch the info
// of all active sessions.
func (eqh *Handle) SetSessionManager(sm util.SessionManager) *Handle {
	eqh.sm.Store(sm)
	return eqh
}

// Run starts an expensive query checker goroutine when the server starts.
func (eqh *Handle) Run() {
	threshold := atomic.LoadUint64(&variable.ExpensiveQueryTimeThreshold)
	// Use 100ms as the tick interval for now; a configurable interval or a system variable may be used later.
	tickInterval := time.Millisecond * time.Duration(100)
	ticker := time.NewTicker(tickInterval)
	defer ticker.Stop()
	sm := eqh.sm.Load().(util.SessionManager)
	record := &memoryUsageAlarm{}
	for {
		select {
		case <-ticker.C:
			processInfo := sm.ShowProcessList()
			for _, info := range processInfo {
				if len(info.Info) == 0 {
					continue
				}
				costTime := time.Since(info.Time)
				if !info.ExceedExpensiveTimeThresh && costTime >= time.Second*time.Duration(threshold) && log.GetLevel() <= zapcore.WarnLevel {
					logExpensiveQuery(costTime, info)
					info.ExceedExpensiveTimeThresh = true
				}
				if info.MaxExecutionTime > 0 && costTime > time.Duration(info.MaxExecutionTime)*time.Millisecond {
					logutil.BgLogger().Warn("execution timeout, kill it", zap.Duration("costTime", costTime),
						zap.Duration("maxExecutionTime", time.Duration(info.MaxExecutionTime)*time.Millisecond), zap.String("processInfo", info.String()))
					sm.Kill(info.ID, true)
				}
				if info.ID == util.GetAutoAnalyzeProcID(sm.ServerID) {
					maxAutoAnalyzeTime := variable.MaxAutoAnalyzeTime.Load()
					if maxAutoAnalyzeTime > 0 && costTime > time.Duration(maxAutoAnalyzeTime)*time.Second {
						logutil.BgLogger().Warn("auto analyze timeout, kill it", zap.Duration("costTime", costTime),
							zap.Duration("maxAutoAnalyzeTime", time.Duration(maxAutoAnalyzeTime)*time.Second), zap.String("processInfo", info.String()))
						sm.Kill(info.ID, true)
					}
				}
			}
			threshold = atomic.LoadUint64(&variable.ExpensiveQueryTimeThreshold)
			record.memoryUsageAlarmRatio = variable.MemoryUsageAlarmRatio.Load()
			if record.err == nil {
				record.alarm4ExcessiveMemUsage(sm)
			}
		case <-eqh.exitCh:
			return
		}
	}
}

// LogOnQueryExceedMemQuota prints a log when the memory usage of connID exceeds the memory quota.
func (eqh *Handle) LogOnQueryExceedMemQuota(connID uint64) {
	if log.GetLevel() > zapcore.WarnLevel {
		return
	}
	// The out-of-memory SQL may be an internal SQL executed during the
	// bootstrap phase, and `sm` is not set at that point. This is unlikely
	// to happen except in tests, so we do not need to log a detailed
	// message for it.
	v := eqh.sm.Load()
	if v == nil {
		logutil.BgLogger().Info("expensive_query during bootstrap phase", zap.Uint64("conn_id", connID))
		return
	}
	sm := v.(util.SessionManager)
	info, ok := sm.GetProcessInfo(connID)
	if !ok {
		return
	}
	logExpensiveQuery(time.Since(info.Time), info)
}

func genLogFields(costTime time.Duration, info *util.ProcessInfo) []zap.Field {
	logFields := make([]zap.Field, 0, 20)
	logFields = append(logFields, zap.String("cost_time", strconv.FormatFloat(costTime.Seconds(), 'f', -1, 64)+"s"))
	execDetail := info.StmtCtx.GetExecDetails()
	logFields = append(logFields, execDetail.ToZapFields()...)
	if copTaskInfo := info.StmtCtx.CopTasksDetails(); copTaskInfo != nil {
		logFields = append(logFields, copTaskInfo.ToZapFields()...)
	}
	if statsInfo := info.StatsInfo(info.Plan); len(statsInfo) > 0 {
		var buf strings.Builder
		firstComma := false
		vStr := ""
		for k, v := range statsInfo {
			if v == 0 {
				vStr = "pseudo"
			} else {
				vStr = strconv.FormatUint(v, 10)
			}
			if firstComma {
				buf.WriteString("," + k + ":" + vStr)
			} else {
				buf.WriteString(k + ":" + vStr)
				firstComma = true
			}
		}
		logFields = append(logFields, zap.String("stats", buf.String()))
	}
	if info.ID != 0 {
		logFields = append(logFields, zap.Uint64("conn_id", info.ID))
	}
	if len(info.User) > 0 {
		logFields = append(logFields, zap.String("user", info.User))
	}
	if len(info.DB) > 0 {
		logFields = append(logFields, zap.String("database", info.DB))
	}
	var tableIDs, indexNames string
	if len(info.StmtCtx.TableIDs) > 0 {
		tableIDs = strings.Replace(fmt.Sprintf("%v", info.StmtCtx.TableIDs), " ", ",", -1)
		logFields = append(logFields, zap.String("table_ids", tableIDs))
	}
	if len(info.StmtCtx.IndexNames) > 0 {
		indexNames = strings.Replace(fmt.Sprintf("%v", info.StmtCtx.IndexNames), " ", ",", -1)
		logFields = append(logFields, zap.String("index_names", indexNames))
	}
	logFields = append(logFields, zap.Uint64("txn_start_ts", info.CurTxnStartTS))
	if memTracker := info.StmtCtx.MemTracker; memTracker != nil {
		logFields = append(logFields, zap.String("mem_max", fmt.Sprintf("%d Bytes (%v)", memTracker.MaxConsumed(), memTracker.FormatBytes(memTracker.MaxConsumed()))))
	}
	const logSQLLen = 1024 * 8
	var sql string
	if len(info.Info) > 0 {
		sql = info.Info
		if info.RedactSQL {
			sql = parser.Normalize(sql)
		}
	}
	if len(sql) > logSQLLen {
		sql = fmt.Sprintf("%s len(%d)", sql[:logSQLLen], len(sql))
	}
	logFields = append(logFields, zap.String("sql", sql))
	return logFields
}

// logExpensiveQuery logs the queries which exceed the time threshold or memory threshold.
func logExpensiveQuery(costTime time.Duration, info *util.ProcessInfo) {
	logutil.BgLogger().Warn("expensive_query", genLogFields(costTime, info)...)
}
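
Usage note: the server wires this handler up at startup by building the handle with an exit channel, registering a SessionManager, and then running the checker in a background goroutine. Below is a minimal sketch of that call sequence, assuming the TiDB module is importable; startExpensiveQueryChecker, the surrounding package, and the sm parameter are illustrative names, not part of this file.

package server // hypothetical package that embeds the checker

import (
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/expensivequery"
)

// startExpensiveQueryChecker is a hypothetical helper showing the intended
// call order: the SessionManager must be set before Run, because Run loads
// it from the handle and polls sm.ShowProcessList() on every tick.
func startExpensiveQueryChecker(sm util.SessionManager) chan struct{} {
	exitCh := make(chan struct{})
	handle := expensivequery.NewExpensiveQueryHandle(exitCh).SetSessionManager(sm)
	go handle.Run()
	return exitCh // close(exitCh) stops the checker on server shutdown
}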