*: move dim to statistics (#5541)
ref #4949

Signed-off-by: lhy1024 <[email protected]>
lhy1024 authored Sep 22, 2022
1 parent 689dd56 commit 8209515
Showing 9 changed files with 108 additions and 77 deletions.
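For orientation: the identifiers referenced throughout this diff as statistics.BytePriority, statistics.KeyPriority, statistics.QueryPriority and statistics.ByteDim/KeyDim/QueryDim now live in the server/statistics package. Their definitions are not part of the hunks shown here, so the sketch below is an assumption, inferred from the constants removed from hot_region_config.go and from the call sites in the scheduler code.

```go
// Sketch only: assumed declarations in the server/statistics package after
// this change. The priority names mirror the constants removed from
// server/schedulers/hot_region_config.go; the dimension indices are inferred
// from call sites such as statistics.ByteDim in stringToDim below, and the
// iota layout is an assumption.
package statistics

const (
	// BytePriority indicates that hot-region-scheduler prefers the byte dimension.
	BytePriority = "byte"
	// KeyPriority indicates that hot-region-scheduler prefers the key dimension.
	KeyPriority = "key"
	// QueryPriority indicates that hot-region-scheduler prefers the query dimension.
	QueryPriority = "query"
)

// Dimension indices used by the hot-region scheduler.
const (
	ByteDim int = iota
	KeyDim
	QueryDim
)
```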
12 changes: 6 additions & 6 deletions server/schedulers/hot_region.go
@@ -1493,11 +1493,11 @@ func toResourceType(rwTy statistics.RWType, opTy opType) resourceType {

func stringToDim(name string) int {
switch name {
case BytePriority:
case statistics.BytePriority:
return statistics.ByteDim
case KeyPriority:
case statistics.KeyPriority:
return statistics.KeyDim
case QueryPriority:
case statistics.QueryPriority:
return statistics.QueryDim
}
return statistics.ByteDim
@@ -1506,11 +1506,11 @@ func stringToDim(name string) int {
func dimToString(dim int) string {
switch dim {
case statistics.ByteDim:
return BytePriority
return statistics.BytePriority
case statistics.KeyDim:
return KeyPriority
return statistics.KeyPriority
case statistics.QueryDim:
return QueryPriority
return statistics.QueryPriority
default:
return ""
}
28 changes: 11 additions & 17 deletions server/schedulers/hot_region_config.go
@@ -36,29 +36,23 @@ import (
)

const (
// BytePriority indicates hot-region-scheduler prefer byte dim
BytePriority = "byte"
// KeyPriority indicates hot-region-scheduler prefer key dim
KeyPriority = "key"
// QueryPriority indicates hot-region-scheduler prefer query dim
QueryPriority = "query"

// Scheduling has a bigger impact on TiFlash, so it needs to be corrected in configuration items
// In the default config, the TiKV difference is 1.05*1.05-1 = 0.1025, and the TiFlash difference is 1.15*1.15-1 = 0.3225
tiflashToleranceRatioCorrection = 0.1
)

var defaultPrioritiesConfig = prioritiesConfig{
read: []string{QueryPriority, BytePriority},
writeLeader: []string{KeyPriority, BytePriority},
writePeer: []string{BytePriority, KeyPriority},
read: []string{statistics.QueryPriority, statistics.BytePriority},
writeLeader: []string{statistics.KeyPriority, statistics.BytePriority},
writePeer: []string{statistics.BytePriority, statistics.KeyPriority},
}

// because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions
var compatiblePrioritiesConfig = prioritiesConfig{
read: []string{BytePriority, KeyPriority},
writeLeader: []string{KeyPriority, BytePriority},
writePeer: []string{BytePriority, KeyPriority},
read: []string{statistics.BytePriority, statistics.KeyPriority},
writeLeader: []string{statistics.KeyPriority, statistics.BytePriority},
writePeer: []string{statistics.BytePriority, statistics.KeyPriority},
}

// params about hot region.
@@ -344,7 +338,7 @@ func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, r *
func isPriorityValid(priorities []string) (map[string]bool, error) {
priorityMap := map[string]bool{}
for _, p := range priorities {
if p != BytePriority && p != KeyPriority && p != QueryPriority {
if p != statistics.BytePriority && p != statistics.KeyPriority && p != statistics.QueryPriority {
return nil, errs.ErrSchedulerConfig.FastGenByArgs("invalid scheduling dimensions")
}
priorityMap[p] = true
@@ -367,8 +361,8 @@ func (conf *hotRegionSchedulerConfig) valid() error {
}
if pm, err := isPriorityValid(conf.WritePeerPriorities); err != nil {
return err
} else if pm[QueryPriority] {
return errs.ErrSchedulerConfig.FastGenByArgs("qps is not allowed to be set in priorities for write-peer-priorities")
} else if pm[statistics.QueryPriority] {
return errs.ErrSchedulerConfig.FastGenByArgs("query is not allowed to be set in priorities for write-peer-priorities")
}

if conf.RankFormulaVersion != "" && conf.RankFormulaVersion != "v1" && conf.RankFormulaVersion != "v2" {
@@ -478,7 +472,7 @@ func getWritePeerPriorities(c *prioritiesConfig) []string {
// because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions
func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities func(*prioritiesConfig) []string) []string {
withQuery := slice.AnyOf(origins, func(i int) bool {
return origins[i] == QueryPriority
return origins[i] == statistics.QueryPriority
})
compatibles := getPriorities(&compatiblePrioritiesConfig)
if !querySupport && withQuery {
@@ -487,7 +481,7 @@ func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities f

defaults := getPriorities(&defaultPrioritiesConfig)
isLegal := slice.AllOf(origins, func(i int) bool {
return origins[i] == BytePriority || origins[i] == KeyPriority || origins[i] == QueryPriority
return origins[i] == statistics.BytePriority || origins[i] == statistics.KeyPriority || origins[i] == statistics.QueryPriority
})
if len(defaults) == len(origins) && isLegal && origins[0] != origins[1] {
return origins
52 changes: 26 additions & 26 deletions server/schedulers/hot_region_test.go
@@ -559,7 +559,7 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) {
re.NoError(err)
hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{QueryPriority, BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority}

tc := mockcluster.NewCluster(ctx, opt)
tc.SetHotRegionCacheHitsThreshold(0)
@@ -968,7 +968,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) {
scheduler, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
re.NoError(err)
hb := scheduler.(*hotScheduler)
hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
tc.SetHotRegionCacheHitsThreshold(0)

// Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0.
@@ -1122,7 +1122,7 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) {
re.NoError(err)
hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc := mockcluster.NewCluster(ctx, opt)
tc.SetHotRegionCacheHitsThreshold(0)
@@ -1180,7 +1180,7 @@ func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) {
hb.(*hotScheduler).conf.GreatDecRatio = 0.99
hb.(*hotScheduler).conf.MinorDecRatio = 1
hb.(*hotScheduler).conf.DstToleranceRatio = 1
hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
old := pendingAmpFactor
pendingAmpFactor = 0.0
defer func() {
@@ -1299,7 +1299,7 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) {
hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
hb.(*hotScheduler).conf.SetStrictPickingStore(false)
hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
tc := mockcluster.NewCluster(ctx, opt)
tc.SetHotRegionCacheHitsThreshold(0)
tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0))
@@ -1854,7 +1854,7 @@ func TestHotReadPeerSchedule(t *testing.T) {
sche, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, tc, nil), storage.NewStorageWithMemoryBackend(), schedule.ConfigJSONDecoder([]byte("null")))
re.NoError(err)
hb := sche.(*hotScheduler)
hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc.UpdateStorageReadStats(1, 20*units.MiB, 20*units.MiB)
tc.UpdateStorageReadStats(2, 19*units.MiB, 19*units.MiB)
@@ -1903,12 +1903,12 @@ func TestHotScheduleWithPriority(t *testing.T) {
{1, []uint64{1, 2, 3}, 2 * units.MiB, 1 * units.MiB, 0},
{6, []uint64{4, 2, 3}, 1 * units.MiB, 2 * units.MiB, 0},
})
hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
ops, _ := hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5)
clearPendingInfluence(hb.(*hotScheduler))
hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
ops, _ = hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5)
@@ -1925,12 +1925,12 @@
addRegionInfo(tc, statistics.Read, []testRegionInfo{
{1, []uint64{1, 2, 3}, 2 * units.MiB, 2 * units.MiB, 0},
})
hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
ops, _ = hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2)
clearPendingInfluence(hb.(*hotScheduler))
hb.(*hotScheduler).conf.ReadPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
ops, _ = hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3)
@@ -1944,7 +1944,7 @@
tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval)
hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
hb.(*hotScheduler).conf.StrictPickingStore = true
ops, _ = hb.Schedule(tc, false)
re.Empty(ops)
@@ -1959,7 +1959,7 @@
tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(4, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval)
hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
hb.(*hotScheduler).conf.StrictPickingStore = true
ops, _ = hb.Schedule(tc, false)
re.Empty(ops)
@@ -1999,7 +1999,7 @@ func TestHotScheduleWithStddev(t *testing.T) {
addRegionInfo(tc, statistics.Write, []testRegionInfo{
{6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0},
})
hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
stddevThreshold = 0.1
ops, _ := hb.Schedule(tc, false)
re.Empty(ops)
@@ -2018,7 +2018,7 @@
addRegionInfo(tc, statistics.Write, []testRegionInfo{
{6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0},
})
hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
stddevThreshold = 0.1
ops, _ = hb.Schedule(tc, false)
re.Empty(ops)
@@ -2062,11 +2062,11 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) {
defer func() {
schedulePeerPr, pendingAmpFactor = old1, old2
}()
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
ops, _ := hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2)
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
ops, _ = hb.Schedule(tc, false)
re.Len(ops, 1)
testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3)
@@ -2089,8 +2089,8 @@ func TestCompatibility(t *testing.T) {
})
// config error value
hb.(*hotScheduler).conf.ReadPriorities = []string{"error"}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", statistics.BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority}
checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
{statistics.QueryDim, statistics.ByteDim},
{statistics.KeyDim, statistics.ByteDim},
@@ -2104,18 +2104,18 @@
{statistics.ByteDim, statistics.KeyDim},
})
// config byte and key
hb.(*hotScheduler).conf.ReadPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
{statistics.KeyDim, statistics.ByteDim},
{statistics.ByteDim, statistics.KeyDim},
{statistics.KeyDim, statistics.ByteDim},
})
// config query in low version
hb.(*hotScheduler).conf.ReadPriorities = []string{QueryPriority, BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{QueryPriority, BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
{statistics.ByteDim, statistics.KeyDim},
{statistics.KeyDim, statistics.ByteDim},
@@ -2124,7 +2124,7 @@
// config error value
hb.(*hotScheduler).conf.ReadPriorities = []string{"error", "error"}
hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority, KeyPriority}
hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority}
checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
{statistics.ByteDim, statistics.KeyDim},
{statistics.KeyDim, statistics.ByteDim},
@@ -2229,7 +2229,7 @@ func TestConfigValidation(t *testing.T) {
err = hc.valid()
re.Error(err)

// qps is not allowed to be set in priorities for write-peer-priorities
// query is not allowed to be set in priorities for write-peer-priorities
hc = initHotRegionScheduleConfig()
hc.WritePeerPriorities = []string{"query", "byte"}
err = hc.valid()
12 changes: 7 additions & 5 deletions server/schedulers/hot_region_v2.go
@@ -19,6 +19,8 @@ package schedulers
import (
"fmt"
"math"

"github.com/tikv/pd/server/statistics"
)

const (
@@ -98,11 +100,11 @@ func (bs *balanceSolver) filterUniformStoreV2() (string, bool) {
}
if isUniformFirstPriority && (bs.cur.progressiveRank == -2 || bs.cur.progressiveRank == -3) {
// If first priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for second priority dim
return dimToString(bs.firstPriority), true
return statistics.DimToString(bs.firstPriority), true
}
if isUniformSecondPriority && bs.cur.progressiveRank == -1 {
// If second priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for first priority dim
return dimToString(bs.secondPriority), true
return statistics.DimToString(bs.secondPriority), true
}
return "", false
}
@@ -360,11 +362,11 @@ func (bs *balanceSolver) rankToDimStringV2() string {
case -4:
return "all"
case -3:
return dimToString(bs.firstPriority)
return statistics.DimToString(bs.firstPriority)
case -2:
return dimToString(bs.firstPriority) + "-only"
return statistics.DimToString(bs.firstPriority) + "-only"
case -1:
return dimToString(bs.secondPriority)
return statistics.DimToString(bs.secondPriority)
default:
return "none"
}
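The hot_region_v2.go hunks above replace calls to the scheduler-local dimToString with statistics.DimToString. The statistics-side definition is not included in this view; assuming it simply mirrors the local dimToString shown in the hot_region.go hunk at the top, it would look roughly like the sketch below (building on the constants sketched earlier).

```go
// Sketch only: assumed shape of DimToString in server/statistics, mirroring
// the scheduler-local dimToString shown in the hot_region.go hunk above.
// Not a verbatim copy of the real file.
package statistics

// DimToString returns the priority name for a dimension index, or "" for an
// unknown index.
func DimToString(dim int) string {
	switch dim {
	case ByteDim:
		return BytePriority
	case KeyDim:
		return KeyPriority
	case QueryDim:
		return QueryPriority
	default:
		return ""
	}
}
```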
10 changes: 5 additions & 5 deletions server/schedulers/hot_region_v2_test.go
@@ -51,7 +51,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) {
tc.AddRegionStore(3, 20)
tc.AddRegionStore(4, 20)
tc.AddRegionStore(5, 20)
hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -114,7 +114,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) {
tc.AddRegionStore(3, 20)
tc.AddRegionStore(4, 20)
tc.AddRegionStore(5, 20)
hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 14*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -168,7 +168,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) {
tc.AddRegionStore(3, 20)
tc.AddRegionStore(4, 20)
tc.AddRegionStore(5, 20)
hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 14*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -231,7 +231,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) {
tc.AddRegionStore(3, 20)
tc.AddRegionStore(4, 20)
tc.AddRegionStore(5, 20)
hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}

tc.UpdateStorageReadStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
tc.UpdateStorageReadStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -284,7 +284,7 @@ func TestSkipUniformStore(t *testing.T) {
hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
hb.(*hotScheduler).conf.SetRankFormulaVersion("v2")
hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
tc := mockcluster.NewCluster(ctx, opt)
tc.SetHotRegionCacheHitsThreshold(0)
tc.AddRegionStore(1, 20)
(The remaining 4 of the 9 changed files are not loaded in this view.)