Merge pull request #25 from changsongl/develop
Fix fetch interval bug and add pprof
changsongl authored Jul 14, 2021
2 parents ed0b1d2 + 6301488 commit 9080ba4
Showing 9 changed files with 153 additions and 9 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -18,4 +18,5 @@ go.sum

# IDE and OS
.idea

.DS_Store
5 changes: 4 additions & 1 deletion cmd/delayqueue/main.go
@@ -133,7 +133,10 @@ func run() int {

p := pool.New(s, l)
q := queue.New(s, conf.DelayQueue.QueueName)
t := timer.New(l, time.Duration(conf.DelayQueue.TimerFetchInterval)*time.Millisecond)
t := timer.New(
l, time.Duration(conf.DelayQueue.TimerFetchInterval)*time.Millisecond,
time.Duration(conf.DelayQueue.TimerFetchDelay)*time.Millisecond,
)
return b, p, q, t
},
)
1 change: 1 addition & 0 deletions config/config.go
@@ -50,6 +50,7 @@ type DelayQueue struct {
BucketMaxFetchNum uint64 `yaml:"bucket_max_fetch_num,omitempty" json:"bucket_max_fetch_num,omitempty"`
QueueName string `yaml:"queue_name,omitempty" json:"queue_name,omitempty"`
TimerFetchInterval int `yaml:"timer_fetch_interval,omitempty" json:"timer_fetch_interval,omitempty"`
TimerFetchDelay int `yaml:"timer_fetch_delay,omitempty" json:"timer_fetch_delay,omitempty"`
}

// Redis redis configuration
2 changes: 2 additions & 0 deletions config/config.yaml
@@ -7,6 +7,8 @@ delay_queue:
queue_name: "dq_queue" # queue redis key name
bucket_max_fetch_num: 200 # max fetch number of jobs in the bucket
timer_fetch_interval: 1000 # job fetch interval (ms); a smaller interval may give better throughput.
timer_fetch_delay: 0 # fetch delay (ms); if jobs remain in the bucket after a fetch,
# the timer waits timer_fetch_delay ms before the next fetch. Default is no wait.

# redis config
redis:
2 changes: 2 additions & 0 deletions config/config.yaml.example
@@ -7,6 +7,8 @@ delay_queue:
queue_name: "dqqueue" # queue redis key name
bucket_max_fetch_num: 250 # max fetch number of jobs in the bucket
timer_fetch_interval: 2000 # job fetch interval (ms); a smaller interval may give better throughput.
timer_fetch_delay: 0 # fetch delay (ms); if jobs remain in the bucket after a fetch,
# the timer waits timer_fetch_delay ms before the next fetch. Default is no wait.

# redis config
redis:
128 changes: 128 additions & 0 deletions server/pprof.go
@@ -0,0 +1,128 @@
package server

import (
"net/http/pprof"
"strings"

"github.com/gin-gonic/gin"
)

// WrapPProf adds several routes from package `net/http/pprof` to a *gin.Engine object
func WrapPProf(router *gin.Engine) {
WrapGroup(&router.RouterGroup)
}

// WrapGroup adds several routes from package `net/http/pprof` to a *gin.RouterGroup object
func WrapGroup(router *gin.RouterGroup) {
routers := []struct {
Method string
Path string
Handler gin.HandlerFunc
}{
{"GET", "/debug/pprof/", IndexHandler()},
{"GET", "/debug/pprof/heap", HeapHandler()},
{"GET", "/debug/pprof/goroutine", GoroutineHandler()},
{"GET", "/debug/pprof/allocs", AllocsHandler()},
{"GET", "/debug/pprof/block", BlockHandler()},
{"GET", "/debug/pprof/threadcreate", ThreadCreateHandler()},
{"GET", "/debug/pprof/cmdline", CmdlineHandler()},
{"GET", "/debug/pprof/profile", ProfileHandler()},
{"GET", "/debug/pprof/symbol", SymbolHandler()},
{"POST", "/debug/pprof/symbol", SymbolHandler()},
{"GET", "/debug/pprof/trace", TraceHandler()},
{"GET", "/debug/pprof/mutex", MutexHandler()},
}

// If the group is already mounted under /debug or /debug/pprof, trim that
// prefix from the route paths so the handlers still resolve under .../debug/pprof/.
basePath := strings.TrimSuffix(router.BasePath(), "/")
var prefix string

switch {
case basePath == "":
prefix = ""
case strings.HasSuffix(basePath, "/debug"):
prefix = "/debug"
case strings.HasSuffix(basePath, "/debug/pprof"):
prefix = "/debug/pprof"
}

for _, r := range routers {
router.Handle(r.Method, strings.TrimPrefix(r.Path, prefix), r.Handler)
}
}

// IndexHandler will pass the call from /debug/pprof to pprof
func IndexHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Index(ctx.Writer, ctx.Request)
}
}

// HeapHandler will pass the call from /debug/pprof/heap to pprof
func HeapHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("heap").ServeHTTP(ctx.Writer, ctx.Request)
}
}

// GoroutineHandler will pass the call from /debug/pprof/goroutine to pprof
func GoroutineHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("goroutine").ServeHTTP(ctx.Writer, ctx.Request)
}
}

// AllocsHandler will pass the call from /debug/pprof/allocs to pprof
func AllocsHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("allocs").ServeHTTP(ctx.Writer, ctx.Request)
}
}

// BlockHandler will pass the call from /debug/pprof/block to pprof
func BlockHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("block").ServeHTTP(ctx.Writer, ctx.Request)
}
}

// ThreadCreateHandler will pass the call from /debug/pprof/threadcreate to pprof
func ThreadCreateHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("threadcreate").ServeHTTP(ctx.Writer, ctx.Request)
}
}

// CmdlineHandler will pass the call from /debug/pprof/cmdline to pprof
func CmdlineHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Cmdline(ctx.Writer, ctx.Request)
}
}

// ProfileHandler will pass the call from /debug/pprof/profile to pprof
func ProfileHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Profile(ctx.Writer, ctx.Request)
}
}

// SymbolHandler will pass the call from /debug/pprof/symbol to pprof
func SymbolHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Symbol(ctx.Writer, ctx.Request)
}
}

// TraceHandler will pass the call from /debug/pprof/trace to pprof
func TraceHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Trace(ctx.Writer, ctx.Request)
}
}

// MutexHandler will pass the call from /debug/pprof/mutex to pprof
func MutexHandler() gin.HandlerFunc {
return func(ctx *gin.Context) {
pprof.Handler("mutex").ServeHTTP(ctx.Writer, ctx.Request)
}
}
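
For reference, a minimal usage sketch (not part of this commit): it shows how WrapGroup's prefix handling keeps the registered paths under .../debug/pprof/ regardless of which router group the routes are mounted on. The function name examplePProfMounts, the group path /admin/debug, and the use of gin.New() are illustrative assumptions, not code from this repository.

package server

import "github.com/gin-gonic/gin"

// examplePProfMounts is an illustrative sketch, not part of this change.
// WrapGroup trims the group's own /debug (or /debug/pprof) suffix from each
// route path, so the handlers always end up under .../debug/pprof/.
func examplePProfMounts() *gin.Engine {
	r := gin.New()

	WrapPProf(r)                       // serves /debug/pprof/, /debug/pprof/heap, ...
	WrapGroup(r.Group("/admin/debug")) // serves /admin/debug/pprof/, /admin/debug/pprof/heap, ...

	return r
}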
2 changes: 2 additions & 0 deletions server/server.go
@@ -66,6 +66,8 @@ func (s *server) Init() {
s.r.Use(gin.LoggerWithConfig(gin.LoggerConfig{}))
}

WrapPProf(s.r)

regMetricFunc := setServerMetricHandlerAndMiddleware()
regMetricFunc(s.r)
}
4 changes: 2 additions & 2 deletions test/integration/integration_test.go
@@ -18,7 +18,7 @@ import (
// It will test add job, consume and remove.
func TestDelayQueueAddAndRemove(t *testing.T) {
// push n jobs with delay within 30 seconds
DelayTimeSeconds := 60
DelayTimeSeconds := 30
Jobs := 200
topic, key := "TestDelayQueueAddAndRemove-topic", "TestDelayQueueAddAndRemove-set"
rand.Seed(time.Now().Unix())
@@ -64,7 +64,7 @@ func TestDelayQueueAddAndRemove(t *testing.T) {

// check after 50 seconds, all jobs should be done
t.Log("Sleeping")
time.Sleep(90 * time.Second)
time.Sleep(50 * time.Second)

num, err := RecordNumbers(key)
require.NoError(t, err)
17 changes: 11 additions & 6 deletions timer/timer.go
@@ -10,7 +10,7 @@ import (

// TaskFunc is the type of task function that can be
// added to the timer.
type TaskFunc func() (bool, error)
type TaskFunc func() (hasMore bool, err error)

// Timer is for processing tasks. It checks the buckets,
// pops ready jobs, and puts them onto the queue.
@@ -26,7 +26,8 @@ type timer struct {
tasks []taskStub // task stub
once sync.Once // once
l log.Logger // logger
taskInterval time.Duration
taskInterval time.Duration // fetch interval
taskDelay time.Duration // fetch delay when the bucket still has jobs after a fetch. Default is no wait.
}

// taskStub task stub for function itself and context,
@@ -38,11 +39,12 @@ type taskStub struct {
l log.Logger
}

func New(l log.Logger, taskInterval time.Duration) Timer {
func New(l log.Logger, taskInterval, taskDelay time.Duration) Timer {
return &timer{
wg: sync.WaitGroup{},
l: l.WithModule("timer"),
taskInterval: taskInterval,
taskDelay: taskDelay,
}
}

@@ -64,7 +66,7 @@ func (t *timer) Run() {
for _, task := range t.tasks {
go func(task taskStub) {
defer t.wg.Done()
task.run(t.taskInterval)
task.run(t.taskInterval, t.taskDelay)
}(task)
}

@@ -84,7 +86,7 @@ func (t *timer) Close() {

// run executes a task in a loop until its context is done.
// This could be implemented more cleanly with further thought.
func (task taskStub) run(fetchInterval time.Duration) {
func (task taskStub) run(fetchInterval, fetchDelay time.Duration) {
for {
select {
case <-task.ctx.Done():
@@ -95,10 +97,13 @@ func (task taskStub) run(fetchInterval time.Duration) {
task.l.Error("task run failed", log.String("err", err.Error()))
time.Sleep(fetchInterval)
continue
} else if hasMore {
} else if !hasMore {
time.Sleep(fetchInterval)
continue
}

// more jobs remain; wait fetchDelay before the next fetch
time.Sleep(fetchDelay)
}
}
}
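
For reference, a minimal TaskFunc sketch (not part of this commit) that exercises the corrected pacing: while it reports hasMore=true the timer sleeps only the fetch delay between calls, and once the backlog is drained it falls back to the fetch interval. The function name exampleDrainTask and the slice-backed backlog are illustrative assumptions.

package timer

// exampleDrainTask is an illustrative TaskFunc, not part of this change.
// It consumes one pending job per call and reports whether more remain,
// which is what steers the timer between fetchDelay and fetchInterval.
func exampleDrainTask(pending *[]string) TaskFunc {
	return func() (hasMore bool, err error) {
		if len(*pending) == 0 {
			return false, nil // bucket empty: the timer sleeps fetchInterval
		}
		*pending = (*pending)[1:]     // consume one job
		return len(*pending) > 0, nil // jobs remain: the timer sleeps only fetchDelay
	}
}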
