Skip to content

Commit

Permalink
goschedstats: support go1.20
Browse files Browse the repository at this point in the history
There was one field added to the scheduler state struct (`schedt`) that matters: needspinning
(see golang/go@8cb350d).

All of the relevant symbols and structs were re-copied. Some `uintptr`s
were replaced with real pointers, and some things were ported to use new
atomic types.

Relates to #96443.

Epic: none

Release note: None
  • Loading branch information
ajwerner authored and Andrew Werner committed May 31, 2023
1 parent 906e7ad commit 8b33327
Show file tree
Hide file tree
Showing 3 changed files with 202 additions and 1 deletion.
1 change: 1 addition & 0 deletions pkg/util/goschedstats/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ go_library(
srcs = [
"runnable.go",
"runtime_go1.19.go",
"runtime_go1.20.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/util/goschedstats",
visibility = ["//visibility:public"],
Expand Down
2 changes: 1 addition & 1 deletion pkg/util/goschedstats/runtime_go1.19.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
//

// The structure definitions in this file have been cross-checked against go1.19.
// Before allowing newer versions, please check that the structures
// still match with those in go/src/runtime.
Expand Down
200 changes: 200 additions & 0 deletions pkg/util/goschedstats/runtime_go1.20.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,200 @@
// Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// The structure definitions in this file have been cross-checked against go1.20.
// Before allowing newer versions, please check that the structures
// still match with those in go/src/runtime.

//go:build gc && go1.20 && !go1.21
// +build gc,go1.20,!go1.21

package goschedstats

import (
"sync/atomic"
_ "unsafe" // required by go:linkname
)

// puintptr mirrors runtime.puintptr: a *p expressed as a uintptr so that
// the structs below carry no GC-visible pointers, matching the runtime's
// own layout trick.
type puintptr uintptr

// muintptr mirrors runtime.muintptr (an *m as a uintptr).
type muintptr uintptr

// guintptr mirrors runtime.guintptr (a *g as a uintptr).
type guintptr uintptr

// sysmontick mirrors runtime.sysmontick (go1.20): the last scheduler and
// syscall ticks observed by sysmon for a P. It exists here only so that
// the embedded field in p below has the correct size and alignment.
type sysmontick struct {
	schedtick   uint32
	schedwhen   int64
	syscalltick uint32
	syscallwhen int64
}

// pageCache mirrors runtime.pageCache, a per-P cache of free pages. Only
// its size matters here — it is embedded by value in p below and never
// read by this package.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
//
// (Doc comment copied from go/src/runtime. The fields are deliberately
// elided: p below only holds *_defer pointers, so just the pointer size
// matters for layout purposes.)
type _defer struct {
	// The rest of the fields aren't important.
}

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
// (Doc comment copied from go/src/runtime. The fields are deliberately
// elided: p below only holds an *mcache, so just the pointer size matters
// for layout purposes.)
type mcache struct {
	// The rest of the fields aren't important.
}

// p mirrors the leading fields of the runtime's p struct (go1.20). Field
// order, types, and sizes must stay in lockstep with
// go/src/runtime/runtime2.go up through runnext, because
// numRunnableGoroutines reads runqhead, runqtail, and runnext directly
// out of the live runtime Ps via the go:linkname'd allp slice. Everything
// after runnext is elided.
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// The rest of the fields aren't important.
}

// lockRankStruct mirrors the runtime's lockRankStruct when static lock
// ranking is disabled (the default): an empty struct contributing zero
// bytes to mutex below.
type lockRankStruct struct{}

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
//
// (Mirrors runtime.mutex; sched.lock below is acquired through the
// go:linkname'd lock/unlock functions.)
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
// be on one gQueue or gList at a time.
//
// (Mirrors runtime.gQueue; present only so schedt.runq below has the
// correct size and offset.)
type gQueue struct {
	head guintptr
	tail guintptr
}

// schedt mirrors the leading fields of the runtime's schedt struct
// (go1.20). This package only touches sched.lock and sched.runqsize, but
// every field before them must be declared with matching types so their
// offsets line up with the real runtime.sched. needspinning is the field
// added in go1.20 (golang/go@8cb350d) that motivated this copy.
type schedt struct {
	goidgen   atomic.Uint64
	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil atomic.Int64 // time to which current poll is sleeping

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys atomic.Int32 // number of system goroutines

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq     gQueue
	runqsize int32

	// The rest of the fields aren't important.
}

// allp aliases runtime.allp, the runtime's slice of all Ps. Its length
// and each P's run queue are only read while holding sched.lock (see
// numRunnableGoroutines).
//
//go:linkname allp runtime.allp
var allp []*p

// sched aliases runtime.sched, the global scheduler state.
//
//go:linkname sched runtime.sched
var sched schedt

// lock/unlock alias the runtime's internal mutex operations, letting this
// package acquire sched.lock from outside the runtime.
//
//go:linkname lock runtime.lock
func lock(l *mutex)

//go:linkname unlock runtime.unlock
func unlock(l *mutex)

// numRunnableGoroutines returns the number of runnable goroutines and the
// number of Ps, read directly from the runtime's scheduler state via the
// go:linkname'd symbols above.
//
// It holds sched.lock so that the global run queue length
// (sched.runqsize) and the set of Ps (allp) are stable, then sums each
// P's local queue length (runqtail - runqhead, plus runnext if set) using
// atomic loads with a retry loop, since other Ps mutate their local
// queues without taking sched.lock.
func numRunnableGoroutines() (numRunnable int, numProcs int) {
	lock(&sched.lock)
	numRunnable = int(sched.runqsize)
	numProcs = len(allp)

	// Note that holding sched.lock prevents the number of Ps from changing, so
	// it's safe to loop over allp.
	for _, p := range allp {
		// Retry loop for concurrent updates.
		for {
			h := atomic.LoadUint32(&p.runqhead)
			t := atomic.LoadUint32(&p.runqtail)
			// guintptr is a uintptr underneath, so this cast lets runnext be
			// loaded atomically without the runtime's helper methods.
			next := atomic.LoadUintptr((*uintptr)(&p.runnext))
			runnable := int32(t - h)
			// Re-read runqhead: if it moved since we read h, or the computed
			// length underflowed, a concurrent pop/steal raced with us.
			if atomic.LoadUint32(&p.runqhead) != h || runnable < 0 {
				// A concurrent update messed with us; try again.
				continue
			}
			// A non-zero runnext is one more runnable G that is not counted
			// in the circular runq.
			if next != 0 {
				runnable++
			}
			numRunnable += int(runnable)
			break
		}
	}
	unlock(&sched.lock)
	return numRunnable, numProcs
}

0 comments on commit 8b33327

Please sign in to comment.