diff --git a/cache/refs.go b/cache/refs.go
index 274a20ffba6d..08a1de59d7f0 100644
--- a/cache/refs.go
+++ b/cache/refs.go
@@ -444,7 +444,7 @@ func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr er
 		"id": cr.ID(),
 		"refCount": len(cr.refs),
 		"removeSnapshot": removeSnapshot,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	})
 	if rerr != nil {
 		l = l.WithError(rerr)
@@ -488,7 +488,7 @@ func (sr *immutableRef) traceLogFields() logrus.Fields {
 		"refID": fmt.Sprintf("%p", sr),
 		"newRefCount": len(sr.refs),
 		"mutable": false,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	}
 	if sr.equalMutable != nil {
 		m["equalMutableID"] = sr.equalMutable.ID()
@@ -628,7 +628,7 @@ func (sr *mutableRef) traceLogFields() logrus.Fields {
 		"refID": fmt.Sprintf("%p", sr),
 		"newRefCount": len(sr.refs),
 		"mutable": true,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	}
 	if sr.equalMutable != nil {
 		m["equalMutableID"] = sr.equalMutable.ID()
diff --git a/solver/cachekey.go b/solver/cachekey.go
index 398368716ad2..e93139f51c92 100644
--- a/solver/cachekey.go
+++ b/solver/cachekey.go
@@ -24,12 +24,20 @@ type CacheKeyWithSelector struct {
 	CacheKey ExportableCacheKey
 }
 
+func (ck CacheKeyWithSelector) TraceFields() map[string]any {
+	fields := ck.CacheKey.TraceFields()
+	fields["selector"] = ck.Selector.String()
+	return fields
+}
+
 type CacheKey struct {
 	mu sync.RWMutex
 
-	ID string
-	deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey
+	ID string
+	deps [][]CacheKeyWithSelector
+	// digest is the digest returned by the CacheMap implementation of this op
 	digest digest.Digest
+	// vtx is the LLB digest that this op was created for
 	vtx digest.Digest
 	output Index
 	ids map[*cacheManager]string
@@ -37,6 +45,35 @@ type CacheKey struct {
 	indexIDs []string
 }
 
+func (ck *CacheKey) TraceFields() map[string]any {
+	ck.mu.RLock()
+	defer ck.mu.RUnlock()
+	idsMap := map[string]string{}
+	for cm, id := range ck.ids {
+		idsMap[cm.ID()] = id
+	}
+
+	// don't recurse more than one level in showing deps
+	depsMap := make([]map[string]string, len(ck.deps))
+	for i, deps := range ck.deps {
+		depsMap[i] = map[string]string{}
+		for _, ck := range deps {
+			depsMap[i]["id"] = ck.CacheKey.ID
+			depsMap[i]["selector"] = ck.Selector.String()
+		}
+	}
+
+	return map[string]any{
+		"id": ck.ID,
+		"digest": ck.digest,
+		"vtx": ck.vtx,
+		"output": ck.output,
+		"indexIDs": ck.indexIDs,
+		"ids": idsMap,
+		"deps": depsMap,
+	}
+}
+
 func (ck *CacheKey) Deps() [][]CacheKeyWithSelector {
 	ck.mu.RLock()
 	defer ck.mu.RUnlock()
diff --git a/solver/cachemanager.go b/solver/cachemanager.go
index 5f5d9f33e230..e5e7e4fb9eab 100644
--- a/solver/cachemanager.go
+++ b/solver/cachemanager.go
@@ -10,6 +10,7 @@ import (
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/util/bklog"
 	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
 )
 
 // NewInMemoryCacheManager creates a new in-memory cache manager
@@ -55,7 +56,28 @@ func (c *cacheManager) ID() string {
 	return c.id
 }
 
-func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) ([]*CacheKey, error) {
+func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) (rcks []*CacheKey, rerr error) {
+	depsField := make([]map[string]any, len(deps))
+	for i, dep := range deps {
+		depsField[i] = dep.TraceFields()
+	}
+	lg := bklog.G(context.TODO()).WithFields(logrus.Fields{
+		"cache_manager": c.id,
+		"op": "query",
+		"deps": depsField,
+		"input": input,
+		"digest": dgst,
+		"output": output,
+		"stack": bklog.TraceLevelOnlyStack(),
+	})
+	defer func() {
+		rcksField := make([]map[string]any, len(rcks))
+		for i, rck := range rcks {
+			rcksField[i] = rck.TraceFields()
+		}
+		lg.WithError(rerr).WithField("return_cachekeys", rcksField).Trace("cache manager")
+	}()
+
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 
@@ -112,7 +134,21 @@ func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst dige
 	return keys, nil
 }
 
-func (c *cacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) {
+func (c *cacheManager) Records(ctx context.Context, ck *CacheKey) (rrecs []*CacheRecord, rerr error) {
+	lg := bklog.G(context.TODO()).WithFields(logrus.Fields{
+		"cache_manager": c.id,
+		"op": "records",
+		"cachekey": ck.TraceFields(),
+		"stack": bklog.TraceLevelOnlyStack(),
+	})
+	defer func() {
+		rrercsField := make([]map[string]any, len(rrecs))
+		for i, rrec := range rrecs {
+			rrercsField[i] = rrec.TraceFields()
+		}
+		lg.WithError(rerr).WithField("return_records", rrercsField).Trace("cache manager")
+	}()
+
 	outs := make([]*CacheRecord, 0)
 	if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error {
 		if c.results.Exists(ctx, r.ID) {
@@ -132,7 +168,17 @@ func (c *cacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecor
 	return outs, nil
 }
 
-func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) {
+func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (rres Result, rerr error) {
+	lg := bklog.G(context.TODO()).WithFields(logrus.Fields{
+		"cache_manager": c.id,
+		"op": "load",
+		"record": rec.TraceFields(),
+		"stack": bklog.TraceLevelOnlyStack(),
+	})
+	defer func() {
+		lg.WithError(rerr).WithField("return_result", rres.ID()).Trace("cache manager")
+	}()
+
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 
@@ -150,6 +196,14 @@ type LoadedResult struct {
 	CacheKey *CacheKey
 }
 
+func (r *LoadedResult) TraceFields() map[string]any {
+	return map[string]any{
+		"result": r.Result.ID(),
+		"cache_result": r.CacheResult.ID,
+		"cache_key": r.CacheKey.TraceFields(),
+	}
+}
+
 func (c *cacheManager) filterResults(m map[string]Result, ck *CacheKey, visited map[string]struct{}) (results []LoadedResult, err error) {
 	id := c.getID(ck)
 	if _, ok := visited[id]; ok {
@@ -187,7 +241,21 @@ func (c *cacheManager) filterResults(m map[string]Result, ck *CacheKey, visited
 	return
 }
 
-func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([]LoadedResult, error) {
+func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) (rres []LoadedResult, rerr error) {
+	lg := bklog.G(context.TODO()).WithFields(logrus.Fields{
+		"cache_manager": c.id,
+		"op": "load_with_parents",
+		"record": rec.TraceFields(),
+		"stack": bklog.TraceLevelOnlyStack(),
+	})
+	defer func() {
+		rresField := make([]map[string]any, len(rres))
+		for i, rres := range rres {
+			rresField[i] = rres.TraceFields()
+		}
+		lg.WithError(rerr).WithField("return_results", rresField).Trace("cache manager")
+	}()
+
 	lwp, ok := c.results.(interface {
 		LoadWithParents(context.Context, CacheResult) (map[string]Result, error)
 	})
@@ -226,7 +294,17 @@ func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([
 	return results, nil
 }
 
-func (c *cacheManager) Save(k *CacheKey, r Result, createdAt time.Time) (*ExportableCacheKey, error) {
+func (c *cacheManager) Save(k *CacheKey, r Result, createdAt time.Time) (rck *ExportableCacheKey, rerr error) {
+	lg := bklog.G(context.TODO()).WithFields(logrus.Fields{
+		"cache_manager": c.id,
+		"op": "save",
+		"result": r.ID(),
+		"stack": bklog.TraceLevelOnlyStack(),
+	})
+	defer func() {
+		lg.WithError(rerr).WithField("return_cachekey", rck.TraceFields()).Trace("cache manager")
+	}()
+
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
diff --git a/solver/llbsolver/mounts/mount.go b/solver/llbsolver/mounts/mount.go
index b61e7e3d1c0c..67eac20d727b 100644
--- a/solver/llbsolver/mounts/mount.go
+++ b/solver/llbsolver/mounts/mount.go
@@ -483,7 +483,7 @@ type cacheRefShare struct {
 func (r *cacheRefShare) clone(ctx context.Context) cache.MutableRef {
 	bklog.G(ctx).WithFields(map[string]any{
 		"key": r.key,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	}).Trace("cloning cache mount ref share")
 	cacheRef := &cacheRef{cacheRefShare: r}
 	if cacheRefCloneHijack != nil {
@@ -498,7 +498,7 @@ func (r *cacheRefShare) clone(ctx context.Context) cache.MutableRef {
 func (r *cacheRefShare) release(ctx context.Context) error {
 	bklog.G(ctx).WithFields(map[string]any{
 		"key": r.key,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	}).Trace("releasing cache mount ref share main")
 	if r.main != nil {
 		delete(r.main.shares, r.key)
@@ -516,7 +516,7 @@ type cacheRef struct {
 func (r *cacheRef) Release(ctx context.Context) error {
 	bklog.G(ctx).WithFields(map[string]any{
 		"key": r.key,
-		"stack": bklog.LazyStackTrace{},
+		"stack": bklog.TraceLevelOnlyStack(),
 	}).Trace("releasing cache mount ref share")
 	if r.main != nil {
 		r.main.mu.Lock()
diff --git a/solver/types.go b/solver/types.go
index 01b344a3af90..ffb9b790b94f 100644
--- a/solver/types.go
+++ b/solver/types.go
@@ -228,6 +228,17 @@ type CacheRecord struct {
 	key *CacheKey
 }
 
+func (ck *CacheRecord) TraceFields() map[string]any {
+	return map[string]any{
+		"id": ck.ID,
+		"size": ck.Size,
+		"createdAt": ck.CreatedAt,
+		"priority": ck.Priority,
+		"cache_manager": ck.cacheManager.ID(),
+		"cache_key": ck.key.TraceFields(),
+	}
+}
+
 // CacheManager determines if there is a result that matches the cache keys
 // generated during the build that could be reused instead of fully
 // reevaluating the vertex and its inputs. There can be multiple cache
diff --git a/util/bklog/log.go b/util/bklog/log.go
index 7d0b1d90df0b..fc0ee756cb7e 100644
--- a/util/bklog/log.go
+++ b/util/bklog/log.go
@@ -63,14 +63,13 @@ func GetLogger(ctx context.Context) (l *logrus.Entry) {
 	return l
 }
 
-// LazyStackTrace lets you include a stack trace as a field's value in a log but only
-// call it when the log level is actually enabled.
-type LazyStackTrace struct{}
-
-func (LazyStackTrace) String() string {
-	return string(debug.Stack())
-}
-
-func (LazyStackTrace) MarshalText() ([]byte, error) {
-	return debug.Stack(), nil
+// TraceLevelOnlyStack returns a stack trace for the current goroutine only if
+// trace level logs are enabled; otherwise it returns an empty string. This ensures
+// we only pay the cost of generating a stack trace when the log entry will actually
+// be emitted.
+func TraceLevelOnlyStack() string {
+	if logrus.GetLevel() == logrus.TraceLevel {
+		return string(debug.Stack())
+	}
+	return ""
 }
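
Note (not part of the diff above): the cachemanager.go changes all repeat one pattern: give the method named return values, build a log entry up front, and emit a single Trace line from a deferred closure so the inputs, the returned values, and any error land in the same entry, with TraceLevelOnlyStack() keeping the debug.Stack() cost out of non-trace runs. The sketch below shows that pattern in isolation under stated assumptions: traceLevelOnlyStack is a local copy of the shape of the new bklog helper, and lookup is a hypothetical stand-in for a method such as Query or Load, so treat it as illustrative only, not code from the PR.

package main

import (
	"context"
	"runtime/debug"

	"github.com/sirupsen/logrus"
)

// traceLevelOnlyStack mirrors the shape of the new bklog.TraceLevelOnlyStack
// helper: the stack is only captured when trace-level logging is enabled.
func traceLevelOnlyStack() string {
	if logrus.GetLevel() == logrus.TraceLevel {
		return string(debug.Stack())
	}
	return ""
}

// lookup is a hypothetical stand-in for a cacheManager method such as Query or
// Load. The named returns (rres, rerr) are read by the deferred closure after
// they have been set, so every exit path is logged with its actual result.
func lookup(ctx context.Context, key string) (rres string, rerr error) {
	lg := logrus.WithContext(ctx).WithFields(logrus.Fields{
		"op":    "lookup",
		"key":   key,
		"stack": traceLevelOnlyStack(), // empty string unless trace level is active
	})
	defer func() {
		lg.WithError(rerr).WithField("return_result", rres).Trace("cache manager")
	}()

	// Real work would happen here; the sketch just returns a derived value.
	return "result-for-" + key, nil
}

func main() {
	logrus.SetLevel(logrus.TraceLevel) // enable trace logs so the deferred entry is emitted
	_, _ = lookup(context.Background(), "example")
}

The named returns are the important design choice: a deferred closure sees the final values of rres and rerr, so one log statement covers both the success and error paths without repeating the fields at every return.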