fix(core): change name of config flag for cache (#9274)
harshil-goel authored Jan 13, 2025
1 parent ee99a7a commit 02b9d13
Showing 5 changed files with 27 additions and 33 deletions.
14 changes: 9 additions & 5 deletions dgraph/cmd/alpha/run.go
@@ -145,8 +145,12 @@ they form a Raft group and provide synchronous replication.
Flag("percentage",
"Cache percentages summing up to 100 for various caches (FORMAT: PostingListCache,"+
"PstoreBlockCache,PstoreIndexCache)").
Flag("keep-updates",
"Should carry updates in cache or not (bool)").
Flag("delete-on-updates",
"When set as true, we would delete the key from the cache once it's updated. If it's not "+
"we would update the value inside the cache. If the cache gets too full, it starts"+
" to get slow. So if your usecase has a lot of heavy mutations, this should be set"+
" as true. If you are modifying same data again and again, this should be set as"+
" false").
String())

flag.String("raft", worker.RaftDefaults, z.NewSuperFlagHelp(worker.RaftDefaults).
@@ -635,7 +639,7 @@ func run() {
x.AssertTruef(totalCache >= 0, "ERROR: Cache size must be non-negative")

cachePercentage := cache.GetString("percentage")
-keepUpdates := cache.GetBool("keep-updates")
+deleteOnUpdates := cache.GetBool("delete-on-updates")
cachePercent, err := x.GetCachePercentages(cachePercentage, 3)
x.Check(err)
postingListCacheSize := (cachePercent[0] * (totalCache << 20)) / 100
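To make the arithmetic in this hunk concrete, here is a small self-contained Go sketch of how the default size-mb=1024 and percentage=40,40,20 split the budget; the names mirror run(), but everything below is local to the example:

```go
package main

import "fmt"

func main() {
	totalCache := int64(1024)           // size-mb from the --cache superflag
	cachePercent := []int64{40, 40, 20} // PostingListCache, PstoreBlockCache, PstoreIndexCache

	// Same integer math as run(): convert MB to bytes, then take each share.
	postingListCacheSize := (cachePercent[0] * (totalCache << 20)) / 100
	pstoreBlockCacheSize := (cachePercent[1] * (totalCache << 20)) / 100
	pstoreIndexCacheSize := (cachePercent[2] * (totalCache << 20)) / 100

	fmt.Println(postingListCacheSize) // 429496729 bytes, ~409.6 MB
	fmt.Println(pstoreBlockCacheSize) // 429496729 bytes, ~409.6 MB
	fmt.Println(pstoreIndexCacheSize) // 214748364 bytes, ~204.8 MB
}
```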
@@ -658,7 +662,7 @@ func run() {
WALDir: Alpha.Conf.GetString("wal"),
CacheMb: totalCache,
CachePercentage: cachePercentage,
-KeepUpdates: keepUpdates,
+DeleteOnUpdates: deleteOnUpdates,

MutationsMode: worker.AllowMutations,
AuthToken: security.GetString("token"),
@@ -786,7 +790,7 @@ func run() {
// Posting will initialize index which requires schema. Hence, initialize
// schema before calling posting.Init().
schema.Init(worker.State.Pstore)
-posting.Init(worker.State.Pstore, postingListCacheSize, keepUpdates)
+posting.Init(worker.State.Pstore, postingListCacheSize, deleteOnUpdates)
defer posting.Cleanup()
worker.Init(worker.State.Pstore)

4 changes: 2 additions & 2 deletions posting/lists.go
@@ -41,12 +41,12 @@
)

// Init initializes the posting lists package, the in memory and dirty list hash.
-func Init(ps *badger.DB, cacheSize int64, keepUpdates bool) {
+func Init(ps *badger.DB, cacheSize int64, deleteOnUpdates bool) {
pstore = ps
closer = z.NewCloser(1)
go x.MonitorMemoryMetrics(closer)

-memoryLayer = initMemoryLayer(cacheSize, keepUpdates)
+memoryLayer = initMemoryLayer(cacheSize, deleteOnUpdates)
}

func UpdateMaxCost(maxCost int64) {
36 changes: 13 additions & 23 deletions posting/mvcc.go
@@ -63,10 +63,8 @@ type incrRollupi struct {
}

type CachePL struct {
-count int
list *List
lastUpdate uint64
-lastRead time.Time
}

var (
@@ -401,8 +399,8 @@ func (c *Cache) clear() {
}

type MemoryLayer struct {
-keepUpdates bool
-cache *Cache
+deleteOnUpdates bool
+cache *Cache

numDisksRead int
}
@@ -414,9 +412,9 @@ func (ml *MemoryLayer) del(key []byte) {
ml.cache.del(key)
}

-func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {
+func initMemoryLayer(cacheSize int64, deleteOnUpdates bool) *MemoryLayer {
ml := &MemoryLayer{}
-ml.keepUpdates = keepUpdates
+ml.deleteOnUpdates = deleteOnUpdates
if cacheSize > 0 {
cache, err := ristretto.NewCache[[]byte, *CachePL](&ristretto.Config[[]byte, *CachePL]{
// Use 5% of cache memory for storing counters.
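The hunk above is truncated inside the ristretto cache construction. As a rough, self-contained sketch of that kind of setup, assuming ristretto v2's generics API (the diff itself instantiates ristretto.NewCache[[]byte, *CachePL]); the counter/cost split below is illustrative, not the file's exact values:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/v2"
)

func main() {
	cacheSize := int64(400 << 20) // byte budget handed to initMemoryLayer

	cache, err := ristretto.NewCache(&ristretto.Config[[]byte, *string]{
		// Mirror the idea in the diff's comment: a small slice of the
		// budget pays for admission counters, the rest for values.
		// The exact numbers here are illustrative, not the file's.
		NumCounters: int64(float64(cacheSize) * 0.05),
		MaxCost:     int64(float64(cacheSize) * 0.95),
		BufferItems: 64,
	})
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	v := "posting list"
	cache.Set([]byte("key"), &v, 1) // cost 1 for the toy value
	cache.Wait()                    // sets are async; wait before reading

	if got, ok := cache.Get([]byte("key")); ok {
		fmt.Println(*got) // posting list
	}
}
```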
@@ -449,9 +447,7 @@ func initMemoryLayer(cacheSize int64, keepUpdates bool) *MemoryLayer {

func NewCachePL() *CachePL {
return &CachePL{
-count: 0,
-list: nil,
-lastUpdate: 0,
+list: nil,
}
}

@@ -472,7 +468,7 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
return
}

-if !ml.keepUpdates {
+if ml.deleteOnUpdates {
// TODO We should mark the key as deleted instead of directly deleting from the cache.
ml.del([]byte(key))
return
@@ -484,9 +480,8 @@
}

val.lastUpdate = commitTs
-val.count -= 1

-if val.list != nil && ml.keepUpdates {
+if val.list != nil {
p := new(pb.PostingList)
x.Check(proto.Unmarshal(delta, p))
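Taken together, the last two hunks are the behavioral core of the rename: on commit, the memory layer either evicts the key or merges the delta into the cached entry. Below is a toy, self-contained model of that branch; the types and names are illustrative stand-ins, not Dgraph's:

```go
package main

import "fmt"

// memoryLayer is a toy stand-in for Dgraph's MemoryLayer.
type memoryLayer struct {
	deleteOnUpdates bool
	cache           map[string][]string // key -> accumulated deltas
}

func (ml *memoryLayer) updateItemInCache(key, delta string) {
	if ml.deleteOnUpdates {
		// Mutation-heavy workloads: drop the entry so the cache never
		// holds stale, ever-growing values; the next read repopulates it.
		delete(ml.cache, key)
		return
	}
	// Mixed workloads that re-read the same keys: keep the entry hot by
	// merging the delta in place.
	ml.cache[key] = append(ml.cache[key], delta)
}

func main() {
	ml := &memoryLayer{deleteOnUpdates: true, cache: map[string][]string{"k": {"v1"}}}
	ml.updateItemInCache("k", "v2")
	fmt.Println(ml.cache) // map[] -- the key was evicted rather than updated
}
```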

@@ -640,16 +635,12 @@ func (c *CachePL) Set(l *List, readTs uint64) {
func (ml *MemoryLayer) readFromCache(key []byte, readTs uint64) *List {
cacheItem, ok := ml.cache.get(key)

-if ok {
-cacheItem.count += 1
-cacheItem.lastRead = time.Now()
-if cacheItem.list != nil && cacheItem.list.minTs <= readTs {
-cacheItem.list.RLock()
-lCopy := copyList(cacheItem.list)
-cacheItem.list.RUnlock()
-checkForRollup(key, lCopy)
-return lCopy
-}
+if ok && cacheItem.list != nil && cacheItem.list.minTs <= readTs {
+cacheItem.list.RLock()
+lCopy := copyList(cacheItem.list)
+cacheItem.list.RUnlock()
+checkForRollup(key, lCopy)
+return lCopy
}
return nil
}
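With the count/lastRead bookkeeping gone, what remains in readFromCache is a copy-on-read pattern: a cached list is returned only if it is visible at readTs, and only as a copy taken under a read lock. A minimal self-contained sketch of that pattern, using toy types rather than Dgraph's:

```go
package main

import (
	"fmt"
	"sync"
)

// list is a toy stand-in for Dgraph's posting List.
type list struct {
	mu    sync.RWMutex
	minTs uint64
	data  []string
}

func copyList(l *list) *list {
	return &list{minTs: l.minTs, data: append([]string(nil), l.data...)}
}

// readVisible mimics the shape of readFromCache: hand out a copy, and
// only when the cached version is visible at readTs.
func readVisible(l *list, readTs uint64) *list {
	if l == nil || l.minTs > readTs {
		return nil
	}
	l.mu.RLock()
	defer l.mu.RUnlock()
	return copyList(l) // callers get a copy; the cached list is never shared
}

func main() {
	l := &list{minTs: 5, data: []string{"a"}}
	fmt.Println(readVisible(l, 4))      // <nil>: not visible at ts=4
	fmt.Println(readVisible(l, 7).data) // [a]
}
```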
@@ -679,7 +670,6 @@ func (ml *MemoryLayer) saveInCache(key []byte, l *List) {
l.RLock()
defer l.RUnlock()
cacheItem := NewCachePL()
-cacheItem.count = 1
cacheItem.list = copyList(l)
cacheItem.lastUpdate = l.maxTs
ml.cache.set(key, cacheItem)
4 changes: 2 additions & 2 deletions worker/config.go
@@ -63,11 +63,11 @@ type Options struct {
CachePercentage string
// CacheMb is the total memory allocated between all the caches.
CacheMb int64
-// KeepUpdates is the parameter that allows the user to set if the cache should keep the items that were
+// DeleteOnUpdates lets the user choose whether the cache drops items that were
// just mutated instead of keeping them. Keeping these items is good when there is a mixed workload where
// you are updating the same element multiple times. However, for a heavy mutation workload, not keeping
// these items is better, as keeping them bloats the cache, making it slow.
-KeepUpdates bool
+DeleteOnUpdates bool

Audit *x.LoggerConf

2 changes: 1 addition & 1 deletion worker/server_state.go
@@ -52,7 +52,7 @@ const (
ZeroLimitsDefaults = `uid-lease=0; refill-interval=30s; disable-admin-http=false;`
GraphQLDefaults = `introspection=true; debug=false; extensions=true; poll-interval=1s; ` +
`lambda-url=;`
-CacheDefaults = `size-mb=1024; percentage=40,40,20; keep-updates=false`
+CacheDefaults = `size-mb=1024; percentage=40,40,20; delete-on-updates=true`
FeatureFlagsDefaults = `normalize-compatibility-mode=`
)
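Because the renamed option lives in a superflag, an override only needs to name the options it changes; the rest fall back to CacheDefaults. A hedged sketch of that merge, assuming the z.SuperFlag API from ristretto that Dgraph uses for these flags (the v2 import path is an assumption):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/v2/z"
)

func main() {
	// New defaults from worker/server_state.go.
	const cacheDefaults = `size-mb=1024; percentage=40,40,20; delete-on-updates=true`

	// A user passing --cache "delete-on-updates=false" keeps all other defaults.
	cache := z.NewSuperFlag(`delete-on-updates=false`).MergeAndCheckDefault(cacheDefaults)

	fmt.Println(cache.GetInt64("size-mb"))          // 1024
	fmt.Println(cache.GetString("percentage"))      // 40,40,20
	fmt.Println(cache.GetBool("delete-on-updates")) // false
}
```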

