From 78d53457321e62082fa1a4102722f49ec81f6642 Mon Sep 17 00:00:00 2001
From: Radu Berinde
Date: Thu, 19 Dec 2024 11:05:26 -0800
Subject: [PATCH] db: improve pprof labels for compactions

We add a label for the compaction output level. We also remove the
"table-cache" label, which is not very useful - you can just focus on
`load() / releaseLoop()` to get the same information. That label also
"erases" the top-level labels because the context is not plumbed through
correctly.
---
 compaction.go | 20 +++++++++++++++++---
 file_cache.go | 17 +++++------------
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/compaction.go b/compaction.go
index d8540268d3..8de36afbf9 100644
--- a/compaction.go
+++ b/compaction.go
@@ -36,8 +36,15 @@ var errEmptyTable = errors.New("pebble: empty table")
 // concurrent excise or ingest-split operation.
 var ErrCancelledCompaction = errors.New("pebble: compaction cancelled by a concurrent operation, will retry compaction")
 
-var compactLabels = pprof.Labels("pebble", "compact")
-var flushLabels = pprof.Labels("pebble", "flush")
+var compactLabelsNoLevel = pprof.Labels("pebble", "compact")
+var compactLabelsPerLevel = func() [manifest.NumLevels]pprof.LabelSet {
+	var labels [manifest.NumLevels]pprof.LabelSet
+	for i := 0; i < manifest.NumLevels; i++ {
+		labels[i] = pprof.Labels("pebble", "compact", "output-level", fmt.Sprintf("L%d", i))
+	}
+	return labels
+}()
+var flushLabels = pprof.Labels("pebble", "flush", "output-level", "L0")
 var gcLabels = pprof.Labels("pebble", "gc")
 
 // expandedCompactionByteSizeLimit is the maximum number of bytes in all
@@ -2175,7 +2182,14 @@ func checkDeleteCompactionHints(
 
 // compact runs one compaction and maybe schedules another call to compact.
 func (d *DB) compact(c *compaction, errChannel chan error) {
-	pprof.Do(context.Background(), compactLabels, func(context.Context) {
+	var labels pprof.LabelSet
+	if c.outputLevel == nil {
+		// Delete-only compactions don't have an output level.
+		labels = compactLabelsNoLevel
+	} else {
+		labels = compactLabelsPerLevel[c.outputLevel.level]
+	}
+	pprof.Do(context.Background(), labels, func(context.Context) {
 		d.mu.Lock()
 		defer d.mu.Unlock()
 		if err := d.compact1(c, errChannel); err != nil {
diff --git a/file_cache.go b/file_cache.go
index 24607d4e92..5d1f966d3c 100644
--- a/file_cache.go
+++ b/file_cache.go
@@ -10,7 +10,6 @@ import (
 	"fmt"
 	"io"
 	"runtime/debug"
-	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"unsafe"
@@ -85,8 +84,6 @@ func tableNewRangeKeyIter(newIters tableNewIters) keyspanimpl.TableNewSpanIter {
 	}
 }
 
-var fileCacheLabels = pprof.Labels("pebble", "table-cache")
-
 // fileCacheOpts contains the db specific fields of a file cache. This is stored
 // in the fileCacheContainer along with the file cache.
 //
@@ -396,12 +393,10 @@ func (c *fileCacheShard) init(size int) {
 }
 
 func (c *fileCacheShard) releaseLoop() {
-	pprof.Do(context.Background(), fileCacheLabels, func(context.Context) {
-		defer c.releaseLoopExit.Done()
-		for v := range c.releasingCh {
-			v.release(c)
-		}
-	})
+	defer c.releaseLoopExit.Done()
+	for v := range c.releasingCh {
+		v.release(c)
+	}
 }
 
 // checkAndIntersectFilters checks the specific table and block property filters
@@ -915,9 +910,7 @@ func (c *fileCacheShard) findNodeInternal(
 
 	// Note adding to the cache lists must complete before we begin loading the
 	// table as a failure during load will result in the node being unlinked.
-	pprof.Do(context.Background(), fileCacheLabels, func(context.Context) {
-		v.load(ctx, backingFileNum, c, dbOpts)
-	})
+	v.load(ctx, backingFileNum, c, dbOpts)
 	return v
 }
 
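For reference, below is a minimal, self-contained sketch - the doWork/inner helpers are
hypothetical stand-ins, not code from this patch - of how pprof.Do installs labels on a
goroutine and why a nested pprof.Do that starts from context.Background() drops the labels
installed by the outer call, which is the "erases the top-level labels" behavior mentioned
in the commit message:

package main

import (
	"context"
	"runtime/pprof"
)

// inner and doWork are hypothetical stand-ins for code like the file cache's
// load()/releaseLoop().
func inner() {}

func doWork(ctx context.Context) {
	// Problem: context.Background() carries no labels, so this Do starts from
	// an empty label set. Samples taken under inner() lose pebble=compact and
	// output-level=L3 and only carry pebble=table-cache.
	pprof.Do(context.Background(), pprof.Labels("pebble", "table-cache"), func(context.Context) {
		inner()
	})

	// Correct plumbing: reusing the context from the outer Do keeps the
	// existing labels and merges the new one on top of them.
	pprof.Do(ctx, pprof.Labels("pebble", "table-cache"), func(context.Context) {
		inner()
	})
}

func main() {
	// Everything run under this Do is sampled with pebble=compact and
	// output-level=L3 attached to the goroutine.
	outer := pprof.Labels("pebble", "compact", "output-level", "L3")
	pprof.Do(context.Background(), outer, func(ctx context.Context) {
		doWork(ctx)
	})
}

Labels set this way show up on CPU profile samples; in `go tool pprof` they can be
inspected with the `tags` command or filtered with `-tagfocus`.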