db: improve pprof labels for compactions
We add a label for the compaction output level.

We also remove the "table-cache" label, which is not very useful: you
can just focus on `load()` / `releaseLoop()` in the profile to get the
same information. Worse, this label "erases" the top-level labels
because the context is not plumbed correctly.
RaduBerinde committed Dec 20, 2024
1 parent d533952 commit 78d5345
Showing 2 changed files with 22 additions and 15 deletions.
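Background on the mechanism, for readers unfamiliar with pprof labels: `pprof.Labels` builds a `LabelSet`, and `pprof.Do` installs it on the current goroutine for the duration of the callback (and on any work that propagates the derived context), so CPU profile samples taken in that window carry the key/value pairs. A minimal standalone sketch of the pattern, not Pebble code (`doWork` and the "L6" value are illustrative):

	package main

	import (
		"context"
		"fmt"
		"runtime/pprof"
	)

	// doWork is a hypothetical stand-in for compaction work; CPU samples taken
	// while it runs are tagged with the label set installed by pprof.Do below.
	func doWork(ctx context.Context) {
		fmt.Println("compacting...")
	}

	func main() {
		labels := pprof.Labels("pebble", "compact", "output-level", "L6")
		pprof.Do(context.Background(), labels, func(ctx context.Context) {
			doWork(ctx)
		})
	}

With labels like these in place, a CPU profile can be narrowed to a single output level, e.g. `go tool pprof -tagfocus=output-level=L6 cpu.prof`, or broken down by level with the `tags` report.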
compaction.go (17 additions & 3 deletions)

@@ -36,8 +36,15 @@ var errEmptyTable = errors.New("pebble: empty table")
 // concurrent excise or ingest-split operation.
 var ErrCancelledCompaction = errors.New("pebble: compaction cancelled by a concurrent operation, will retry compaction")
 
-var compactLabels = pprof.Labels("pebble", "compact")
-var flushLabels = pprof.Labels("pebble", "flush")
+var compactLabelsNoLevel = pprof.Labels("pebble", "compact")
+var compactLabelsPerLevel = func() [manifest.NumLevels]pprof.LabelSet {
+	var labels [manifest.NumLevels]pprof.LabelSet
+	for i := 0; i < manifest.NumLevels; i++ {
+		labels[i] = pprof.Labels("pebble", "compact", "output-level", fmt.Sprintf("L%d", i))
+	}
+	return labels
+}()
+var flushLabels = pprof.Labels("pebble", "flush", "output-level", "L0")
 var gcLabels = pprof.Labels("pebble", "gc")
 
 // expandedCompactionByteSizeLimit is the maximum number of bytes in all
@@ -2175,7 +2182,14 @@ func checkDeleteCompactionHints(
 
 // compact runs one compaction and maybe schedules another call to compact.
 func (d *DB) compact(c *compaction, errChannel chan error) {
-	pprof.Do(context.Background(), compactLabels, func(context.Context) {
+	var labels pprof.LabelSet
+	if c.outputLevel == nil {
+		// Delete-only compactions don't have an output level.
+		labels = compactLabelsNoLevel
+	} else {
+		labels = compactLabelsPerLevel[c.outputLevel.level]
+	}
+	pprof.Do(context.Background(), labels, func(context.Context) {
 		d.mu.Lock()
 		defer d.mu.Unlock()
 		if err := d.compact1(c, errChannel); err != nil {
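Note the design choice here: the per-level label sets are precomputed once at package init (one `LabelSet` per LSM level, plus the level-less `compactLabelsNoLevel` fallback for delete-only compactions), so each compaction only indexes an array rather than constructing a new `LabelSet` on every call.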
file_cache.go (5 additions & 12 deletions)

@@ -10,7 +10,6 @@ import (
 	"fmt"
 	"io"
 	"runtime/debug"
-	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"unsafe"
@@ -85,8 +84,6 @@ func tableNewRangeKeyIter(newIters tableNewIters) keyspanimpl.TableNewSpanIter {
 	}
 }
 
-var fileCacheLabels = pprof.Labels("pebble", "table-cache")
-
 // fileCacheOpts contains the db specific fields of a file cache. This is stored
 // in the fileCacheContainer along with the file cache.
 //
@@ -396,12 +393,10 @@ func (c *fileCacheShard) init(size int) {
 }
 
 func (c *fileCacheShard) releaseLoop() {
-	pprof.Do(context.Background(), fileCacheLabels, func(context.Context) {
-		defer c.releaseLoopExit.Done()
-		for v := range c.releasingCh {
-			v.release(c)
-		}
-	})
+	defer c.releaseLoopExit.Done()
+	for v := range c.releasingCh {
+		v.release(c)
+	}
 }
 
 // checkAndIntersectFilters checks the specific table and block property filters
@@ -915,9 +910,7 @@ func (c *fileCacheShard) findNodeInternal(
 
 	// Note adding to the cache lists must complete before we begin loading the
 	// table as a failure during load will result in the node being unlinked.
-	pprof.Do(context.Background(), fileCacheLabels, func(context.Context) {
-		v.load(ctx, backingFileNum, c, dbOpts)
-	})
+	v.load(ctx, backingFileNum, c, dbOpts)
 	return v
 }
 
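Why removing the wrapper helps: `pprof.Do` derives the new label set from the context it is given, so calling it with `context.Background()` deep inside an already-labeled call path replaces the caller's labels (e.g. "pebble=compact") for the duration of the callback instead of adding to them. A toy sketch of the effect, not Pebble code (the "stage"/"load" key/value is illustrative):

	package main

	import (
		"context"
		"fmt"
		"runtime/pprof"
	)

	func main() {
		pprof.Do(context.Background(), pprof.Labels("pebble", "compact"), func(ctx context.Context) {
			// Plumbed context: the nested Do adds to the outer labels.
			pprof.Do(ctx, pprof.Labels("stage", "load"), func(inner context.Context) {
				v, ok := pprof.Label(inner, "pebble")
				fmt.Println("plumbed ctx:", v, ok) // "compact" true
			})

			// Background context: the outer "pebble=compact" label is dropped, so
			// profile samples inside this callback are no longer attributed to the
			// compaction (the "erases the top-level labels" problem).
			pprof.Do(context.Background(), pprof.Labels("stage", "load"), func(inner context.Context) {
				v, ok := pprof.Label(inner, "pebble")
				fmt.Println("background ctx:", v, ok) // "" false
			})
		})
	}

Since the file cache work is easy to spot by function name (`load`, `releaseLoop`) in a profile anyway, dropping the label is simpler than plumbing the caller's context through.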
