Parallel download blocks - Follow up of #5475 (#5493)
* Download blocks in parallel

Signed-off-by: Alan Protasio <[email protected]>

* Remove the go func

Signed-off-by: Alan Protasio <[email protected]>

* Doc

Signed-off-by: Alan Protasio <[email protected]>

* CHANGELOG

Signed-off-by: Alan Protasio <[email protected]>

* Doc

Signed-off-by: alanprot <[email protected]>

* Address comments

Signed-off-by: alanprot <[email protected]>

* Fix typo

Signed-off-by: Alan Protasio <[email protected]>
alanprot authored Jul 14, 2022
1 parent cd1f80e commit ef3adc5
Showing 6 changed files with 120 additions and 92 deletions.
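
At a high level, the change wraps the per-block download loop in an errgroup whose limit is the new --compact.blocks-fetch-concurrency flag. The snippet below is a minimal, self-contained sketch of that pattern, not the Thanos code itself; downloadBlock and the block IDs are hypothetical stand-ins for block.Download and real ULIDs.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// downloadBlock is a hypothetical stand-in for block.Download.
func downloadBlock(ctx context.Context, id string) error {
	select {
	case <-time.After(100 * time.Millisecond):
		fmt.Println("downloaded block", id)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	blocks := []string{"01A", "01B", "01C", "01D"}
	fetchConcurrency := 2 // plays the role of --compact.blocks-fetch-concurrency

	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(fetchConcurrency) // at most fetchConcurrency downloads in flight

	for _, id := range blocks {
		id := id // copy the loop variable before handing it to the goroutine
		g.Go(func() error {
			return downloadBlock(ctx, id)
		})
	}

	if err := g.Wait(); err != nil {
		fmt.Println("download failed:", err)
	}
}
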
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -21,6 +21,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
- [#5472](https://github.com/thanos-io/thanos/pull/5472) Receive: add new tenant metrics to example dashboard.
- [#5475](https://github.com/thanos-io/thanos/pull/5475) Compact/Store: Added `--block-files-concurrency` allowing to configure the number of goroutines used to download/upload block files during compaction.
- [#5470](https://github.com/thanos-io/thanos/pull/5470) Receive: Implement exposing TSDB stats for all tenants
- [#5493](https://github.com/thanos-io/thanos/pull/5493) Compact: Added `--compact.blocks-fetch-concurrency` allowing to configure the number of goroutines used to download blocks during compaction.

### Changed

4 changes: 4 additions & 0 deletions cmd/thanos/compact.go
@@ -350,6 +350,7 @@ func runCompact(
compactMetrics.blocksMarked.WithLabelValues(metadata.NoCompactMarkFilename, metadata.OutOfOrderChunksNoCompactReason),
metadata.HashFunc(conf.hashFunc),
conf.blockFilesConcurrency,
conf.compactBlocksFetchConcurrency,
)
tsdbPlanner := compact.NewPlanner(logger, levels, noCompactMarkerFilter)
planner := compact.WithLargeTotalIndexSizeFilter(
@@ -637,6 +638,7 @@ type compactConfig struct {
cleanupBlocksInterval time.Duration
compactionConcurrency int
downsampleConcurrency int
compactBlocksFetchConcurrency int
deleteDelay model.Duration
dedupReplicaLabels []string
selectorRelabelConf extflag.PathOrContent
@@ -703,6 +705,8 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {

cmd.Flag("compact.concurrency", "Number of goroutines to use when compacting groups.").
Default("1").IntVar(&cc.compactionConcurrency)
cmd.Flag("compact.blocks-fetch-concurrency", "Number of goroutines to use when download block during compaction.").
Default("1").IntVar(&cc.compactBlocksFetchConcurrency)
cmd.Flag("downsample.concurrency", "Number of goroutines to use when downsampling blocks.").
Default("1").IntVar(&cc.downsampleConcurrency)

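
The new flag is registered with the same Default("1").IntVar pattern as the existing concurrency flags. As a rough, self-contained illustration of that pattern (using plain kingpin v2 rather than Thanos' extkingpin wrapper; the application name and variable here are hypothetical):

package main

import (
	"fmt"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example-compact", "sketch of concurrency flag registration")

	var blocksFetchConcurrency int
	app.Flag("compact.blocks-fetch-concurrency",
		"Number of goroutines to use when downloading blocks during compaction.").
		Default("1").IntVar(&blocksFetchConcurrency)

	kingpin.MustParse(app.Parse(os.Args[1:]))
	fmt.Println("blocks fetch concurrency:", blocksFetchConcurrency)
}
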
3 changes: 3 additions & 0 deletions docs/components/compact.md
@@ -297,6 +297,9 @@ Flags:
--bucket-web-label=BUCKET-WEB-LABEL
Prometheus label to use as timeline title in the
bucket web UI
--compact.blocks-fetch-concurrency=1
Number of goroutines to use when downloading
blocks during compaction.
--compact.cleanup-interval=5m
How often we should clean up partially uploaded
blocks and blocks with deletion mark in the
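
Worth noting: this flag composes with --block-files-concurrency, added in #5475. The outer limit bounds how many blocks are downloaded at once, while the inner one bounds how many files are fetched per block (via objstore.WithFetchConcurrency), so in the worst case roughly blocks-fetch-concurrency × block-files-concurrency object-storage reads can be in flight during the download phase. A back-of-the-envelope sketch, with assumed example values rather than defaults:

package main

import "fmt"

func main() {
	// Assumed example values, not defaults.
	blocksFetchConcurrency := 4 // --compact.blocks-fetch-concurrency
	blockFilesConcurrency := 8  // --block-files-concurrency

	// Rough upper bound on concurrent object-storage reads while downloading.
	fmt.Println("max in-flight file downloads ~", blocksFetchConcurrency*blockFilesConcurrency)
}
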
194 changes: 107 additions & 87 deletions pkg/compact/compact.go
@@ -219,20 +219,21 @@ type Grouper interface {
// DefaultGrouper is the Thanos built-in grouper. It groups blocks based on downsample
// resolution and block's labels.
type DefaultGrouper struct {
bkt objstore.Bucket
logger log.Logger
acceptMalformedIndex bool
enableVerticalCompaction bool
compactions *prometheus.CounterVec
compactionRunsStarted *prometheus.CounterVec
compactionRunsCompleted *prometheus.CounterVec
compactionFailures *prometheus.CounterVec
verticalCompactions *prometheus.CounterVec
garbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
blockFilesConcurrency int
bkt objstore.Bucket
logger log.Logger
acceptMalformedIndex bool
enableVerticalCompaction bool
compactions *prometheus.CounterVec
compactionRunsStarted *prometheus.CounterVec
compactionRunsCompleted *prometheus.CounterVec
compactionFailures *prometheus.CounterVec
verticalCompactions *prometheus.CounterVec
garbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
blockFilesConcurrency int
compactBlocksFetchConcurrency int
}

// NewDefaultGrouper makes a new DefaultGrouper.
@@ -247,6 +248,7 @@ func NewDefaultGrouper(
blocksMarkedForNoCompact prometheus.Counter,
hashFunc metadata.HashFunc,
blockFilesConcurrency int,
compactBlocksFetchConcurrency int,
) *DefaultGrouper {
return &DefaultGrouper{
bkt: bkt,
@@ -273,11 +275,12 @@ func NewDefaultGrouper(
Name: "thanos_compact_group_vertical_compactions_total",
Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.",
}, []string{"group"}),
blocksMarkedForNoCompact: blocksMarkedForNoCompact,
garbageCollectedBlocks: garbageCollectedBlocks,
blocksMarkedForDeletion: blocksMarkedForDeletion,
hashFunc: hashFunc,
blockFilesConcurrency: blockFilesConcurrency,
blocksMarkedForNoCompact: blocksMarkedForNoCompact,
garbageCollectedBlocks: garbageCollectedBlocks,
blocksMarkedForDeletion: blocksMarkedForDeletion,
hashFunc: hashFunc,
blockFilesConcurrency: blockFilesConcurrency,
compactBlocksFetchConcurrency: compactBlocksFetchConcurrency,
}
}

@@ -308,6 +311,7 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro
g.blocksMarkedForNoCompact,
g.hashFunc,
g.blockFilesConcurrency,
g.compactBlocksFetchConcurrency,
)
if err != nil {
return nil, errors.Wrap(err, "create compaction group")
@@ -328,25 +332,26 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro
// Group captures a set of blocks that have the same origin labels and downsampling resolution.
// Those blocks generally contain the same series and can thus efficiently be compacted.
type Group struct {
logger log.Logger
bkt objstore.Bucket
key string
labels labels.Labels
resolution int64
mtx sync.Mutex
metasByMinTime []*metadata.Meta
acceptMalformedIndex bool
enableVerticalCompaction bool
compactions prometheus.Counter
compactionRunsStarted prometheus.Counter
compactionRunsCompleted prometheus.Counter
compactionFailures prometheus.Counter
verticalCompactions prometheus.Counter
groupGarbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
blockFilesConcurrency int
logger log.Logger
bkt objstore.Bucket
key string
labels labels.Labels
resolution int64
mtx sync.Mutex
metasByMinTime []*metadata.Meta
acceptMalformedIndex bool
enableVerticalCompaction bool
compactions prometheus.Counter
compactionRunsStarted prometheus.Counter
compactionRunsCompleted prometheus.Counter
compactionFailures prometheus.Counter
verticalCompactions prometheus.Counter
groupGarbageCollectedBlocks prometheus.Counter
blocksMarkedForDeletion prometheus.Counter
blocksMarkedForNoCompact prometheus.Counter
hashFunc metadata.HashFunc
blockFilesConcurrency int
compactBlocksFetchConcurrency int
}

// NewGroup returns a new compaction group.
@@ -368,6 +373,7 @@ func NewGroup(
blocksMarkedForNoCompact prometheus.Counter,
hashFunc metadata.HashFunc,
blockFilesConcurrency int,
compactBlocksFetchConcurrency int,
) (*Group, error) {
if logger == nil {
logger = log.NewNopLogger()
@@ -378,23 +384,24 @@ func NewGroup(
}

g := &Group{
logger: logger,
bkt: bkt,
key: key,
labels: lset,
resolution: resolution,
acceptMalformedIndex: acceptMalformedIndex,
enableVerticalCompaction: enableVerticalCompaction,
compactions: compactions,
compactionRunsStarted: compactionRunsStarted,
compactionRunsCompleted: compactionRunsCompleted,
compactionFailures: compactionFailures,
verticalCompactions: verticalCompactions,
groupGarbageCollectedBlocks: groupGarbageCollectedBlocks,
blocksMarkedForDeletion: blocksMarkedForDeletion,
blocksMarkedForNoCompact: blocksMarkedForNoCompact,
hashFunc: hashFunc,
blockFilesConcurrency: blockFilesConcurrency,
logger: logger,
bkt: bkt,
key: key,
labels: lset,
resolution: resolution,
acceptMalformedIndex: acceptMalformedIndex,
enableVerticalCompaction: enableVerticalCompaction,
compactions: compactions,
compactionRunsStarted: compactionRunsStarted,
compactionRunsCompleted: compactionRunsCompleted,
compactionFailures: compactionFailures,
verticalCompactions: verticalCompactions,
groupGarbageCollectedBlocks: groupGarbageCollectedBlocks,
blocksMarkedForDeletion: blocksMarkedForDeletion,
blocksMarkedForNoCompact: blocksMarkedForNoCompact,
hashFunc: hashFunc,
blockFilesConcurrency: blockFilesConcurrency,
compactBlocksFetchConcurrency: compactBlocksFetchConcurrency,
}
return g, nil
}
@@ -1007,53 +1014,66 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp

// Once we have a plan we need to download the actual data.
begin := time.Now()
g, errCtx := errgroup.WithContext(ctx)
g.SetLimit(cg.compactBlocksFetchConcurrency)

toCompactDirs := make([]string, 0, len(toCompact))
for _, meta := range toCompact {
bdir := filepath.Join(dir, meta.ULID.String())
for _, s := range meta.Compaction.Sources {
for _, m := range toCompact {
bdir := filepath.Join(dir, m.ULID.String())
for _, s := range m.Compaction.Sources {
if _, ok := uniqueSources[s]; ok {
return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", toCompact))
}
uniqueSources[s] = struct{}{}
}

tracing.DoInSpanWithErr(ctx, "compaction_block_download", func(ctx context.Context) error {
err = block.Download(ctx, cg.logger, cg.bkt, meta.ULID, bdir, objstore.WithFetchConcurrency(cg.blockFilesConcurrency))
return err
}, opentracing.Tags{"block.id": meta.ULID})
if err != nil {
return false, ulid.ULID{}, retry(errors.Wrapf(err, "download block %s", meta.ULID))
}
func(ctx context.Context, meta *metadata.Meta) {
g.Go(func() error {
tracing.DoInSpanWithErr(ctx, "compaction_block_download", func(ctx context.Context) error {
err = block.Download(ctx, cg.logger, cg.bkt, meta.ULID, bdir, objstore.WithFetchConcurrency(cg.blockFilesConcurrency))
return err
}, opentracing.Tags{"block.id": meta.ULID})
if err != nil {
return retry(errors.Wrapf(err, "download block %s", meta.ULID))
}

// Ensure all input blocks are valid.
var stats block.HealthStats
tracing.DoInSpanWithErr(ctx, "compaction_block_health_stats", func(ctx context.Context) error {
stats, err = block.GatherIndexHealthStats(cg.logger, filepath.Join(bdir, block.IndexFilename), meta.MinTime, meta.MaxTime)
return err
}, opentracing.Tags{"block.id": meta.ULID})
if err != nil {
return false, ulid.ULID{}, errors.Wrapf(err, "gather index issues for block %s", bdir)
}
// Ensure all input blocks are valid.
var stats block.HealthStats
tracing.DoInSpanWithErr(ctx, "compaction_block_health_stats", func(ctx context.Context) error {
stats, err = block.GatherIndexHealthStats(cg.logger, filepath.Join(bdir, block.IndexFilename), meta.MinTime, meta.MaxTime)
return err
}, opentracing.Tags{"block.id": meta.ULID})
if err != nil {
return errors.Wrapf(err, "gather index issues for block %s", bdir)
}

if err := stats.CriticalErr(); err != nil {
return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels))
}
if err := stats.CriticalErr(); err != nil {
return halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels))
}

if err := stats.OutOfOrderChunksErr(); err != nil {
return false, ulid.ULID{}, outOfOrderChunkError(errors.Wrapf(err, "blocks with out-of-order chunks are dropped from compaction: %s", bdir), meta.ULID)
}
if err := stats.OutOfOrderChunksErr(); err != nil {
return outOfOrderChunkError(errors.Wrapf(err, "blocks with out-of-order chunks are dropped from compaction: %s", bdir), meta.ULID)
}

if err := stats.Issue347OutsideChunksErr(); err != nil {
return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID)
}
if err := stats.Issue347OutsideChunksErr(); err != nil {
return issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID)
}

if err := stats.PrometheusIssue5372Err(); !cg.acceptMalformedIndex && err != nil {
return errors.Wrapf(err,
"block id %s, try running with --debug.accept-malformed-index", meta.ULID)
}
return nil
})
}(errCtx, m)

if err := stats.PrometheusIssue5372Err(); !cg.acceptMalformedIndex && err != nil {
return false, ulid.ULID{}, errors.Wrapf(err,
"block id %s, try running with --debug.accept-malformed-index", meta.ULID)
}
toCompactDirs = append(toCompactDirs, bdir)
}

if err := g.Wait(); err != nil {
return false, ulid.ULID{}, err
}

level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds())

begin = time.Now()
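
In the hunk above, each download goroutine is started through an immediately invoked closure, func(ctx context.Context, meta *metadata.Meta) { g.Go(...) }(errCtx, m), so the group context and the current block's meta are passed as arguments rather than captured from the loop variable, which before Go 1.22 is reused across iterations. A minimal sketch of that idiom, with a hypothetical process function standing in for the per-block download and verification:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// process is a hypothetical stand-in for the per-block download and checks.
func process(ctx context.Context, item string) error {
	fmt.Println("processing", item)
	return nil
}

func main() {
	items := []string{"block-a", "block-b", "block-c"}

	g, errCtx := errgroup.WithContext(context.Background())
	g.SetLimit(2)

	for _, it := range items {
		// Hand the current values to the closure as arguments, mirroring
		// the (errCtx, m) call in the compaction code above.
		func(ctx context.Context, item string) {
			g.Go(func() error {
				return process(ctx, item)
			})
		}(errCtx, it)
	}

	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
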
4 changes: 2 additions & 2 deletions pkg/compact/compact_e2e_test.go
@@ -139,7 +139,7 @@ func TestSyncer_GarbageCollect_e2e(t *testing.T) {
testutil.Ok(t, sy.GarbageCollect(ctx))

// Only the level 3 block, the last source block in both resolutions should be left.
grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, blockMarkedForNoCompact, metadata.NoneFunc, 1)
grouper := NewDefaultGrouper(nil, bkt, false, false, nil, blocksMarkedForDeletion, garbageCollectedBlocks, blockMarkedForNoCompact, metadata.NoneFunc, 1, 1)
groups, err := grouper.Groups(sy.Metas())
testutil.Ok(t, err)

@@ -214,7 +214,7 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
testutil.Ok(t, err)

planner := NewPlanner(logger, []int64{1000, 3000}, noCompactMarkerFilter)
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMaredForNoCompact, metadata.NoneFunc, 1)
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, blocksMarkedForDeletion, garbageCollectedBlocks, blocksMaredForNoCompact, metadata.NoneFunc, 1, 1)
bComp, err := NewBucketCompactor(logger, sy, grouper, planner, comp, dir, bkt, 2, true)
testutil.Ok(t, err)

Expand Down
6 changes: 3 additions & 3 deletions pkg/compact/compact_test.go
@@ -210,7 +210,7 @@ func TestRetentionProgressCalculate(t *testing.T) {

var bkt objstore.Bucket
temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for compact progress tests"})
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1)
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1)

type groupedResult map[string]float64

@@ -376,7 +376,7 @@ func TestCompactProgressCalculate(t *testing.T) {

var bkt objstore.Bucket
temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for compact progress tests"})
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1)
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1)

for _, tcase := range []struct {
testName string
@@ -498,7 +498,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {

var bkt objstore.Bucket
temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for downsample progress tests"})
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1)
grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1)

for _, tcase := range []struct {
testName string
