Skip to content

Commit f52f8ad

Browse files
authored
chore: Clarify compression package (#14257)
This PR renames "encoding" to "codec" in the compression package to remove the cognitive dissonance. It also removes the `Enc` prefix for codec identifiers, so that they adhere to Go's naming best practices, e.g. `compression.EncGZIP` becomes `compression.GZIP` when used in a different package. Signed-off-by: Christian Haudum <[email protected]>
1 parent 84788ad commit f52f8ad

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

56 files changed

+306
-304
lines changed

pkg/bloombuild/builder/builder.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ import (
3434
)
3535

3636
// TODO(chaudum): Make configurable via (per-tenant?) setting.
37-
var blockCompressionAlgo = compression.EncNone
37+
var defaultBlockCompressionCodec = compression.None
3838

3939
type Builder struct {
4040
services.Service
@@ -336,7 +336,7 @@ func (b *Builder) processTask(
336336
return nil, fmt.Errorf("failed to get client: %w", err)
337337
}
338338

339-
blockEnc, err := compression.ParseEncoding(b.limits.BloomBlockEncoding(task.Tenant))
339+
blockEnc, err := compression.ParseCodec(b.limits.BloomBlockEncoding(task.Tenant))
340340
if err != nil {
341341
return nil, fmt.Errorf("failed to parse block encoding: %w", err)
342342
}
@@ -407,7 +407,7 @@ func (b *Builder) processTask(
407407
blockCt++
408408
blk := newBlocks.At()
409409

410-
built, err := bloomshipper.BlockFrom(blockCompressionAlgo, tenant, task.Table.Addr(), blk)
410+
built, err := bloomshipper.BlockFrom(defaultBlockCompressionCodec, tenant, task.Table.Addr(), blk)
411411
if err != nil {
412412
level.Error(logger).Log("msg", "failed to build block", "err", err)
413413
if err = blk.Reader().Cleanup(); err != nil {

pkg/bloombuild/builder/spec_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v2.Iterator[*v1.Ser
115115

116116
func TestSimpleBloomGenerator(t *testing.T) {
117117
const maxBlockSize = 100 << 20 // 100MB
118-
for _, enc := range []compression.Encoding{compression.EncNone, compression.EncGZIP, compression.EncSnappy} {
118+
for _, enc := range []compression.Codec{compression.None, compression.GZIP, compression.Snappy} {
119119
for _, tc := range []struct {
120120
desc string
121121
fromSchema, toSchema v1.BlockOptions

pkg/bloombuild/common/tsdb.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ func (b *BloomTSDBStore) LoadTSDB(
102102
}
103103
defer data.Close()
104104

105-
decompressorPool := compression.GetReaderPool(compression.EncGZIP)
105+
decompressorPool := compression.GetReaderPool(compression.GZIP)
106106
decompressor, err := decompressorPool.GetReader(data)
107107
if err != nil {
108108
return nil, errors.Wrap(err, "failed to get decompressor")

pkg/bloombuild/planner/planner_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
188188
writer := v1.NewMemoryBlockWriter(indexBuf, bloomsBuf)
189189
reader := v1.NewByteReader(indexBuf, bloomsBuf)
190190

191-
blockOpts := v1.NewBlockOptions(compression.EncNone, 0, 0)
191+
blockOpts := v1.NewBlockOptions(compression.None, 0, 0)
192192

193193
builder, err := v1.NewBlockBuilder(blockOpts, writer)
194194
if err != nil {
@@ -202,7 +202,7 @@ func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
202202
block := v1.NewBlock(reader, v1.NewMetrics(nil))
203203

204204
buf := bytes.NewBuffer(nil)
205-
if err := v1.TarCompress(ref.Encoding, buf, block.Reader()); err != nil {
205+
if err := v1.TarCompress(ref.Codec, buf, block.Reader()); err != nil {
206206
return bloomshipper.Block{}, err
207207
}
208208

pkg/chunkenc/dumb_chunk.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ func (c *dumbChunk) Utilization() float64 {
7070
return float64(len(c.entries)) / float64(tmpNumEntries)
7171
}
7272

73-
func (c *dumbChunk) Encoding() compression.Encoding { return compression.EncNone }
73+
func (c *dumbChunk) Encoding() compression.Codec { return compression.None }
7474

7575
// Returns an iterator that goes from _most_ recent to _least_ recent (ie,
7676
// backwards).

pkg/chunkenc/interface.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ type Chunk interface {
6868
UncompressedSize() int
6969
CompressedSize() int
7070
Close() error
71-
Encoding() compression.Encoding
71+
Encoding() compression.Codec
7272
Rebound(start, end time.Time, filter filter.Func) (Chunk, error)
7373
}
7474

pkg/chunkenc/memchunk.go

+7-7
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ type MemChunk struct {
132132
head HeadBlock
133133

134134
format byte
135-
encoding compression.Encoding
135+
encoding compression.Codec
136136
headFmt HeadBlockFmt
137137

138138
// compressed size of chunk. Set when chunk is cut or while decoding chunk from storage.
@@ -355,7 +355,7 @@ type entry struct {
355355
}
356356

357357
// NewMemChunk returns a new in-mem chunk.
358-
func NewMemChunk(chunkFormat byte, enc compression.Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
358+
func NewMemChunk(chunkFormat byte, enc compression.Codec, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
359359
return newMemChunkWithFormat(chunkFormat, enc, head, blockSize, targetSize)
360360
}
361361

@@ -370,7 +370,7 @@ func panicIfInvalidFormat(chunkFmt byte, head HeadBlockFmt) {
370370
}
371371

372372
// NewMemChunk returns a new in-mem chunk.
373-
func newMemChunkWithFormat(format byte, enc compression.Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
373+
func newMemChunkWithFormat(format byte, enc compression.Codec, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
374374
panicIfInvalidFormat(format, head)
375375

376376
symbolizer := newSymbolizer()
@@ -414,10 +414,10 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
414414
bc.format = version
415415
switch version {
416416
case ChunkFormatV1:
417-
bc.encoding = compression.EncGZIP
417+
bc.encoding = compression.GZIP
418418
case ChunkFormatV2, ChunkFormatV3, ChunkFormatV4:
419419
// format v2+ has a byte for block encoding.
420-
enc := compression.Encoding(db.byte())
420+
enc := compression.Codec(db.byte())
421421
if db.err() != nil {
422422
return nil, errors.Wrap(db.err(), "verifying encoding")
423423
}
@@ -777,7 +777,7 @@ func MemchunkFromCheckpoint(chk, head []byte, desiredIfNotUnordered HeadBlockFmt
777777
}
778778

779779
// Encoding implements Chunk.
780-
func (c *MemChunk) Encoding() compression.Encoding {
780+
func (c *MemChunk) Encoding() compression.Codec {
781781
return c.encoding
782782
}
783783

@@ -1173,7 +1173,7 @@ func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, err
11731173
// then allows us to bind a decoding context to a block when requested, but otherwise helps reduce the
11741174
// chances of chunk<>block encoding drift in the codebase as the latter is parameterized by the former.
11751175
type encBlock struct {
1176-
enc compression.Encoding
1176+
enc compression.Codec
11771177
format byte
11781178
symbolizer *symbolizer
11791179
block

pkg/chunkenc/memchunk_test.go

+28-28
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,16 @@ import (
3232
"github.com/grafana/loki/v3/pkg/util/filter"
3333
)
3434

35-
var testEncodings = []compression.Encoding{
36-
compression.EncNone,
37-
compression.EncGZIP,
38-
compression.EncLZ4_64k,
39-
compression.EncLZ4_256k,
40-
compression.EncLZ4_1M,
41-
compression.EncLZ4_4M,
42-
compression.EncSnappy,
43-
compression.EncFlate,
44-
compression.EncZstd,
35+
var testEncodings = []compression.Codec{
36+
compression.None,
37+
compression.GZIP,
38+
compression.LZ4_64k,
39+
compression.LZ4_256k,
40+
compression.LZ4_1M,
41+
compression.LZ4_4M,
42+
compression.Snappy,
43+
compression.Flate,
44+
compression.Zstd,
4545
}
4646

4747
var (
@@ -299,7 +299,7 @@ func TestCorruptChunk(t *testing.T) {
299299
func TestReadFormatV1(t *testing.T) {
300300
t.Parallel()
301301

302-
c := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
302+
c := NewMemChunk(ChunkFormatV3, compression.GZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
303303
fillChunk(c)
304304
// overrides to v1 for testing that specific version.
305305
c.format = ChunkFormatV1
@@ -391,7 +391,7 @@ func TestRoundtripV2(t *testing.T) {
391391
}
392392
}
393393

394-
func testNameWithFormats(enc compression.Encoding, chunkFormat byte, headBlockFmt HeadBlockFmt) string {
394+
func testNameWithFormats(enc compression.Codec, chunkFormat byte, headBlockFmt HeadBlockFmt) string {
395395
return fmt.Sprintf("encoding:%v chunkFormat:%v headBlockFmt:%v", enc, chunkFormat, headBlockFmt)
396396
}
397397

@@ -558,7 +558,7 @@ func TestChunkFilling(t *testing.T) {
558558
func TestGZIPChunkTargetSize(t *testing.T) {
559559
t.Parallel()
560560

561-
chk := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
561+
chk := NewMemChunk(ChunkFormatV3, compression.GZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
562562

563563
lineSize := 512
564564
entry := &logproto.Entry{
@@ -681,7 +681,7 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) {
681681
t.Run(testName, func(t *testing.T) {
682682
t.Parallel()
683683

684-
tester(t, NewMemChunk(ChunkFormatV3, compression.EncGZIP, f, testBlockSize, testTargetSize))
684+
tester(t, NewMemChunk(ChunkFormatV3, compression.GZIP, f, testBlockSize, testTargetSize))
685685
})
686686
}
687687
}
@@ -726,7 +726,7 @@ func TestChunkSize(t *testing.T) {
726726
}
727727

728728
func TestChunkStats(t *testing.T) {
729-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, DefaultTestHeadBlockFmt, testBlockSize, 0)
729+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, DefaultTestHeadBlockFmt, testBlockSize, 0)
730730
first := time.Now()
731731
entry := &logproto.Entry{
732732
Timestamp: first,
@@ -968,7 +968,7 @@ func BenchmarkBackwardIterator(b *testing.B) {
968968
for _, bs := range testBlockSizes {
969969
b.Run(humanize.Bytes(uint64(bs)), func(b *testing.B) {
970970
b.ReportAllocs()
971-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, DefaultTestHeadBlockFmt, bs, testTargetSize)
971+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, DefaultTestHeadBlockFmt, bs, testTargetSize)
972972
_ = fillChunk(c)
973973
b.ResetTimer()
974974
for n := 0; n < b.N; n++ {
@@ -1082,7 +1082,7 @@ func BenchmarkHeadBlockSampleIterator(b *testing.B) {
10821082
func TestMemChunk_IteratorBounds(t *testing.T) {
10831083
createChunk := func() *MemChunk {
10841084
t.Helper()
1085-
c := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, 1e6, 1e6)
1085+
c := NewMemChunk(ChunkFormatV3, compression.None, DefaultTestHeadBlockFmt, 1e6, 1e6)
10861086

10871087
if _, err := c.Append(&logproto.Entry{
10881088
Timestamp: time.Unix(0, 1),
@@ -1168,9 +1168,9 @@ func TestMemchunkLongLine(t *testing.T) {
11681168
func TestBytesWith(t *testing.T) {
11691169
t.Parallel()
11701170

1171-
exp, err := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith(nil)
1171+
exp, err := NewMemChunk(ChunkFormatV3, compression.None, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith(nil)
11721172
require.Nil(t, err)
1173-
out, err := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith([]byte{1, 2, 3})
1173+
out, err := NewMemChunk(ChunkFormatV3, compression.None, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith([]byte{1, 2, 3})
11741174
require.Nil(t, err)
11751175

11761176
require.Equal(t, exp, out)
@@ -1181,8 +1181,8 @@ func TestCheckpointEncoding(t *testing.T) {
11811181

11821182
blockSize, targetSize := 256*1024, 1500*1024
11831183
for _, f := range allPossibleFormats {
1184-
t.Run(testNameWithFormats(compression.EncSnappy, f.chunkFormat, f.headBlockFmt), func(t *testing.T) {
1185-
c := newMemChunkWithFormat(f.chunkFormat, compression.EncSnappy, f.headBlockFmt, blockSize, targetSize)
1184+
t.Run(testNameWithFormats(compression.Snappy, f.chunkFormat, f.headBlockFmt), func(t *testing.T) {
1185+
c := newMemChunkWithFormat(f.chunkFormat, compression.Snappy, f.headBlockFmt, blockSize, targetSize)
11861186

11871187
// add a few entries
11881188
for i := 0; i < 5; i++ {
@@ -1267,7 +1267,7 @@ var (
12671267
func BenchmarkBufferedIteratorLabels(b *testing.B) {
12681268
for _, f := range HeadBlockFmts {
12691269
b.Run(f.String(), func(b *testing.B) {
1270-
c := NewMemChunk(ChunkFormatV3, compression.EncSnappy, f, testBlockSize, testTargetSize)
1270+
c := NewMemChunk(ChunkFormatV3, compression.Snappy, f, testBlockSize, testTargetSize)
12711271
_ = fillChunk(c)
12721272

12731273
labelsSet := []labels.Labels{
@@ -1367,8 +1367,8 @@ func BenchmarkBufferedIteratorLabels(b *testing.B) {
13671367

13681368
func Test_HeadIteratorReverse(t *testing.T) {
13691369
for _, testData := range allPossibleFormats {
1370-
t.Run(testNameWithFormats(compression.EncSnappy, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
1371-
c := newMemChunkWithFormat(testData.chunkFormat, compression.EncSnappy, testData.headBlockFmt, testBlockSize, testTargetSize)
1370+
t.Run(testNameWithFormats(compression.Snappy, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
1371+
c := newMemChunkWithFormat(testData.chunkFormat, compression.Snappy, testData.headBlockFmt, testBlockSize, testTargetSize)
13721372
genEntry := func(i int64) *logproto.Entry {
13731373
return &logproto.Entry{
13741374
Timestamp: time.Unix(0, i),
@@ -1483,7 +1483,7 @@ func TestMemChunk_Rebound(t *testing.T) {
14831483
}
14841484

14851485
func buildTestMemChunk(t *testing.T, from, through time.Time) *MemChunk {
1486-
chk := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
1486+
chk := NewMemChunk(ChunkFormatV3, compression.GZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
14871487
for ; from.Before(through); from = from.Add(time.Second) {
14881488
_, err := chk.Append(&logproto.Entry{
14891489
Line: from.String(),
@@ -1604,7 +1604,7 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
16041604
}
16051605

16061606
func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matchingFrom, matchingTo *time.Time, withStructuredMetadata bool) *MemChunk {
1607-
chk := NewMemChunk(ChunkFormatV4, compression.EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
1607+
chk := NewMemChunk(ChunkFormatV4, compression.GZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
16081608
t.Logf("from : %v", from.String())
16091609
t.Logf("through: %v", through.String())
16101610
var structuredMetadata push.LabelsAdapter
@@ -1753,7 +1753,7 @@ func TestMemChunk_SpaceFor(t *testing.T) {
17531753
t.Run(tc.desc, func(t *testing.T) {
17541754
for _, format := range allPossibleFormats {
17551755
t.Run(fmt.Sprintf("chunk_v%d_head_%s", format.chunkFormat, format.headBlockFmt), func(t *testing.T) {
1756-
chk := newMemChunkWithFormat(format.chunkFormat, compression.EncNone, format.headBlockFmt, 1024, tc.targetSize)
1756+
chk := newMemChunkWithFormat(format.chunkFormat, compression.None, format.headBlockFmt, 1024, tc.targetSize)
17571757

17581758
chk.blocks = make([]block, tc.nBlocks)
17591759
chk.cutBlockSize = tc.cutBlockSize
@@ -2055,7 +2055,7 @@ func TestDecodeChunkIncorrectBlockOffset(t *testing.T) {
20552055
t.Run(fmt.Sprintf("chunkFormat:%v headBlockFmt:%v", format.chunkFormat, format.headBlockFmt), func(t *testing.T) {
20562056
for incorrectOffsetBlockNum := 0; incorrectOffsetBlockNum < 3; incorrectOffsetBlockNum++ {
20572057
t.Run(fmt.Sprintf("inorrect offset block: %d", incorrectOffsetBlockNum), func(t *testing.T) {
2058-
chk := NewMemChunk(format.chunkFormat, compression.EncNone, format.headBlockFmt, blockSize, testTargetSize)
2058+
chk := NewMemChunk(format.chunkFormat, compression.None, format.headBlockFmt, blockSize, testTargetSize)
20592059
ts := time.Now().Unix()
20602060
for i := 0; i < 3; i++ {
20612061
dup, err := chk.Append(&logproto.Entry{

pkg/chunkenc/unordered_test.go

+8-8
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,7 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
451451
}
452452

453453
func TestUnorderedChunkIterators(t *testing.T) {
454-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
454+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
455455
for i := 0; i < 100; i++ {
456456
// push in reverse order
457457
dup, err := c.Append(&logproto.Entry{
@@ -497,11 +497,11 @@ func TestUnorderedChunkIterators(t *testing.T) {
497497
}
498498

499499
func BenchmarkUnorderedRead(b *testing.B) {
500-
legacy := NewMemChunk(ChunkFormatV3, compression.EncSnappy, OrderedHeadBlockFmt, testBlockSize, testTargetSize)
500+
legacy := NewMemChunk(ChunkFormatV3, compression.Snappy, OrderedHeadBlockFmt, testBlockSize, testTargetSize)
501501
fillChunkClose(legacy, false)
502-
ordered := NewMemChunk(ChunkFormatV3, compression.EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
502+
ordered := NewMemChunk(ChunkFormatV3, compression.Snappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
503503
fillChunkClose(ordered, false)
504-
unordered := NewMemChunk(ChunkFormatV3, compression.EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
504+
unordered := NewMemChunk(ChunkFormatV3, compression.Snappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
505505
fillChunkRandomOrder(unordered, false)
506506

507507
tcs := []struct {
@@ -559,7 +559,7 @@ func BenchmarkUnorderedRead(b *testing.B) {
559559
}
560560

561561
func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
562-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
562+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
563563
fillChunkRandomOrder(c, false)
564564

565565
ct := 0
@@ -596,7 +596,7 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
596596
}
597597

598598
func chunkFrom(xs []logproto.Entry) ([]byte, error) {
599-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
599+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
600600
for _, x := range xs {
601601
if _, err := c.Append(&x); err != nil {
602602
return nil, err
@@ -656,7 +656,7 @@ func TestReorder(t *testing.T) {
656656
},
657657
} {
658658
t.Run(tc.desc, func(t *testing.T) {
659-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
659+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
660660
for _, x := range tc.input {
661661
dup, err := c.Append(&x)
662662
require.False(t, dup)
@@ -675,7 +675,7 @@ func TestReorder(t *testing.T) {
675675
}
676676

677677
func TestReorderAcrossBlocks(t *testing.T) {
678-
c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
678+
c := NewMemChunk(ChunkFormatV4, compression.Snappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
679679
for _, batch := range [][]int{
680680
// ensure our blocks have overlapping bounds and must be reordered
681681
// before closing.

pkg/chunkenc/util_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ func logprotoEntryWithStructuredMetadata(ts int64, line string, structuredMetada
2424
}
2525
}
2626

27-
func generateData(enc compression.Encoding, chunksCount, blockSize, targetSize int) ([]Chunk, uint64) {
27+
func generateData(enc compression.Codec, chunksCount, blockSize, targetSize int) ([]Chunk, uint64) {
2828
chunks := []Chunk{}
2929
i := int64(0)
3030
size := uint64(0)

pkg/compactor/deletion/delete_requests_table.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ func (t *deleteRequestsTable) uploadFile() error {
117117
}()
118118

119119
err = t.db.View(func(tx *bbolt.Tx) (err error) {
120-
gzipPool := compression.GetWriterPool(compression.EncGZIP)
120+
gzipPool := compression.GetWriterPool(compression.GZIP)
121121
compressedWriter := gzipPool.GetWriter(f)
122122
defer gzipPool.PutWriter(compressedWriter)
123123

pkg/compactor/index_set.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,7 @@ func (is *indexSet) upload() error {
229229
}
230230
}()
231231

232-
gzipPool := compression.GetWriterPool(compression.EncGZIP)
232+
gzipPool := compression.GetWriterPool(compression.GZIP)
233233
compressedWriter := gzipPool.GetWriter(f)
234234
defer gzipPool.PutWriter(compressedWriter)
235235

0 commit comments

Comments
 (0)