diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 6868102fa31..bddb8e88517 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 		}
 	}
 
-	b, err := db.Block(blockID)
+	b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/tsdb/block.go b/tsdb/block.go
index 83b86a58d16..bca56e8f0bf 100644
--- a/tsdb/block.go
+++ b/tsdb/block.go
@@ -329,7 +329,7 @@ type Block struct {
 
 // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
 // to instantiate chunk structs.
-func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
+func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (pb *Block, err error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -350,7 +350,11 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er
 	}
 	closers = append(closers, cr)
 
-	ir, err := index.NewFileReader(filepath.Join(dir, indexFilename))
+	decoder := index.DecodePostingsRaw
+	if postingsDecoderFactory != nil {
+		decoder = postingsDecoderFactory(meta)
+	}
+	ir, err := index.NewFileReader(filepath.Join(dir, indexFilename), decoder)
 	if err != nil {
 		return nil, err
 	}
diff --git a/tsdb/block_test.go b/tsdb/block_test.go
index 42acc3c6930..68c4b6ce84e 100644
--- a/tsdb/block_test.go
+++ b/tsdb/block_test.go
@@ -58,14 +58,14 @@ func TestSetCompactionFailed(t *testing.T) {
 	tmpdir := t.TempDir()
 
 	blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
-	b, err := OpenBlock(nil, blockDir, nil)
+	b, err := OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(t, err)
 	require.False(t, b.meta.Compaction.Failed)
 	require.NoError(t, b.setCompactionFailed())
 	require.True(t, b.meta.Compaction.Failed)
 	require.NoError(t, b.Close())
 
-	b, err = OpenBlock(nil, blockDir, nil)
+	b, err = OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(t, err)
 	require.True(t, b.meta.Compaction.Failed)
 	require.NoError(t, b.Close())
@@ -73,7 +73,7 @@ func TestSetCompactionFailed(t *testing.T) {
 
 func TestCreateBlock(t *testing.T) {
 	tmpdir := t.TempDir()
-	b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
+	b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil, nil)
 	require.NoError(t, err)
 	require.NoError(t, b.Close())
 }
@@ -83,7 +83,7 @@ func BenchmarkOpenBlock(b *testing.B) {
 	blockDir := createBlock(b, tmpdir, genSeries(1e6, 20, 0, 10))
 	b.Run("benchmark", func(b *testing.B) {
 		for i := 0; i < b.N; i++ {
-			block, err := OpenBlock(nil, blockDir, nil)
+			block, err := OpenBlock(nil, blockDir, nil, nil)
 			require.NoError(b, err)
 			require.NoError(b, block.Close())
 		}
@@ -189,7 +189,7 @@ func TestCorruptedChunk(t *testing.T) {
 			require.NoError(t, f.Close())
 
 			// Check open err.
-			b, err := OpenBlock(nil, blockDir, nil)
+			b, err := OpenBlock(nil, blockDir, nil, nil)
 			if tc.openErr != nil {
 				require.Equal(t, tc.openErr.Error(), err.Error())
 				return
@@ -244,7 +244,7 @@ func TestLabelValuesWithMatchers(t *testing.T) {
 	require.NotEmpty(t, files, "No chunk created.")
 
 	// Check open err.
-	block, err := OpenBlock(nil, blockDir, nil)
+	block, err := OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(t, err)
 	defer func() { require.NoError(t, block.Close()) }()
 
@@ -324,7 +324,7 @@ func TestBlockSize(t *testing.T) {
 	// Create a block and compare the reported size vs actual disk size.
 	{
 		blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100))
-		blockInit, err = OpenBlock(nil, blockDirInit, nil)
+		blockInit, err = OpenBlock(nil, blockDirInit, nil, nil)
 		require.NoError(t, err)
 		defer func() {
 			require.NoError(t, blockInit.Close())
@@ -348,7 +348,7 @@ func TestBlockSize(t *testing.T) {
 		require.NoError(t, err)
 		blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
 		require.NoError(t, err)
-		blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil)
+		blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil, nil)
 		require.NoError(t, err)
 		defer func() {
 			require.NoError(t, blockAfterCompact.Close())
@@ -379,7 +379,7 @@ func TestReadIndexFormatV1(t *testing.T) {
 	*/
 
 	blockDir := filepath.Join("testdata", "index_format_v1")
-	block, err := OpenBlock(nil, blockDir, nil)
+	block, err := OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(t, err)
 
 	q, err := NewBlockQuerier(block, 0, 1000)
@@ -416,7 +416,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
 	require.NotEmpty(b, files, "No chunk created.")
 
 	// Check open err.
-	block, err := OpenBlock(nil, blockDir, nil)
+	block, err := OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(b, err)
 	defer func() { require.NoError(b, block.Close()) }()
 
@@ -468,7 +468,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
 	require.NotEmpty(t, files, "No chunk created.")
 
 	// Check open err.
-	block, err := OpenBlock(nil, blockDir, nil)
+	block, err := OpenBlock(nil, blockDir, nil, nil)
 	require.NoError(t, err)
 	t.Cleanup(func() { require.NoError(t, block.Close()) })
 
diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go
index d8240b53c6d..e2757eaff25 100644
--- a/tsdb/blockwriter_test.go
+++ b/tsdb/blockwriter_test.go
@@ -46,7 +46,7 @@ func TestBlockWriter(t *testing.T) {
 
 	// Confirm the block has the correct data.
 	blockpath := filepath.Join(outputDir, id.String())
-	b, err := OpenBlock(nil, blockpath, nil)
+	b, err := OpenBlock(nil, blockpath, nil, nil)
 	require.NoError(t, err)
 	defer func() { require.NoError(t, b.Close()) }()
 	q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
diff --git a/tsdb/compact.go b/tsdb/compact.go
index c2ae23b2e40..655b3fc5c77 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -83,6 +83,7 @@ type LeveledCompactor struct {
 	maxBlockChunkSegmentSize    int64
 	mergeFunc                   storage.VerticalChunkSeriesMergeFunc
 	postingsEncoder             index.PostingsEncoder
+	postingsDecoderFactory      PostingsDecoderFactory
 	enableOverlappingCompaction bool
 }
 
@@ -154,6 +155,9 @@ type LeveledCompactorOptions struct {
 	// PE specifies the postings encoder. It is called when compactor is writing out the postings for a label name/value pair during compaction.
 	// If it is nil then the default encoder is used. At the moment that is the "raw" encoder. See index.EncodePostingsRaw for more.
 	PE index.PostingsEncoder
	// PD specifies the postings decoder factory, which returns a postings decoder based on BlockMeta. It is called when opening a block or an index file.
	// If it is nil then the default decoder is used. At the moment that is the "raw" decoder. See index.DecodePostingsRaw for more.
	PD PostingsDecoderFactory
 	// MaxBlockChunkSegmentSize is the max block chunk segment size. If it is 0 then the default chunks.DefaultChunkSegmentSize is used.
 	MaxBlockChunkSegmentSize int64
 	// MergeFunc is used for merging series together in vertical compaction. By default storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) is used.
@@ -163,6 +167,12 @@ type LeveledCompactorOptions struct {
 	EnableOverlappingCompaction bool
 }
 
+type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder
+
+func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder {
+	return index.DecodePostingsRaw
+}
+
 func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
 	return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{
 		MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize,
@@ -200,6 +210,10 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer
 	if pe == nil {
 		pe = index.EncodePostingsRaw
 	}
+	pd := opts.PD
+	if pd == nil {
+		pd = DefaultPostingsDecoderFactory
+	}
 	return &LeveledCompactor{
 		ranges:                      ranges,
 		chunkPool:                   pool,
@@ -209,6 +223,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer
 		maxBlockChunkSegmentSize:    maxBlockChunkSegmentSize,
 		mergeFunc:                   mergeFunc,
 		postingsEncoder:             pe,
+		postingsDecoderFactory:      pd,
 		enableOverlappingCompaction: opts.EnableOverlappingCompaction,
 	}, nil
 }
@@ -473,7 +488,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
 
 		if b == nil {
 			var err error
-			b, err = OpenBlock(c.logger, d, c.chunkPool)
+			b, err = OpenBlock(c.logger, d, c.chunkPool, c.postingsDecoderFactory)
 			if err != nil {
 				return uid, err
 			}
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 10c90e30dc2..d68a141693f 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1093,7 +1093,7 @@ func BenchmarkCompaction(b *testing.B) {
 			blockDirs := make([]string, 0, len(c.ranges))
 			var blocks []*Block
 			for _, r := range c.ranges {
-				block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil)
+				block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil, nil)
 				require.NoError(b, err)
 				blocks = append(blocks, block)
 				defer func() {
@@ -1489,7 +1489,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
 			require.NotEqual(t, ulid.ULID{}, id)
 
 			// Open the block and query it and check the histograms.
-			block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, id.String()), nil)
+			block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, id.String()), nil, nil)
 			require.NoError(t, err)
 			t.Cleanup(func() {
 				require.NoError(t, block.Close())
diff --git a/tsdb/db.go b/tsdb/db.go
index c2e8904a251..f7a8a44764e 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -84,6 +84,7 @@ func DefaultOptions() *Options {
 		OutOfOrderCapMax:            DefaultOutOfOrderCapMax,
 		EnableOverlappingCompaction: true,
 		EnableSharding:              false,
+		PostingsDecoderFactory:      DefaultPostingsDecoderFactory,
 	}
 }
 
@@ -189,6 +190,10 @@ type Options struct {
 
 	// EnableSharding enables query sharding support in TSDB.
 	EnableSharding bool
+
+	// PostingsDecoderFactory allows users to customize postings decoders based on BlockMeta.
+	// By default, DefaultPostingsDecoderFactory will be used to create the raw postings decoder.
+	PostingsDecoderFactory PostingsDecoderFactory
 }
 
 type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
@@ -571,7 +576,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
 		return nil, ErrClosed
 	default:
 	}
-	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil)
+	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil, DefaultPostingsDecoderFactory)
 	if err != nil {
 		return nil, err
 	}
@@ -669,7 +674,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
 }
 
 // Block returns a block reader by given block id.
-func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
+func (db *DBReadOnly) Block(blockID string, postingsDecoderFactory PostingsDecoderFactory) (BlockReader, error) {
 	select {
 	case <-db.closed:
 		return nil, ErrClosed
@@ -681,7 +686,7 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
 		return nil, fmt.Errorf("invalid block ID %s", blockID)
 	}
 
-	block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil)
+	block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil, postingsDecoderFactory)
 	if err != nil {
 		return nil, err
 	}
@@ -831,6 +836,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{
 		MaxBlockChunkSegmentSize:    opts.MaxBlockChunkSegmentSize,
 		EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
+		PD:                          opts.PostingsDecoderFactory,
 	})
 	if err != nil {
 		cancel()
@@ -1437,7 +1443,7 @@ func (db *DB) reloadBlocks() (err error) {
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
 
-	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool)
+	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory)
 	if err != nil {
 		return err
 	}
@@ -1529,7 +1535,7 @@ func (db *DB) reloadBlocks() (err error) {
 	return nil
 }
 
-func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
+func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
 	bDirs, err := blockDirs(dir)
 	if err != nil {
 		return nil, nil, fmt.Errorf("find blocks: %w", err)
@@ -1546,7 +1552,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po
 		// See if we already have the block in memory or open it otherwise.
 		block, open := getBlock(loaded, meta.ULID)
 		if !open {
-			block, err = OpenBlock(l, bDir, chunkPool)
+			block, err = OpenBlock(l, bDir, chunkPool, postingsDecoderFactory)
 			if err != nil {
 				corrupted[meta.ULID] = err
 				continue
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index a682f465549..29a7be38fe3 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -1319,7 +1319,7 @@ func TestTombstoneCleanFail(t *testing.T) {
 	totalBlocks := 2
 	for i := 0; i < totalBlocks; i++ {
 		blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1))
-		block, err := OpenBlock(nil, blockDir, nil)
+		block, err := OpenBlock(nil, blockDir, nil, nil)
 		require.NoError(t, err)
 		// Add some fake tombstones to trigger the compaction.
 		tomb := tombstones.NewMemTombstones()
@@ -1374,7 +1374,7 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
 			// Generate some blocks with old mint (near epoch).
 			for j := 0; j < totalBlocks; j++ {
 				blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1))
-				block, err := OpenBlock(nil, blockDir, nil)
+				block, err := OpenBlock(nil, blockDir, nil, nil)
 				require.NoError(t, err)
 				// Cover block with tombstones so it can be deleted with CleanTombstones() as well.
 				tomb := tombstones.NewMemTombstones()
@@ -1435,7 +1435,7 @@ func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *
 		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
 	}
 
-	block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
+	block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil, nil)
 	require.NoError(c.t, err)
 	require.NoError(c.t, block.Close()) // Close block as we won't be using anywhere.
 	c.blocks = append(c.blocks, block)
@@ -2508,13 +2508,13 @@ func TestDBReadOnly(t *testing.T) {
 	})
 	t.Run("block", func(t *testing.T) {
 		blockID := expBlock.meta.ULID.String()
-		block, err := dbReadOnly.Block(blockID)
+		block, err := dbReadOnly.Block(blockID, nil)
 		require.NoError(t, err)
 		require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch")
 	})
 	t.Run("invalid block ID", func(t *testing.T) {
 		blockID := "01GTDVZZF52NSWB5SXQF0P2PGF"
-		_, err := dbReadOnly.Block(blockID)
+		_, err := dbReadOnly.Block(blockID, nil)
 		require.Error(t, err)
 	})
 	t.Run("last block ID", func(t *testing.T) {
diff --git a/tsdb/index/index.go b/tsdb/index/index.go
index 4ded4cbe202..e80e155cc22 100644
--- a/tsdb/index/index.go
+++ b/tsdb/index/index.go
@@ -116,6 +116,8 @@ type symbolCacheEntry struct {
 
 type PostingsEncoder func(*encoding.Encbuf, []uint32) error
 
+type PostingsDecoder func(b []byte) (int, Postings, error)
+
 // Writer implements the IndexWriter interface for the standard
 // serialization format.
 type Writer struct {
@@ -1154,17 +1156,17 @@ func (b realByteSlice) Sub(start, end int) ByteSlice {
 
 // NewReader returns a new index reader on the given byte slice. It automatically
 // handles different format versions.
-func NewReader(b ByteSlice) (*Reader, error) {
-	return newReader(b, io.NopCloser(nil))
+func NewReader(b ByteSlice, decoder PostingsDecoder) (*Reader, error) {
+	return newReader(b, io.NopCloser(nil), decoder)
 }
 
 // NewFileReader returns a new index reader against the given index file.
-func NewFileReader(path string) (*Reader, error) {
+func NewFileReader(path string, decoder PostingsDecoder) (*Reader, error) {
 	f, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return nil, err
 	}
-	r, err := newReader(realByteSlice(f.Bytes()), f)
+	r, err := newReader(realByteSlice(f.Bytes()), f, decoder)
 	if err != nil {
 		return nil, tsdb_errors.NewMulti(
 			err,
@@ -1175,7 +1177,7 @@ func NewFileReader(path string) (*Reader, error) {
 	return r, nil
 }
 
-func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
+func newReader(b ByteSlice, c io.Closer, postingsDecoder PostingsDecoder) (*Reader, error) {
 	r := &Reader{
 		b: b,
 		c: c,
@@ -1272,7 +1274,10 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 		r.nameSymbols[off] = k
 	}
 
-	r.dec = &Decoder{LookupSymbol: r.lookupSymbol}
+	if postingsDecoder == nil {
+		postingsDecoder = DecodePostingsRaw
+	}
+	r.dec = &Decoder{LookupSymbol: r.lookupSymbol, DecodePostings: postingsDecoder}
 
 	return r, nil
 }
@@ -1695,7 +1700,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P
 			}
 			// Read from the postings table.
 			d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
-			_, p, err := r.dec.Postings(d.Get())
+			_, p, err := r.dec.DecodePostings(d.Get())
 			if err != nil {
 				return nil, fmt.Errorf("decode postings: %w", err)
 			}
@@ -1738,7 +1743,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P
 			if val == value {
 				// Read from the postings table.
 				d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
-				_, p, err := r.dec.Postings(d2.Get())
+				_, p, err := r.dec.DecodePostings(d2.Get())
 				if err != nil {
 					return false, fmt.Errorf("decode postings: %w", err)
 				}
@@ -1907,11 +1912,17 @@ func (s stringListIter) Err() error { return nil }
 // It currently does not contain decoding methods for all entry types but can be extended
 // by them if there's demand.
 type Decoder struct {
-	LookupSymbol func(context.Context, uint32) (string, error)
+	LookupSymbol   func(context.Context, uint32) (string, error)
+	DecodePostings PostingsDecoder
 }
 
-// Postings returns a postings list for b and its number of elements.
-func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
+// DecodePostingsRaw returns a postings list for b and its number of elements.
+func DecodePostingsRaw(b []byte) (int, Postings, error) {
 	d := encoding.Decbuf{B: b}
-	return dec.PostingsFromDecbuf(d)
+	n := d.Be32int()
+	l := d.Get()
+	if d.Err() != nil {
+		return 0, nil, d.Err()
+	}
+	return n, newBigEndianPostings(l), nil
 }
diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go
index 5c6d64e076a..84134274a1c 100644
--- a/tsdb/index/index_test.go
+++ b/tsdb/index/index_test.go
@@ -146,7 +146,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, iw.Close())
 
-	ir, err := NewFileReader(fn)
+	ir, err := NewFileReader(fn, nil)
 	require.NoError(t, err)
 	require.NoError(t, ir.Close())
 
@@ -157,7 +157,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
 	require.NoError(t, err)
 	f.Close()
 
-	_, err = NewFileReader(dir)
+	_, err = NewFileReader(dir, nil)
 	require.Error(t, err)
 }
 
@@ -218,7 +218,7 @@ func TestIndexRW_Postings(t *testing.T) {
 	}, labelIndices)
 
 	t.Run("ShardedPostings()", func(t *testing.T) {
-		ir, err := NewFileReader(fn)
+		ir, err := NewFileReader(fn, nil)
 		require.NoError(t, err)
 		t.Cleanup(func() {
 			require.NoError(t, ir.Close())
@@ -469,7 +469,7 @@ func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
 
 func TestReaderWithInvalidBuffer(t *testing.T) {
 	b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
-	_, err := NewReader(b)
+	_, err := NewReader(b, DecodePostingsRaw)
 	require.Error(t, err)
 }
 
@@ -481,7 +481,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
 	err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666)
 	require.NoError(t, err)
 
-	_, err = NewFileReader(idxName)
+	_, err = NewFileReader(idxName, nil)
 	require.Error(t, err)
 
 	// dir.Close will fail on Win if idxName fd is not closed on error path.
@@ -560,7 +560,7 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
 }
 
 func TestDecoder_Postings_WrongInput(t *testing.T) {
-	_, _, err := (&Decoder{}).Postings([]byte("the cake is a lie"))
+	_, _, err := (&Decoder{DecodePostings: DecodePostingsRaw}).DecodePostings([]byte("the cake is a lie"))
 	require.Error(t, err)
 }
 
diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go
index 9a823024203..e1f34d6bb07 100644
--- a/tsdb/querier_bench_test.go
+++ b/tsdb/querier_bench_test.go
@@ -74,7 +74,7 @@ func BenchmarkQuerier(b *testing.B) {
 
 	b.Run("Block", func(b *testing.B) {
 		blockdir := createBlockFromHead(b, b.TempDir(), h)
-		block, err := OpenBlock(nil, blockdir, nil)
+		block, err := OpenBlock(nil, blockdir, nil, nil)
 		require.NoError(b, err)
 		defer func() {
 			require.NoError(b, block.Close())
@@ -297,7 +297,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
 
 	tmpdir := b.TempDir()
 	blockdir := createBlockFromHead(b, tmpdir, h)
-	block, err := OpenBlock(nil, blockdir, nil)
+	block, err := OpenBlock(nil, blockdir, nil, nil)
 	require.NoError(b, err)
 	defer func() {
 		require.NoError(b, block.Close())
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index bb13531d7df..c2a9195df05 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -2440,7 +2440,7 @@ func BenchmarkQueryIterator(b *testing.B) {
 			} else {
 				generatedSeries = populateSeries(prefilledLabels, mint, maxt)
 			}
-			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
+			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
 			require.NoError(b, err)
 			blocks = append(blocks, block)
 			defer block.Close()
@@ -2503,7 +2503,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 			} else {
 				generatedSeries = populateSeries(prefilledLabels, mint, maxt)
 			}
-			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
+			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
 			require.NoError(b, err)
 			blocks = append(blocks, block)
 			defer block.Close()
@@ -2638,7 +2638,7 @@ func BenchmarkSetMatcher(b *testing.B) {
 			} else {
 				generatedSeries = populateSeries(prefilledLabels, mint, maxt)
 			}
-			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
+			block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil)
 			require.NoError(b, err)
 			blocks = append(blocks, block)
 			defer block.Close()
@@ -3134,7 +3134,7 @@ func BenchmarkQueries(b *testing.B) {
 
 			qs := make([]storage.Querier, 0, 10)
 			for x := 0; x <= 10; x++ {
-				block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
+				block, err := OpenBlock(nil, createBlock(b, dir, series), nil, nil)
 				require.NoError(b, err)
 				q, err := NewBlockQuerier(block, 1, nSamples)
 				require.NoError(b, err)
diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go
index 8a70e05f317..e7b42e9923f 100644
--- a/tsdb/repair_test.go
+++ b/tsdb/repair_test.go
@@ -79,7 +79,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
 	require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777))
 
 	// Read current index to check integrity.
-	r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
+	r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), nil)
 	require.NoError(t, err)
 	p, err := r.Postings(ctx, "b", "1")
 	require.NoError(t, err)
@@ -97,7 +97,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
 	require.NoError(t, err)
 	db.Close()
 
-	r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
+	r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), nil)
 	require.NoError(t, err)
 	defer r.Close()
 	p, err = r.Postings(ctx, "b", "1")
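
Usage sketch (not part of the patch): the new `PostingsDecoderFactory` hook lets a downstream consumer pick a postings decoder per block, mirroring the existing `PE`/`PostingsEncoder` hook on the write path. Below is a minimal sketch assuming this patch is applied; `customDecoder` and the compaction-level dispatch are hypothetical placeholders for a real custom codec.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/index"
)

// customDecoder is a hypothetical stand-in for a downstream postings codec.
// It must satisfy index.PostingsDecoder, i.e. func([]byte) (int, Postings, error).
// This sketch simply delegates to the raw decoder.
func customDecoder(b []byte) (int, index.Postings, error) {
	return index.DecodePostingsRaw(b)
}

func main() {
	opts := tsdb.DefaultOptions()
	// Choose a decoder per block. Dispatching on compaction level is purely
	// illustrative; a real factory would key off whatever block metadata the
	// matching encoder recorded at write time.
	opts.PostingsDecoderFactory = func(meta *tsdb.BlockMeta) index.PostingsDecoder {
		if meta != nil && meta.Compaction.Level > 1 {
			return customDecoder
		}
		return index.DecodePostingsRaw
	}

	db, err := tsdb.Open("data", nil, nil, opts, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("TSDB opened with a custom postings decoder factory")
}
```

Passing `nil` keeps today's behavior throughout: `OpenBlock` and `newReader` fall back to `index.DecodePostingsRaw` when no factory or decoder is supplied, which is why the test call sites above can simply pass `nil`.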