From cfc50780284d5a54efc234beb4475818b00b46e3 Mon Sep 17 00:00:00 2001 From: Ahmed Hassan Date: Mon, 27 Jan 2025 00:37:09 -0800 Subject: [PATCH] add unit tests for getIntervalFromMaxSplits Signed-off-by: Ahmed Hassan --- .../queryrange/dynamic_query_splits.go | 17 -- .../tripperware/queryrange/limits_test.go | 9 +- .../queryrange/query_range_middlewares.go | 15 +- .../queryrange/split_by_interval.go | 77 +++++--- .../queryrange/split_by_interval_test.go | 183 +++++++++++++++++- 5 files changed, 236 insertions(+), 65 deletions(-) delete mode 100644 pkg/querier/tripperware/queryrange/dynamic_query_splits.go diff --git a/pkg/querier/tripperware/queryrange/dynamic_query_splits.go b/pkg/querier/tripperware/queryrange/dynamic_query_splits.go deleted file mode 100644 index 35aeeaf1d5..0000000000 --- a/pkg/querier/tripperware/queryrange/dynamic_query_splits.go +++ /dev/null @@ -1,17 +0,0 @@ -package queryrange - -import ( - "flag" - "time" -) - -type DynamicQuerySplitsConfig struct { - MaxShardsPerQuery int `yaml:"max_shards_per_query"` - MaxDurationOfDataFetchedFromStoragePerQuery time.Duration `yaml:"max_duration_of_data_fetched_from_storage_per_query"` -} - -// RegisterFlags registers flags foy dynamic query splits -func (cfg *DynamicQuerySplitsConfig) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxShardsPerQuery, "querier.max-shards-per-query", 0, "[EXPERIMENTAL] Maximum number of shards for a query, 0 disables it. Dynamically uses a multiple of `split-queries-by-interval` to maintain the number of splits below the limit. If vertical sharding is enabled for a query, the combined total number of vertical and interval shards is kept below this limit.") - f.DurationVar(&cfg.MaxDurationOfDataFetchedFromStoragePerQuery, "querier.max-duration-of-data-fetched-from-storage-per-query", 0, "[EXPERIMENTAL] Max total duration of data fetched by all query shards from storage, 0 disables it. 
Dynamically uses a multiple of `split-queries-by-interval` to ensure the total fetched duration of data is lower than the value set. It takes into account additional data fetched by matrix selectors and subqueries.") -} diff --git a/pkg/querier/tripperware/queryrange/limits_test.go b/pkg/querier/tripperware/queryrange/limits_test.go index 6c3e257986..09cfe77b98 100644 --- a/pkg/querier/tripperware/queryrange/limits_test.go +++ b/pkg/querier/tripperware/queryrange/limits_test.go @@ -233,9 +233,10 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { } type mockLimits struct { - maxQueryLookback time.Duration - maxQueryLength time.Duration - maxCacheFreshness time.Duration + maxQueryLookback time.Duration + maxQueryLength time.Duration + maxCacheFreshness time.Duration + queryVerticalShardSize int } func (m mockLimits) MaxQueryLookback(string) time.Duration { @@ -255,7 +256,7 @@ func (m mockLimits) MaxCacheFreshness(string) time.Duration { } func (m mockLimits) QueryVerticalShardSize(userID string) int { - return 0 + return m.queryVerticalShardSize } func (m mockLimits) QueryPriority(userID string) validation.QueryPriority { diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares.go b/pkg/querier/tripperware/queryrange/query_range_middlewares.go index b4ed9653e5..0f0bb1b64f 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares.go @@ -70,7 +70,7 @@ func (cfg *Config) Validate(qCfg querier.Config) error { return errors.Wrap(err, "invalid ResultsCache config") } } - if cfg.DynamicQuerySplitsConfig.MaxShardsPerQuery > 0 || cfg.DynamicQuerySplitsConfig.MaxDurationOfDataFetchedFromStoragePerQuery > 0 { + if cfg.DynamicQuerySplitsConfig.MaxSplitsPerQuery > 0 || cfg.DynamicQuerySplitsConfig.MaxFetchedStorageDataDurationPerQuery > 0 { if cfg.SplitQueriesByInterval <= 0 { return errors.New("configs under dynamic-query-splits requires that a value for 
split-queries-by-interval is set.") } @@ -78,6 +78,17 @@ func (cfg *Config) Validate(qCfg querier.Config) error { return nil } +type DynamicQuerySplitsConfig struct { + MaxSplitsPerQuery int `yaml:"max_splits_per_query"` + MaxFetchedStorageDataDurationPerQuery time.Duration `yaml:"max_fetched_storage_data_duration_per_query"` +} + +// RegisterFlags registers flags for dynamic query splits +func (cfg *DynamicQuerySplitsConfig) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&cfg.MaxSplitsPerQuery, "querier.max-splits-per-query", 0, "[EXPERIMENTAL] Maximum number of splits for a query, 0 disables it. Dynamically uses a multiple of split interval to maintain a total number of splits below the set value. If vertical sharding is enabled for a query, the combined total number of vertical and interval splits is kept below this value.") + f.DurationVar(&cfg.MaxFetchedStorageDataDurationPerQuery, "querier.max-fetched-storage-data-duration-per-query", 0, "[EXPERIMENTAL] Max total duration of data fetched from storage by all query splits, 0 disables it. Dynamically uses a multiple of split interval to maintain a total fetched duration of data lower than the value set. It takes into account additional duration fetched by matrix selectors and subqueries.") +} + // Middlewares returns list of middlewares that should be applied for range query. 
func Middlewares( cfg Config, @@ -99,7 +110,7 @@ func Middlewares( } if cfg.SplitQueriesByInterval != 0 { intervalFn := staticIntervalFn(cfg) - if cfg.DynamicQuerySplitsConfig.MaxShardsPerQuery > 0 || cfg.DynamicQuerySplitsConfig.MaxDurationOfDataFetchedFromStoragePerQuery > 0 { + if cfg.DynamicQuerySplitsConfig.MaxSplitsPerQuery > 0 || cfg.DynamicQuerySplitsConfig.MaxFetchedStorageDataDurationPerQuery > 0 { intervalFn = dynamicIntervalFn(cfg, limits, queryAnalyzer, lookbackDelta) } queryRangeMiddleware = append(queryRangeMiddleware, tripperware.InstrumentMiddleware("split_by_interval", metrics), SplitByIntervalMiddleware(intervalFn, limits, prometheusCodec, registerer, lookbackDelta)) diff --git a/pkg/querier/tripperware/queryrange/split_by_interval.go b/pkg/querier/tripperware/queryrange/split_by_interval.go index edc7686ab5..7b7fd9fb89 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval.go @@ -160,8 +160,8 @@ func staticIntervalFn(cfg Config) func(ctx context.Context, r tripperware.Reques func dynamicIntervalFn(cfg Config, limits tripperware.Limits, queryAnalyzer querysharding.Analyzer, lookbackDelta time.Duration) func(ctx context.Context, r tripperware.Request) (time.Duration, error) { return func(ctx context.Context, r tripperware.Request) (time.Duration, error) { baseInterval := cfg.SplitQueriesByInterval - maxDurationFetchedConfig := cfg.DynamicQuerySplitsConfig.MaxDurationOfDataFetchedFromStoragePerQuery - maxSplitsConfig := cfg.DynamicQuerySplitsConfig.MaxShardsPerQuery + maxDurationFetchedConfig := cfg.DynamicQuerySplitsConfig.MaxFetchedStorageDataDurationPerQuery + maxSplitsConfig := cfg.DynamicQuerySplitsConfig.MaxSplitsPerQuery queryVerticalShardSize, err := getMaxVerticalShardSizeForQuery(ctx, r, limits, queryAnalyzer) if err != nil { @@ -173,33 +173,37 @@ func dynamicIntervalFn(cfg Config, limits tripperware.Limits, queryAnalyzer quer return baseInterval, err } - // First we 
analyze the query using original start-end time. Additional duration fetched by lookbackDelta here only reflects the start time of first split - queryRangeIntervalsCount, extraIntervalsPerSplitCount, firstSplitLookbackDeltaIntervals := analyzeDurationFetchedByQuery(queryExpr, r, baseInterval, lookbackDelta) + // First analyze the query using original start-end time. Additional duration fetched by lookbackDelta here only reflects the start time of first split + queryRangeIntervalsCount, extraIntervalsPerSplitCount, firstSplitLookbackDeltaIntervals := analyzeDurationFetchedByQuery(queryExpr, r.GetStart(), r.GetEnd(), baseInterval, lookbackDelta) if extraIntervalsPerSplitCount == 0 { - extraIntervalsPerSplitCount = 1 // prevent division by 0 + extraIntervalsPerSplitCount = 1 // avoid division by 0 } - // Next we analyze the query using the next split start time to find the additional duration fetched by lookbackDelta for all subsequent splits + // Next analyze the query using the next split start time to find the additional duration fetched by lookbackDelta for other subsequent splits nextIntervalStart := nextIntervalBoundary(r.GetStart(), r.GetStep(), baseInterval) + r.GetStep() - nextIntervalReq := r.WithStartEnd(nextIntervalStart, r.GetEnd()) - _, _, otherSplitsLookbackDeltaIntervals := analyzeDurationFetchedByQuery(queryExpr, nextIntervalReq, baseInterval, lookbackDelta) + _, _, otherSplitsLookbackDeltaIntervals := analyzeDurationFetchedByQuery(queryExpr, nextIntervalStart, r.GetEnd(), baseInterval, lookbackDelta) - // By default we subtract the 'first split' duration fetched by loookbackDelta, and divide by the 'other splits' duration fetched by loookbackDelta. + // By default subtract the 'first split' duration fetched by lookbackDelta, and divide by the 'other splits' duration fetched by lookbackDelta. 
if firstSplitLookbackDeltaIntervals > 0 && otherSplitsLookbackDeltaIntervals > 0 { firstSplitLookbackDeltaIntervals = 0 // Dividing is enough if additional duration is fetched by loookbackDelta for all splits } else if otherSplitsLookbackDeltaIntervals > 0 { firstSplitLookbackDeltaIntervals = otherSplitsLookbackDeltaIntervals * -1 // Adding instead of subtracting for first split, if additional duration is fetched by loookbackDelta for all splits except first one } + // Find the max number of splits that will fetch less than MaxFetchedStorageDataDurationPerQuery var maxSplitsByDurationFetched int if maxDurationFetchedConfig > 0 { maxIntervalsFetchedByQuery := int(maxDurationFetchedConfig / baseInterval) + // Equation for max duration fetched by example query: up[15d:1h] with a range of 30 days, a base split interval of 24 hours, and 5 min lookbackDelta + // MaxFetchedStorageDataDurationPerQuery > (30 + ((15 + 1) x horizontal splits)) x vertical shards + // Rearranging the equation to find the max horizontal splits maxSplitsByDurationFetched = ((maxIntervalsFetchedByQuery / queryVerticalShardSize) - queryRangeIntervalsCount - firstSplitLookbackDeltaIntervals) / (extraIntervalsPerSplitCount + otherSplitsLookbackDeltaIntervals) if maxSplitsByDurationFetched <= 0 { maxSplitsByDurationFetched = 1 } } + // Find max number of splits from MaxSplitsPerQuery after accounting for vertical sharding var maxSplitsByConfig int if maxSplitsConfig > 0 { maxSplitsByConfig = maxSplitsConfig / queryVerticalShardSize @@ -244,34 +248,43 @@ func getMaxVerticalShardSizeForQuery(ctx context.Context, r tripperware.Request, return queryVerticalShardSize, nil } -func getIntervalFromMaxSplits(r tripperware.Request, baseInterval time.Duration, maxSplits int) time.Duration { - maxSplitsDuration := time.Duration(maxSplits) +func getIntervalFromMaxSplits(r tripperware.Request, baseInterval time.Duration, maxSplitsInt int) time.Duration { + maxSplits := time.Duration(maxSplitsInt) queryRange := 
time.Duration((r.GetEnd() - r.GetStart()) * int64(time.Millisecond)) - // Calculate the multiple of interval needed to shard query to <= maxSplits - n1 := (queryRange + baseInterval*maxSplitsDuration - 1) / (baseInterval * maxSplitsDuration) - if n1 <= 0 { - n1 = 1 + // Calculate the multiple n of interval needed to shard query to <= maxSplits + n := (queryRange + baseInterval*maxSplits - 1) / (baseInterval * maxSplits) + if n <= 0 { + n = 1 } - // The first split can be truncated and not cover the full length of n*interval. - // So we remove it and calculate the multiple of interval needed to shard <= maxSplits-1 - nextSplitStart := nextIntervalBoundary(r.GetStart(), r.GetStep(), n1*baseInterval) + r.GetStep() - queryRangeWithoutFirstSplit := time.Duration((r.GetEnd() - nextSplitStart) * int64(time.Millisecond)) - var n2 time.Duration - if maxSplitsDuration > 1 { - n2 = (queryRangeWithoutFirstSplit + baseInterval*(maxSplitsDuration-1) - 1) / (baseInterval * (maxSplitsDuration - 1)) + if maxSplits == 1 || queryRange < baseInterval { + // No splitting, interval should be long enough to result in 1 split only + nextSplitStart := nextIntervalBoundary(r.GetStart(), r.GetStep(), n*baseInterval) + r.GetStep() + if nextSplitStart < r.GetEnd() { + queryRangeWithoutFirstSplit := time.Duration((r.GetEnd() - nextSplitStart) * int64(time.Millisecond)) + n += (queryRangeWithoutFirstSplit + baseInterval - 1) / baseInterval + } } else { - // If maxSplits is <= 1 then we should not shard at all - n1 += (queryRangeWithoutFirstSplit + baseInterval - 1) / baseInterval + for n <= 2*(queryRange/baseInterval) { + // The first split can be truncated and shorter than other splits. 
+ // So it is removed to check if a larger interval is needed to shard <= maxSplits-1 + nextSplitStart := nextIntervalBoundary(r.GetStart(), r.GetStep(), n*baseInterval) + r.GetStep() + queryRangeWithoutFirstSplit := time.Duration((r.GetEnd() - nextSplitStart) * int64(time.Millisecond)) + n_temp := (queryRangeWithoutFirstSplit + baseInterval*(maxSplits-1) - 1) / (baseInterval * (maxSplits - 1)) + if n >= n_temp { + break + } + n++ + } } - n := max(n1, n2) return n * baseInterval } -// Analyzes the query to identify variables useful for calculating the duration of data -// that will be fetched from storage when the query is executed after being split. -// All variables are expressed as a count of multiples of the base split interval. +// analyzeDurationFetchedByQuery analyzes the query to identify variables useful for +// calculating the duration of data that will be fetched from storage when the query +// is executed after being split. All variables are expressed as a count of multiples +// of the base split interval. // // Returns: // - queryRangeIntervalsCount: The total count of intervals fetched by the original start-end @@ -282,11 +295,11 @@ func getIntervalFromMaxSplits(r tripperware.Request, baseInterval time.Duration, // for the specified start time. 
// // Example: -// Query: up[15d:1h] with a range of 30 days, a base split interval of 24 hours, and 5 min lookbackDelta +// Query up[15d:1h] with a range of 30 days, a base split interval of 24 hours, and 5 min lookbackDelta // - queryRangeIntervalsCount = 30 // - extraIntervalsPerSplitCount = 15 // - lookbackDeltaIntervalsCount = 1 -func analyzeDurationFetchedByQuery(expr parser.Expr, req tripperware.Request, baseInterval time.Duration, lookbackDelta time.Duration) (queryRangeIntervalsCount int, extraIntervalsPerSplitCount int, lookbackDeltaIntervalsCount int) { +func analyzeDurationFetchedByQuery(expr parser.Expr, queryStart int64, queryEnd int64, baseInterval time.Duration, lookbackDelta time.Duration) (queryRangeIntervalsCount int, extraIntervalsPerSplitCount int, lookbackDeltaIntervalsCount int) { queryRangeIntervalsCount = 0 lookbackDeltaIntervalsCount = 0 baseIntervalMillis := util.DurationMilliseconds(baseInterval) @@ -297,10 +310,10 @@ func analyzeDurationFetchedByQuery(expr parser.Expr, req tripperware.Request, ba switch n := node.(type) { case *parser.VectorSelector: // Increment count of intervals fetched by the original start-end time range - queryRangeIntervalsCount += int((req.GetEnd()/baseIntervalMillis)-(req.GetStart()/baseIntervalMillis)) + 1 + queryRangeIntervalsCount += int((queryEnd/baseIntervalMillis)-(queryStart/baseIntervalMillis)) + 1 // Adjust start and end time based on matrix selectors or subquery, this excludes lookbackDelta - start, end := util.GetTimeRangesForSelector(req.GetStart(), req.GetEnd(), 0, n, path, evalRange) + start, end := util.GetTimeRangesForSelector(queryStart, queryEnd, 0, n, path, evalRange) startIntervalIndex := floorDiv(start, baseIntervalMillis) endIntervalIndex := floorDiv(end, baseIntervalMillis) totalDurationFetchedCount += int(endIntervalIndex-startIntervalIndex) + 1 diff --git a/pkg/querier/tripperware/queryrange/split_by_interval_test.go b/pkg/querier/tripperware/queryrange/split_by_interval_test.go index 
9f87bd3013..54e36b8af8 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval_test.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval_test.go @@ -26,6 +26,7 @@ const ( seconds = 1e3 // 1e3 milliseconds per second. queryStoreAfter = 24 * time.Hour lookbackDelta = 5 * time.Minute + longQuery = "/api/v1/query_range?end=1539266098&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&stats=all&step=1200" ) func TestNextIntervalBoundary(t *testing.T) { @@ -290,8 +291,32 @@ func TestSplitByDay(t *testing.T) { for i, tc := range []struct { path, expectedBody string expectedQueryCount int32 + intervalFn IntervalFn }{ - {query, string(mergedHTTPResponseBody), 2}, + { + path: query, + expectedBody: string(mergedHTTPResponseBody), + expectedQueryCount: 2, + intervalFn: func(_ context.Context, _ tripperware.Request) (time.Duration, error) { return 24 * time.Hour, nil }, + }, + { + path: query, + expectedBody: string(mergedHTTPResponseBody), + expectedQueryCount: 2, + intervalFn: dynamicIntervalFn(Config{SplitQueriesByInterval: day}, mockLimits{}, querysharding.NewQueryAnalyzer(), lookbackDelta), + }, + { + path: longQuery, + expectedBody: string(mergedHTTPResponseBody), + expectedQueryCount: 31, + intervalFn: func(_ context.Context, _ tripperware.Request) (time.Duration, error) { return day, nil }, + }, + { + path: longQuery, + expectedBody: string(mergedHTTPResponseBody), + expectedQueryCount: 8, + intervalFn: dynamicIntervalFn(Config{SplitQueriesByInterval: day, DynamicQuerySplitsConfig: DynamicQuerySplitsConfig{MaxSplitsPerQuery: 10}}, mockLimits{}, querysharding.NewQueryAnalyzer(), lookbackDelta), + }, } { tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { @@ -310,11 +335,10 @@ func TestSplitByDay(t *testing.T) { u, err := url.Parse(s.URL) require.NoError(t, err) - interval := func(_ context.Context, _ tripperware.Request) (time.Duration, error) { return 24 * time.Hour, nil } roundtripper := 
tripperware.NewRoundTripper(singleHostRoundTripper{ host: u.Host, next: http.DefaultTransport, - }, PrometheusCodec, nil, NewLimitsMiddleware(mockLimits{}, 5*time.Minute), SplitByIntervalMiddleware(interval, mockLimits{}, PrometheusCodec, nil, lookbackDelta)) + }, PrometheusCodec, nil, NewLimitsMiddleware(mockLimits{}, 5*time.Minute), SplitByIntervalMiddleware(tc.intervalFn, mockLimits{}, PrometheusCodec, nil, lookbackDelta)) req, err := http.NewRequest("GET", tc.path, http.NoBody) require.NoError(t, err) @@ -414,13 +438,14 @@ func Test_evaluateAtModifier(t *testing.T) { } } -func TestDynamicIntervalFn(t *testing.T) { +func Test_dynamicIntervalFn(t *testing.T) { for _, tc := range []struct { name string baseSplitInterval time.Duration req tripperware.Request expectedInterval time.Duration expectedError bool + verticalShardSize int maxQueryIntervalSplits int maxDurationOfDataFetched time.Duration }{ @@ -546,7 +571,7 @@ func TestDynamicIntervalFn(t *testing.T) { }, { baseSplitInterval: day, - name: "query with multiple matrix selectors, expect split by 10 day", + name: "query with multiple matrix selectors, expect split by 9 days", req: &tripperware.PrometheusRequest{ Start: (14 * 24 * 3600 * seconds) + (3600*seconds - 120*seconds), End: (52 * 24 * 3600 * seconds) + (2*3600*seconds + 500*seconds), @@ -554,11 +579,11 @@ func TestDynamicIntervalFn(t *testing.T) { Query: "rate(up[2d]) + rate(up[5d]) + rate(up[7d])", }, maxDurationOfDataFetched: 200 * day, - expectedInterval: 10 * day, + expectedInterval: 9 * day, }, { baseSplitInterval: day, - name: "100 day range with subquery, expect split by 13 day", + name: "100 day range with subquery, expect split by 13 days", req: &tripperware.PrometheusRequest{ Start: 0, End: 100 * 24 * 3600 * seconds, @@ -569,6 +594,32 @@ func TestDynamicIntervalFn(t *testing.T) { maxDurationOfDataFetched: 150 * day, expectedInterval: 13 * day, }, + { + baseSplitInterval: day, + name: "60 day range with 3 vertical shard size and 15 max splits, 
expect split by 12 days", + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 60 * 24 * 3600 * seconds, + Step: 5 * 60 * seconds, + Query: "sum by (pod) (up)", + }, + verticalShardSize: 3, + maxQueryIntervalSplits: 15, + expectedInterval: 12 * day, + }, + { + baseSplitInterval: day, + name: "query with multiple matrix selectors and 3 vertical shard size, expect split by 33 day", + req: &tripperware.PrometheusRequest{ + Start: (14 * 24 * 3600 * seconds) + (3600*seconds - 120*seconds), + End: (32 * 24 * 3600 * seconds) + (2*3600*seconds + 500*seconds), + Step: 60 * seconds, + Query: "rate(up[2d]) + rate(up[5d]) + rate(up[7d])", + }, + verticalShardSize: 3, + maxDurationOfDataFetched: 200 * day, + expectedInterval: 33 * day, + }, { baseSplitInterval: 2 * time.Hour, name: "duration of data fetched is much larger than config, expect large interval and no sharding", @@ -586,12 +637,12 @@ func TestDynamicIntervalFn(t *testing.T) { cfg := Config{ SplitQueriesByInterval: tc.baseSplitInterval, DynamicQuerySplitsConfig: DynamicQuerySplitsConfig{ - MaxShardsPerQuery: tc.maxQueryIntervalSplits, - MaxDurationOfDataFetchedFromStoragePerQuery: tc.maxDurationOfDataFetched, + MaxSplitsPerQuery: tc.maxQueryIntervalSplits, + MaxFetchedStorageDataDurationPerQuery: tc.maxDurationOfDataFetched, }, } ctx := user.InjectOrgID(context.Background(), "1") - interval, err := dynamicIntervalFn(cfg, mockLimits{}, querysharding.NewQueryAnalyzer(), lookbackDelta)(ctx, tc.req) + interval, err := dynamicIntervalFn(cfg, mockLimits{queryVerticalShardSize: tc.verticalShardSize}, querysharding.NewQueryAnalyzer(), lookbackDelta)(ctx, tc.req) require.Equal(t, tc.expectedInterval, interval) if !tc.expectedError { require.Nil(t, err) @@ -599,3 +650,115 @@ func TestDynamicIntervalFn(t *testing.T) { }) } } + +func Test_getIntervalFromMaxSplits(t *testing.T) { + for _, tc := range []struct { + name string + baseSplitInterval time.Duration + req tripperware.Request + maxSplits int + expectedInterval 
time.Duration + }{ + { + name: "24 hours with 30 max splits, expected to split by 1 hour", + baseSplitInterval: time.Hour, + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 24 * 3600 * seconds, + Step: 60 * seconds, + Query: "foo", + }, + maxSplits: 30, + expectedInterval: time.Hour, + }, + { + name: "24 hours with 10 max splits, expected to split by 3 hours", + baseSplitInterval: time.Hour, + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 24 * 3600 * seconds, + Step: 60 * seconds, + Query: "foo", + }, + maxSplits: 10, + expectedInterval: 3 * time.Hour, + }, + { + name: "120 hours with 20 max splits, expected to split by 6 hours", + baseSplitInterval: time.Hour, + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 5 * 24 * 3600 * seconds, + Step: 60 * seconds, + Query: "foo", + }, + maxSplits: 20, + expectedInterval: 6 * time.Hour, + }, + { + name: "30 days with 10 max splits, expected to split by 3 days", + baseSplitInterval: day, + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 30 * 24 * 3600 * seconds, + Step: 5 * 60 * seconds, + Query: "foo", + }, + maxSplits: 10, + expectedInterval: 3 * day, + }, + { + name: "60 days with 15 max splits, expected to split by 4 days", + baseSplitInterval: day, + req: &tripperware.PrometheusRequest{ + Start: 0 * 24 * 3600 * seconds, + End: 60 * 24 * 3600 * seconds, + Step: 5 * 60 * seconds, + Query: "foo", + }, + maxSplits: 15, + expectedInterval: 4 * day, + }, + { + name: "61 days with 15 max splits, expected to split by 5 days", + baseSplitInterval: day, + req: &tripperware.PrometheusRequest{ + Start: 0, + End: 61 * 24 * 3600 * seconds, + Step: 5 * 60 * seconds, + Query: "foo", + }, + maxSplits: 15, + expectedInterval: 5 * day, + }, + { + name: "51 days with 5 max splits, expected to split by 9 days", + baseSplitInterval: day, + req: &tripperware.PrometheusRequest{ + Start: (13 * 24 * 3600 * seconds) + (7*3600*seconds - 1300*seconds), + End: (51 * 24 * 3600 * seconds) + (1*3600*seconds + 
4900*seconds), + Step: 5 * 60 * seconds, + Query: "foo", + }, + maxSplits: 5, + expectedInterval: 9 * day, + }, + { + name: "101 hours with 7 max splits, expected to split by 16 hours", + baseSplitInterval: time.Hour, + req: &tripperware.PrometheusRequest{ + Start: (3 * 24 * 3600 * seconds) - (4*3600*seconds + 240*seconds), + End: (7 * 24 * 3600 * seconds) + (1*3600*seconds + 60*seconds), + Step: 5 * 60 * seconds, + Query: "foo", + }, + maxSplits: 7, + expectedInterval: 16 * time.Hour, + }, + } { + t.Run(tc.name, func(t *testing.T) { + interval := getIntervalFromMaxSplits(tc.req, tc.baseSplitInterval, tc.maxSplits) + require.Equal(t, tc.expectedInterval, interval) + }) + } +}