From 7b72bd42af91a3db5cf870f71b358b40376c0b51 Mon Sep 17 00:00:00 2001
From: Varun C
Date: Mon, 27 Jan 2025 19:28:10 +0000
Subject: [PATCH] Revert more changes

---
 plugins/outputs/cloudwatch/cloudwatch.go      | 19 ++++++++++++-------
 plugins/outputs/cloudwatch/cloudwatch_test.go |  8 ++++----
 plugins/outputs/cloudwatch/util.go            | 18 ++++++++++--------
 3 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go
index b8b1332faa..583f2515c6 100644
--- a/plugins/outputs/cloudwatch/cloudwatch.go
+++ b/plugins/outputs/cloudwatch/cloudwatch.go
@@ -174,15 +174,20 @@ func (c *CloudWatch) pushMetricDatum() {
 		case metric := <-c.metricChan:
 			entity, datums := c.BuildMetricDatum(metric)
 			numberOfPartitions := len(datums)
-			entityStr := entityToString(entity)
-			entityPresent := false
-			if entityStr != "" {
-				c.metricDatumBatch.Size += calculateEntitySize(entity)
-				entityPresent = true
-			}
+			// We currently do not account for entity information as part of the payload size.
+			// This is by design and should be revisited once the SDK protocol changes.
+			// In the meantime, a payload limit increase has been applied in the background to accommodate this decision.
+
+			// Otherwise, to include entity size you would do something like this:
+			// entityPresent := false
+			// if entityStr != "" {
+			//	c.metricDatumBatch.Size += calculateEntitySize(entity)
+			//	entityPresent = true
+			// }
 			for i := 0; i < numberOfPartitions; i++ {
+				entityStr := entityToString(entity)
 				c.metricDatumBatch.Partition[entityStr] = append(c.metricDatumBatch.Partition[entityStr], datums[i])
-				c.metricDatumBatch.Size += payload(datums[i], entityPresent)
+				c.metricDatumBatch.Size += payload(datums[i]) // may need to pass a flag indicating whether this metric has an entity attached, depending on how the SDK protocol changes
 				c.metricDatumBatch.Count++
 				if c.metricDatumBatch.isFull() {
 					// if batch is full
diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go
index 840cd1a2cd..e7dc205e56 100644
--- a/plugins/outputs/cloudwatch/cloudwatch_test.go
+++ b/plugins/outputs/cloudwatch/cloudwatch_test.go
@@ -428,7 +428,7 @@ func TestConsumeMetrics(t *testing.T) {
 	cloudWatchOutput.publisher, _ = publisher.NewPublisher(
 		publisher.NewNonBlockingFifoQueue(10), 10, 2*time.Second,
 		cloudWatchOutput.WriteToCloudWatch)
-	metrics := makeMetrics(1200)
+	metrics := makeMetrics(1500)
 	cloudWatchOutput.Write(metrics)
 	time.Sleep(2*time.Second + 2*cloudWatchOutput.config.ForceFlushInterval)
 	svc.On("PutMetricData", mock.Anything).Return(&res, nil)
@@ -438,8 +438,8 @@
 		10, 2*time.Second,
 		cw.WriteToCloudWatch)
-	// Expect 1200 metrics batched in 2 API calls.
-	pmetrics := createTestMetrics(1200, 1, 1, "B/s")
+	// Expect 1500 metrics batched in 2 API calls.
+	pmetrics := createTestMetrics(1500, 1, 1, "B/s")
 	ctx := context.Background()
 	cw.ConsumeMetrics(ctx, pmetrics)
 	time.Sleep(2*time.Second + 2*cw.config.ForceFlushInterval)
@@ -485,7 +485,7 @@ func TestPublish(t *testing.T) {
 	interval := 60 * time.Second
 	// The buffer holds 50 batches of 1,000 metrics. So choose 5x.
 	numMetrics := 5 * datumBatchChanBufferSize * defaultMaxDatumsPerCall
-	expectedCalls := 395 // Updated to match the observed number of calls
+	expectedCalls := numMetrics / defaultMaxDatumsPerCall
 	log.Printf("I! interval %v, numMetrics %v, expectedCalls %v",
 		interval, numMetrics, expectedCalls)
 	cw := newCloudWatchClient(svc, interval)
diff --git a/plugins/outputs/cloudwatch/util.go b/plugins/outputs/cloudwatch/util.go
index 46ea065fcc..6c1130239e 100644
--- a/plugins/outputs/cloudwatch/util.go
+++ b/plugins/outputs/cloudwatch/util.go
@@ -29,8 +29,6 @@ const (
 	statisticsSize = 246
 	// &MetricData.member.100.Timestamp=2018-05-29T21%3A14%3A00Z
 	timestampSize = 57
-	// &StrictEntityValidation=false
-	strictEntityValidationSize = 29

 	overallConstPerRequestSize = pmdActionSize + versionSize
 	// &Namespace=, this is per request
@@ -48,12 +46,16 @@ const (
 	valueOverheads = 47
 	// &MetricData.member.1.Unit=Kilobytes/Second
 	unitOverheads = 42
-	// &EntityMetricData.member.100.Entity.KeyAttributes.entry.1.key= &EntityMetricData.member.100.Entity.KeyAttributes.entry.1.value=
-	entityKeyAttributesOverhead = 62 + 64
-	// &EntityMetricData.member.100.Entity.Attributes.entry.1.key= &EntityMetricData.member.100.Entity.Attributes.entry.1.value=
-	entityAttributesOverhead = 59 + 61
-	// EntityMetricData.member.100.
-	entityMetricDataPrefixOverhead = 28
+
+	// Entity overheads
+	// // &StrictEntityValidation=false
+	// strictEntityValidationSize = 29
+	// // &EntityMetricData.member.100.Entity.KeyAttributes.entry.1.key= &EntityMetricData.member.100.Entity.KeyAttributes.entry.1.value=
+	// entityKeyAttributesOverhead = 62 + 64
+	// // &EntityMetricData.member.100.Entity.Attributes.entry.1.key= &EntityMetricData.member.100.Entity.Attributes.entry.1.value=
+	// entityAttributesOverhead = 59 + 61
+	// // EntityMetricData.member.100.
+	// entityMetricDataPrefixOverhead = 28
 )

 // Set seed once.
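
For context on the entity-size accounting that this patch comments out, the sketch below shows roughly how the per-entity overhead constants could be combined into a size estimate. It is a minimal, hypothetical sketch and not the plugin's actual calculateEntitySize implementation: the entity struct shape and the example attribute values are assumptions, and only the overhead constants are taken from the diff above. Until the SDK protocol changes mentioned in the comment land, the plugin simply omits this term from metricDatumBatch.Size.

// Illustrative sketch only - not the plugin's implementation.
// It mirrors the idea behind the commented-out entity accounting:
// a fixed query-string overhead per entity key/value pair, plus the
// EntityMetricData member prefix, on top of the regular per-datum payload.
package main

import "fmt"

const (
	// Values taken from the (now commented-out) constants in util.go.
	entityKeyAttributesOverhead    = 62 + 64
	entityAttributesOverhead       = 59 + 61
	entityMetricDataPrefixOverhead = 28
	strictEntityValidationSize     = 29
)

// entity is a stand-in for the SDK entity type (hypothetical shape).
type entity struct {
	KeyAttributes map[string]string
	Attributes    map[string]string
}

// estimateEntitySize approximates a per-entity byte count: fixed overhead
// per key/value pair plus the lengths of the strings themselves.
func estimateEntitySize(e entity) int {
	size := entityMetricDataPrefixOverhead
	for k, v := range e.KeyAttributes {
		size += entityKeyAttributesOverhead + len(k) + len(v)
	}
	for k, v := range e.Attributes {
		size += entityAttributesOverhead + len(k) + len(v)
	}
	return size
}

func main() {
	// Hypothetical example attributes, for illustration only.
	e := entity{
		KeyAttributes: map[string]string{"Type": "Service", "Name": "my-service"},
		Attributes:    map[string]string{"PlatformType": "AWS::EC2"},
	}
	fmt.Println("approximate entity bytes:", strictEntityValidationSize+estimateEntitySize(e))
}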