Skip to content

Commit

Permalink
[receiver/kubeletstats] Use pdata to improve assertions in unit tests (
Browse files Browse the repository at this point in the history
…open-telemetry#30348)

**Description:**
This PR introduces the usage of pdata and golden files within the unit
tests of the `kubeletstats` receiver. Instead of manually asserting the
unit-tests' results and having the expected values hard-coded within the
tests we use generated pdata stored in golden files to improve the
assertions by using the
[`pdatatest`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c87856083e3afa11cd35dbd9cbb8fbda0194fbf4/pkg/pdatatest/README.md#L11)
helper functions.

**Link to tracking Issue:**
Fixes
open-telemetry#27468

**Testing:** <Describe what testing was performed and which tests were
added.>
Unit tests being improved.

---------

Signed-off-by: ChrsMark <[email protected]>
  • Loading branch information
ChrsMark authored Jan 11, 2024
1 parent 522542e commit 5d3c324
Show file tree
Hide file tree
Showing 16 changed files with 14,759 additions and 111 deletions.
194 changes: 83 additions & 111 deletions receiver/kubeletstatsreceiver/scraper_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,18 @@ import (
"context"
"errors"
"os"
"strings"
"path/filepath"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/receiver/receivertest"
"go.uber.org/zap"
"go.uber.org/zap/zaptest/observer"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata"
)
Expand Down Expand Up @@ -61,6 +61,19 @@ func TestScraper(t *testing.T) {
md, err := r.Scrape(context.Background())
require.NoError(t, err)
require.Equal(t, dataLen, md.DataPointCount())
expectedFile := filepath.Join("testdata", "scraper", "test_scraper_expected.yaml")

// Uncomment to regenerate '*_expected.yaml' files
// golden.WriteMetrics(t, expectedFile, md)

expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md,
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreMetricsOrder()))
}

func TestScraperWithMetadata(t *testing.T) {
Expand All @@ -73,7 +86,7 @@ func TestScraperWithMetadata(t *testing.T) {
requiredLabel string
}{
{
name: "Container Metadata",
name: "Container_Metadata",
metadataLabels: []kubelet.MetadataLabel{kubelet.MetadataLabelContainerID},
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.ContainerMetricGroup: true,
Expand All @@ -83,7 +96,7 @@ func TestScraperWithMetadata(t *testing.T) {
requiredLabel: "container.id",
},
{
name: "Volume Metadata",
name: "Volume_Metadata",
metadataLabels: []kubelet.MetadataLabel{kubelet.MetadataLabelVolumeType},
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.VolumeMetricGroup: true,
Expand All @@ -110,23 +123,22 @@ func TestScraperWithMetadata(t *testing.T) {

md, err := r.Scrape(context.Background())
require.NoError(t, err)
require.Equal(t, tt.dataLen, md.DataPointCount())

for i := 0; i < md.ResourceMetrics().Len(); i++ {
rm := md.ResourceMetrics().At(i)
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
ilm := rm.ScopeMetrics().At(j)
require.Equal(t, "otelcol/kubeletstatsreceiver", ilm.Scope().Name())
for k := 0; k < ilm.Metrics().Len(); k++ {
m := ilm.Metrics().At(k)
if strings.HasPrefix(m.Name(), tt.metricPrefix) {
_, ok := rm.Resource().Attributes().Get(tt.requiredLabel)
require.True(t, ok)
continue
}
}
}
}

filename := "test_scraper_with_metadata_" + tt.name + "_expected.yaml"
expectedFile := filepath.Join("testdata", "scraper", filename)

// Uncomment to regenerate '*_expected.yaml' files
// golden.WriteMetrics(t, expectedFile, md)

expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md,
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreMetricsOrder()))

})
}
}
Expand Down Expand Up @@ -303,44 +315,19 @@ func TestScraperWithPercentMetrics(t *testing.T) {

md, err := r.Scrape(context.Background())
require.NoError(t, err)
require.Equal(t, 8, md.DataPointCount())

currentMetric := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0)
assert.Equal(t, "k8s.pod.cpu_limit_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1)
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0)

currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1)
assert.Equal(t, "k8s.pod.cpu_request_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1)

currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2)
assert.Equal(t, "k8s.pod.memory_limit_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1)
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0)

currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3)
assert.Equal(t, "k8s.pod.memory_request_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1)

currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0)
assert.Equal(t, "k8s.container.cpu_limit_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1)
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0)

currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1)
assert.Equal(t, "k8s.container.cpu_request_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1)

currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(2)
assert.Equal(t, "k8s.container.memory_limit_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1)
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0)
expectedFile := filepath.Join("testdata", "scraper", "test_scraper_with_percent_expected.yaml")

currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(3)
assert.Equal(t, "k8s.container.memory_request_utilization", currentMetric.Name())
assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1)
// Uncomment to regenerate '*_expected.yaml' files
// golden.WriteMetrics(t, expectedFile, md)

expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md,
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreMetricsOrder()))
}

func TestScraperWithMetricGroups(t *testing.T) {
Expand All @@ -350,40 +337,40 @@ func TestScraperWithMetricGroups(t *testing.T) {
dataLen int
}{
{
name: "all groups",
name: "all_groups",
metricGroups: allMetricGroups,
dataLen: dataLen,
},
{
name: "only container group",
name: "only_container_group",
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.ContainerMetricGroup: true,
},
dataLen: numContainers * containerMetrics,
},
{
name: "only pod group",
name: "only_pod_group",
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.PodMetricGroup: true,
},
dataLen: numPods * podMetrics,
},
{
name: "only node group",
name: "only_node_group",
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.NodeMetricGroup: true,
},
dataLen: numNodes * nodeMetrics,
},
{
name: "only volume group",
name: "only_volume_group",
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.VolumeMetricGroup: true,
},
dataLen: numVolumes * volumeMetrics,
},
{
name: "pod and node groups",
name: "pod_and_node_groups",
metricGroups: map[kubelet.MetricGroup]bool{
kubelet.PodMetricGroup: true,
kubelet.NodeMetricGroup: true,
Expand All @@ -406,7 +393,21 @@ func TestScraperWithMetricGroups(t *testing.T) {

md, err := r.Scrape(context.Background())
require.NoError(t, err)
require.Equal(t, test.dataLen, md.DataPointCount())

filename := "test_scraper_with_metric_groups_" + test.name + "_expected.yaml"
expectedFile := filepath.Join("testdata", "scraper", filename)

// Uncomment to regenerate '*_expected.yaml' files
// golden.WriteMetrics(t, expectedFile, md)

expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md,
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreMetricsOrder()))
})
}
}
Expand Down Expand Up @@ -460,7 +461,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
dataLen: numVolumes,
},
{
name: "pvc doesn't exist",
name: "pvc_doesnot_exist",
k8sAPIClient: fake.NewSimpleClientset(),
dataLen: numVolumes - 3,
volumeClaimsToMiss: map[string]bool{
Expand All @@ -471,7 +472,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
numLogs: 3,
},
{
name: "empty volume name in pvc",
name: "empty_volume_name_in_pvc",
k8sAPIClient: fake.NewSimpleClientset(getMockedObjectsWithEmptyVolumeName()...),
expectedVolumes: map[string]expectedVolume{
"volume_claim_1": {
Expand Down Expand Up @@ -502,7 +503,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
numLogs: 1,
},
{
name: "non existent volume in pvc",
name: "non_existent_volume_in_pvc",
k8sAPIClient: fake.NewSimpleClientset(getMockedObjectsWithNonExistentVolumeName()...),
expectedVolumes: map[string]expectedVolume{
"volume_claim_1": {
Expand Down Expand Up @@ -533,7 +534,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {
numLogs: 1,
},
{
name: "don't collect detailed labels",
name: "do_not_collect_detailed_labels",
dataLen: numVolumes,
},
}
Expand All @@ -556,52 +557,23 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) {

md, err := r.Scrape(context.Background())
require.NoError(t, err)
require.Equal(t, test.dataLen*volumeMetrics, md.DataPointCount())

// If Kubernetes API is set, assert additional labels as well.
if test.k8sAPIClient != nil && len(test.expectedVolumes) > 0 {
rms := md.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
resource := rms.At(i).Resource()
claimName, ok := resource.Attributes().Get("k8s.persistentvolumeclaim.name")
// claimName will be non empty only when PVCs are used, all test cases
// in this method are interested only in such cases.
if !ok {
continue
}

ev := test.expectedVolumes[claimName.Str()]
requireExpectedVolume(t, ev, resource)

// Assert metrics from certain volume claims expected to be missed
// are not collected.
if test.volumeClaimsToMiss != nil {
for c := range test.volumeClaimsToMiss {
val, ok := resource.Attributes().Get("k8s.persistentvolumeclaim.name")
require.True(t, !ok || val.Str() != c)
}
}
}
}
})
}
}

// requireExpectedVolume asserts that the given resource carries the volume
// attributes described by ev: the volume name, the volume type, and every
// additional key/value pair listed in ev.labels.
func requireExpectedVolume(t *testing.T, ev expectedVolume, resource pcommon.Resource) {
	// NOTE(review): ev is passed by value; if expectedVolume is a struct type
	// this NotNil check always passes — confirm the intended zero-value guard.
	require.NotNil(t, ev)

	requireAttribute(t, resource.Attributes(), "k8s.volume.name", ev.name)
	requireAttribute(t, resource.Attributes(), "k8s.volume.type", ev.typ)
	// Every extra expected label must also be present on the resource.
	for k, v := range ev.labels {
		requireAttribute(t, resource.Attributes(), k, v)
	}
}
filename := "test_scraper_with_pvc_labels_" + test.name + "_expected.yaml"
expectedFile := filepath.Join("testdata", "scraper", filename)

func requireAttribute(t *testing.T, attr pcommon.Map, key string, value string) {
val, ok := attr.Get(key)
require.True(t, ok)
require.Equal(t, value, val.Str())
// Uncomment to regenerate '*_expected.yaml' files
// golden.WriteMetrics(t, expectedFile, md)

expectedMetrics, err := golden.ReadMetrics(expectedFile)
require.NoError(t, err)
require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, md,
pmetrictest.IgnoreStartTimestamp(),
pmetrictest.IgnoreResourceMetricsOrder(),
pmetrictest.IgnoreMetricDataPointsOrder(),
pmetrictest.IgnoreTimestamp(),
pmetrictest.IgnoreMetricsOrder()))
})
}
}

func TestClientErrors(t *testing.T) {
Expand Down
Loading

0 comments on commit 5d3c324

Please sign in to comment.