Add integration test for OTLP internal metrics (#2058)
* add integration test for OTLP internal metrics

* rename otel gen app
wildum authored Nov 14, 2024
1 parent 218a683 commit 7290a06
Showing 7 changed files with 171 additions and 16 deletions.
4 changes: 2 additions & 2 deletions internal/cmd/integration-tests/common/metrics_assert.go
@@ -24,7 +24,7 @@ var PromDefaultHistogramMetric = []string{
"golang_native_histogram",
}

// Default metrics list according to what the otel-metrics-gen app is generating.
// Default metrics list according to what the otel-gen app is generating.
var OtelDefaultMetrics = []string{
"example_counter",
"example_float_counter",
@@ -34,7 +34,7 @@ var OtelDefaultMetrics = []string{
"example_float_histogram_bucket",
}

// Default histogram metrics list according to what the otel-metrics-gen app is generating.
// Default histogram metrics list according to what the otel-gen app is generating.
var OtelDefaultHistogramMetrics = []string{
"example_exponential_histogram",
"example_exponential_float_histogram",
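For context, a minimal sketch of how a list such as OtelDefaultMetrics can be checked against the backend: poll a Prometheus-compatible query API (Mimir typically serves one at /prometheus/api/v1/query) until every expected metric name returns at least one series. The package name, the assertMetricsPresent helper, and the endpoint path below are illustrative assumptions, not the actual code in the common package.

package common_sketch

import (
	"encoding/json"
	"net/http"
	"net/url"
	"testing"
	"time"
)

// assertMetricsPresent polls the query API until every expected metric name
// returns at least one series, or fails the test once the deadline passes.
func assertMetricsPresent(t *testing.T, queryURL string, names []string) {
	t.Helper()
	client := &http.Client{Timeout: 5 * time.Second}
	for _, name := range names {
		found := false
		deadline := time.Now().Add(30 * time.Second)
		for time.Now().Before(deadline) {
			resp, err := client.Get(queryURL + "?query=" + url.QueryEscape(name))
			if err != nil {
				time.Sleep(time.Second)
				continue
			}
			var body struct {
				Data struct {
					Result []json.RawMessage `json:"result"`
				} `json:"data"`
			}
			decodeErr := json.NewDecoder(resp.Body).Decode(&body)
			resp.Body.Close()
			if decodeErr == nil && len(body.Data.Result) > 0 {
				found = true
				break
			}
			time.Sleep(time.Second)
		}
		if !found {
			t.Errorf("expected metric %q was not found via %s", name, queryURL)
		}
	}
}

A test run against the Mimir instance from the integration-test docker-compose file might then call assertMetricsPresent(t, "http://localhost:9009/prometheus/api/v1/query", OtelDefaultMetrics).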
internal/cmd/integration-tests/configs/{otel-metrics-gen → otel-gen}/Dockerfile
@@ -3,7 +3,7 @@ WORKDIR /app/
COPY go.mod go.sum ./
COPY syntax/go.mod syntax/go.sum ./syntax/
RUN go mod download
COPY ./internal/cmd/integration-tests/configs/otel-metrics-gen/ ./
COPY ./internal/cmd/integration-tests/configs/otel-gen/ ./
RUN CGO_ENABLED=0 go build -o main main.go
FROM alpine:3.18
COPY --from=build /app/main /app/main
internal/cmd/integration-tests/configs/{otel-metrics-gen → otel-gen}/main.go
@@ -8,9 +8,10 @@ import (

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

@@ -22,39 +23,61 @@ func main() {
if !ok {
otlpExporterEndpoint = "localhost:4318"
}
exporter, err := otlpmetrichttp.New(ctx,

// Setting up the trace exporter
traceExporter, err := otlptracehttp.New(ctx,
otlptracehttp.WithInsecure(),
otlptracehttp.WithEndpoint(otlpExporterEndpoint),
)
if err != nil {
log.Fatalf("failed to create trace exporter: %v", err)
}

// Setting up the metric exporter
metricExporter, err := otlpmetrichttp.New(ctx,
otlpmetrichttp.WithInsecure(),
otlpmetrichttp.WithEndpoint(otlpExporterEndpoint),
)
if err != nil {
log.Fatalf("failed to create exporter: %v", err)
log.Fatalf("failed to create metric exporter: %v", err)
}

resource, err := resource.New(ctx,
resource.WithAttributes(
semconv.ServiceNameKey.String("otel-metrics-gen"),
semconv.ServiceNameKey.String("otel-gen"),
),
)
if err != nil {
log.Fatalf("failed to create resource: %v", err)
}

exponentialHistogramView := metric.NewView(
metric.Instrument{
tp := trace.NewTracerProvider(
trace.WithBatcher(traceExporter),
trace.WithResource(resource),
)
otel.SetTracerProvider(tp)
defer func() {
if err := tp.Shutdown(ctx); err != nil {
log.Fatalf("failed to shut down tracer provider: %v", err)
}
}()

exponentialHistogramView := sdkmetric.NewView(
sdkmetric.Instrument{
Name: "example_exponential_*",
},
metric.Stream{
Aggregation: metric.AggregationBase2ExponentialHistogram{
sdkmetric.Stream{
Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
MaxSize: 160,
MaxScale: 20,
},
},
)

provider := sdkmetric.NewMeterProvider(
sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter, sdkmetric.WithInterval(1*time.Second))),
sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter, sdkmetric.WithInterval(1*time.Second))),
sdkmetric.WithResource(resource),
metric.WithView(exponentialHistogramView),
sdkmetric.WithView(exponentialHistogramView),
)
otel.SetMeterProvider(provider)
defer func() {
@@ -66,6 +89,7 @@ func main() {
}
}()

tracer := otel.Tracer("example-tracer")
meter := otel.Meter("example-meter")
counter, _ := meter.Int64Counter("example_counter")
floatCounter, _ := meter.Float64Counter("example_float_counter")
@@ -77,6 +101,7 @@ func main() {
exponentialFloatHistogram, _ := meter.Float64Histogram("example_exponential_float_histogram")

for {
ctx, span := tracer.Start(ctx, "sample-trace")
counter.Add(ctx, 10)
floatCounter.Add(ctx, 2.5)
upDownCounter.Add(ctx, -5)
@@ -87,5 +112,6 @@ func main() {
exponentialFloatHistogram.Record(ctx, 1.5)

time.Sleep(200 * time.Millisecond)
span.End()
}
}
64 changes: 64 additions & 0 deletions internal/cmd/integration-tests/configs/tempo/tempo.yaml
@@ -0,0 +1,64 @@
stream_over_http_enabled: true
server:
http_listen_port: 3200
log_level: info


cache:
background:
writeback_goroutines: 5
caches:
- roles:
- frontend-search
memcached:
host: memcached:11211

query_frontend:
search:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
trace_by_id:
duration_slo: 5s

distributor:
receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can
protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
thrift_http: #
grpc: # for a production deployment you should only enable the receivers you need!
thrift_binary:
thrift_compact:
zipkin:
otlp:
protocols:
http:
grpc:
opencensus:

ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

compactor:
compaction:
block_retention: 1h # overall Tempo trace retention. set for demo purposes

metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /var/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
traces_storage:
path: /var/tempo/generator/traces

storage:
trace:
backend: local # backend configuration to use
wal:
path: /var/tempo/wal # where to store the wal locally
local:
path: /var/tempo/blocks
17 changes: 15 additions & 2 deletions internal/cmd/integration-tests/docker-compose.yaml
@@ -10,6 +10,19 @@ services:
- -config.file=/etc/mimir-config/mimir.yaml
ports:
- "9009:9009"

tempo:
image: grafana/tempo:latest
command: [ "-config.file=/etc/tempo.yaml" ]
volumes:
- ./configs/tempo/tempo.yaml:/etc/tempo.yaml
ports:
- "14268:14268" # jaeger ingest
- "3200:3200" # tempo
- "9095:9095" # tempo grpc
- "4319:4317" # otlp grpc
- "4320:4318" # otlp http
- "9411:9411" # zipkin

zookeeper:
image: confluentinc/cp-zookeeper:latest
@@ -44,9 +57,9 @@ services:
ports:
- "3100:3100"

otel-metrics-gen:
otel-gen:
build:
dockerfile: ./internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile
dockerfile: ./internal/cmd/integration-tests/configs/otel-gen/Dockerfile
context: ../../..
environment:
- OTEL_EXPORTER_ENDPOINT=${OTEL_EXPORTER_ENDPOINT:-host.docker.internal:4318}
31 changes: 30 additions & 1 deletion internal/cmd/integration-tests/tests/otlp-metrics/config.alloy
@@ -3,6 +3,7 @@ otelcol.receiver.otlp "otlp_metrics" {

output {
metrics = [otelcol.processor.attributes.otlp_metrics.input, otelcol.exporter.prometheus.otlp_to_prom_metrics.input]
traces = [otelcol.processor.attributes.otlp_metrics.input]
}
}

@@ -13,14 +14,24 @@ otelcol.processor.attributes "otlp_metrics" {
action = "insert"
}

output {
metrics = [otelcol.processor.batch.otlp_metrics.input]
traces = [otelcol.processor.batch.otlp_metrics.input]
}
}

otelcol.processor.batch "otlp_metrics" {
output {
metrics = [otelcol.exporter.otlphttp.otlp_metrics.input]
traces = [otelcol.exporter.otlphttp.otlp_metrics.input]
}
}

otelcol.exporter.otlphttp "otlp_metrics" {
metrics_endpoint = "http://localhost:9009/otlp/v1/metrics"
traces_endpoint = "http://localhost:4320/v1/traces"
client {
endpoint = "http://localhost:9009/otlp"
endpoint = "ignore"
tls {
insecure = true
insecure_skip_verify = true
@@ -47,3 +58,21 @@ prometheus.remote_write "otlp_to_prom_metrics" {
test_name = "otlp_to_prom_metrics",
}
}

prometheus.exporter.self "otlp_integration" {}

prometheus.scrape "otlp_integration" {
targets = prometheus.exporter.self.otlp_integration.targets
forward_to = [prometheus.remote_write.otlp_integration.receiver]
scrape_interval = "1s"
scrape_timeout = "500ms"
}

prometheus.remote_write "otlp_integration" {
endpoint {
url = "http://localhost:9009/api/v1/push"
}
external_labels = {
test_name = "otlp_integration",
}
}
@@ -0,0 +1,23 @@
//go:build !windows

package main

import (
"testing"

"github.com/grafana/alloy/internal/cmd/integration-tests/common"
)

func TestAlloyIntegrationMetrics(t *testing.T) {
// These otel metrics are needed in the k8s-monitoring helm chart (https://github.com/grafana/k8s-monitoring-helm/blob/main/charts/k8s-monitoring-v1/default_allow_lists/alloy_integration.yaml)
var OTLPMetrics = []string{
"otelcol_exporter_send_failed_spans_total",
"otelcol_exporter_sent_spans_total",
"otelcol_processor_batch_batch_send_size_bucket",
"otelcol_processor_batch_metadata_cardinality",
"otelcol_processor_batch_timeout_trigger_send_total",
"otelcol_receiver_accepted_spans_total",
"otelcol_receiver_refused_spans_total",
}
common.MimirMetricsTest(t, OTLPMetrics, []string{}, "otlp_integration")
}
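As a rough picture of what the assertion boils down to (the real logic lives in common.MimirMetricsTest): the prometheus.remote_write "otlp_integration" block in config.alloy stamps every self-scraped sample with test_name="otlp_integration", so each internal otelcol_* metric can be looked up in Mimir under that label. A hedged sketch of such a lookup, assuming Mimir's default Prometheus-compatible query path; the checkInternalMetric helper is hypothetical, not part of the repository.

package otlpsketch

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// checkInternalMetric reports whether a given internal metric is present in
// Mimir with the test_name label added by the self-scrape pipeline.
func checkInternalMetric(queryURL, metric string) (bool, error) {
	selector := fmt.Sprintf(`%s{test_name="otlp_integration"}`, metric)
	resp, err := http.Get(queryURL + "?query=" + url.QueryEscape(selector))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	var body struct {
		Data struct {
			Result []json.RawMessage `json:"result"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return false, err
	}
	return len(body.Data.Result) > 0, nil
}

For example, checkInternalMetric("http://localhost:9009/prometheus/api/v1/query", "otelcol_exporter_sent_spans_total") would be expected to return true once Alloy has exported at least one batch of spans to Tempo.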
