# otelcol-config-filelog.yml
# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  http_forwarder:
    ingress:
      endpoint: 0.0.0.0:6060
    egress:
      # TODO: Ensure this is set properly
      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
  zpages:
  memory_ballast:
    # In general, the ballast should be set to 1/3 of the collector's memory and the limit
    # to 90% of the collector's memory.
    # The simplest way to specify the ballast size is to set the SPLUNK_BALLAST_SIZE_MIB env variable.
    # TODO: Ensure this is set properly
    size_mib: ${SPLUNK_BALLAST_SIZE_MIB}
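    # Worked example (illustrative, not from the upstream file): if the collector is
    # allotted 1024 MiB of memory, a 1/3 ballast is SPLUNK_BALLAST_SIZE_MIB=341 and the
    # 90% memory limit works out to roughly 922 MiB.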

receivers:
  filelog:
    include:
      - /var/lib/docker/containers/*/*-json.log
    encoding: utf-8
    fingerprint_size: 1kb
    force_flush_period: "0"
    include_file_name: false
    include_file_path: true
    max_concurrent_files: 1024
    max_log_size: 1MiB
    operators:
      - id: parser-docker
        timestamp:
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
          parse_from: attributes.time
        type: json_parser
      - id: extract_metadata_from_docker_tag
        parse_from: attributes.attrs.tag
        regex: ^(?P<name>[^\|]+)\|(?P<image_name>[^\|]+)\|(?P<id>[^$]+)$
        type: regex_parser
        if: 'attributes?.attrs?.tag != nil'
      - from: attributes.name
        to: resource["com.splunk.sourcetype"]
        type: copy
        if: 'attributes?.name != nil'
      - from: attributes.name
        to: resource["docker.container.name"]
        type: move
        if: 'attributes?.name != nil'
      - from: attributes.image_name
        to: resource["docker.image.name"]
        type: move
        if: 'attributes?.image_name != nil'
      - from: attributes.id
        to: resource["docker.container.id"]
        type: move
        if: 'attributes?.id != nil'
      - from: attributes.stream
        to: resource["log.io.stream"]
        type: move
      - field: attributes.attrs.tag
        type: remove
        if: 'attributes?.attrs?.tag != nil'
      - from: attributes.log
        to: body
        type: move
    poll_interval: 200ms
    start_at: beginning
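  # Illustrative note (assumption, not stated in this file): the tag regex above matches a
  # Docker json-file logging driver configured with a tag template such as
  #   tag: "{{.Name}}|{{.ImageName}}|{{.ID}}"
  # so that a record like
  #   {"log":"hello\n","stream":"stdout","time":"2024-01-01T12:00:00.000000000Z","attrs":{"tag":"frontend|frontend:latest|4f5a2b"}}
  # ends up with the "log" field as the body and resource attributes
  # docker.container.name=frontend, docker.image.name=frontend:latest, docker.container.id=4f5a2b.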
  hostmetrics:
    collection_interval: 10s
    scrapers:
      cpu:
      disk:
      filesystem:
      memory:
      network:
      # System load average metrics https://en.wikipedia.org/wiki/Load_(computing)
      load:
      # Paging/Swap space utilization and I/O metrics
      paging:
      # Aggregated system process count metrics
      processes:
      # System processes metrics, disabled by default
      # process:
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_binary:
        endpoint: 0.0.0.0:6832
      thrift_compact:
        endpoint: 0.0.0.0:6831
      thrift_http:
        endpoint: 0.0.0.0:14268
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  sapm:
    endpoint: 0.0.0.0:7276
  signalfx:
    endpoint: 0.0.0.0:9943
  # This section collects the collector's own OpenTelemetry metrics.
  # These metrics are included even if you are only a SignalFx APM customer.
  prometheus:
    config:
      scrape_configs:
        - job_name: 'otel-collector'
          scrape_interval: 10s
          static_configs:
            - targets: ['localhost:8888']
              # If you want to use the environment filter
              # in the SignalFx dashboard
              #labels:
                #environment: demo
          metric_relabel_configs:
            - source_labels: [ __name__ ]
              regex: '.*grpc_io.*'
              action: drop
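              # Illustrative (assumption): this drops the collector's internal gRPC
              # instrumentation series, e.g. a metric named like grpc_io_client_completed_rpcs,
              # to reduce self-monitoring noise.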
  # Enable Zipkin to support the Istio Mixer Adapter
  # https://github.com/signalfx/signalfx-istio-adapter
  zipkin:
    endpoint: 0.0.0.0:9411

processors:
  batch:
  # Optional: If you have a different environment tag name.
  # If this option is enabled it must be added to the pipelines section below.
  #attributes/copyfromexistingkey:
    #actions:
      #- key: environment
        #from_attribute: YOUR_EXISTING_TAG_NAME
        #action: upsert
  # Optional: If you want to add an environment tag.
  # If this option is enabled it must be added to the pipelines section below.
  #attributes/newenvironment:
    #actions:
      #- key: environment
        #value: "YOUR_ENVIRONMENT_NAME"
        #action: insert
  resourcedetection:
    detectors:
      - env
      - system
    timeout: 10s
    override: true
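  # Illustrative (assumption): the env detector reads OTEL_RESOURCE_ATTRIBUTES, so exporting
  #   OTEL_RESOURCE_ATTRIBUTES=deployment.environment=demo
  # on the collector would stamp that attribute onto telemetry passing through these pipelines.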

exporters:
  # Traces
  sapm:
    # TODO: Ensure this is set properly
    access_token: "${SPLUNK_ACCESS_TOKEN}"
    # TODO: Ensure this is set properly
    endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com/v2/trace"
  # Metrics
  signalfx:
    # TODO: Ensure this is set properly
    access_token: "${SPLUNK_ACCESS_TOKEN}"
    # TODO: Ensure this is set properly
    realm: "${SPLUNK_REALM}"
    sync_host_metadata: true
  # Logs (can also be used to send traces)
  splunk_hec:
    token: "${SPLUNK_HEC_TOKEN}"
    endpoint: "${SPLUNK_HEC_URL}/services/collector"
    source: "otel"
    sourcetype: "otel"
    index: "astronomyshop"
    profiling_data_enabled: false
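  # Illustrative values (assumptions, not from the upstream file): SPLUNK_HEC_URL is the base
  # URL of a Splunk HTTP Event Collector endpoint, e.g. https://splunk.example.com:8088
  # (8088 is the default HEC port), and the target index "astronomyshop" must already exist.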

service:
  extensions: [health_check, http_forwarder, zpages, memory_ballast]
  # telemetry:
  #   logs:
  #     level: "debug"
  pipelines:
    traces:
      receivers: [jaeger, otlp, sapm, zipkin]
      processors: [batch, resourcedetection]
      exporters: [sapm, signalfx]
    metrics:
      receivers: [otlp, signalfx, prometheus, hostmetrics]
      processors: [batch, resourcedetection]
      exporters: [signalfx]
    logs:
      receivers: [otlp, signalfx, filelog]
      processors: [batch, resourcedetection]
      exporters: [splunk_hec]
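
# Usage sketch (assumption, not part of the upstream file): the collector would typically be
# started with the Splunk variables set and read access to the Docker log directory, e.g.
#   SPLUNK_REALM=us1 SPLUNK_ACCESS_TOKEN=... SPLUNK_HEC_TOKEN=... \
#   SPLUNK_HEC_URL=https://splunk.example.com:8088 SPLUNK_BALLAST_SIZE_MIB=341 \
#   otelcol-contrib --config otelcol-config-filelog.yml
# with /var/lib/docker/containers mounted read-only when the collector runs in a container.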