flows_v1beta2_flowcollector.yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  namespace: netobserv
  deploymentModel: Direct
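  # Note: with the "Kafka" deploymentModel, flows are expected to transit through
  # the brokers configured under spec.kafka below, instead of going straight from
  # the eBPF agents to the processor.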
  networkPolicy:
    enable: false
    additionalNamespaces: []
  agent:
    type: eBPF
    ebpf:
      imagePullPolicy: IfNotPresent
      logLevel: info
      sampling: 50
      cacheActiveTimeout: 5s
      cacheMaxFlows: 100000
      # Change privileged to "true" on old kernel versions that do not know CAP_BPF, or when using the "PacketDrop" feature
      privileged: false
      # features:
      # - "PacketDrop"
      # - "DNSTracking"
      # - "FlowRTT"
      # - "NetworkEvents"
      interfaces: []
      excludeInterfaces: ["lo"]
      kafkaBatchSize: 1048576
      # flowFilter:
      #   tcpFlags: "SYN"
      #   action: Accept
      #   cidr: 0.0.0.0/0
      #   protocol: TCP
      #   sourcePorts: 53
      #   enable: true
      metrics:
        server:
          port: 9400
      # Custom optional resources configuration
      resources:
        requests:
          memory: 50Mi
          cpu: 100m
        limits:
          memory: 800Mi
  kafka:
    address: "kafka-cluster-kafka-bootstrap.netobserv"
    topic: network-flows
    tls:
      enable: false
      caCert:
        type: secret
        name: kafka-cluster-cluster-ca-cert
        certFile: ca.crt
      userCert:
        type: secret
        name: flp-kafka
        certFile: user.crt
        certKey: user.key
  processor:
    imagePullPolicy: IfNotPresent
    logLevel: info
    # Change logTypes to "Conversations" or "All" to enable conversation tracking
    logTypes: Flows
    # Append a unique cluster name to each record
    # clusterName: <CLUSTER NAME>
    # addZone: true
    # subnetLabels:
    #   openShiftAutoDetect: true
    #   customLabels:
    #   - cidrs: []
    #     name: ""
    metrics:
      server:
        port: 9401
      disableAlerts: []
      # includeList:
      #   - "node_ingress_bytes_total"
      #   - "workload_ingress_bytes_total"
      #   - "namespace_flows_total"
      #   - "namespace_drop_packets_total"
      #   - "namespace_rtt_seconds"
    # Kafka consumer stage configuration
    kafkaConsumerReplicas: 3
    kafkaConsumerAutoscaler: null
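    # A minimal autoscaler sketch, assuming kafkaConsumerAutoscaler accepts the same
    # HPA-style structure as spec.consolePlugin.autoscaler shown further below:
    # kafkaConsumerAutoscaler:
    #   status: Enabled
    #   minReplicas: 3
    #   maxReplicas: 6
    #   metrics:
    #   - type: Resource
    #     resource:
    #       name: cpu
    #       target:
    #         type: Utilization
    #         averageUtilization: 50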
    kafkaConsumerQueueCapacity: 1000
    kafkaConsumerBatchSize: 10485760
    # Custom optional resources configuration
    resources:
      requests:
        memory: 100Mi
        cpu: 100m
      limits:
        memory: 800Mi
    # advanced:
    #   secondaryNetworks:
    #   - name: "my-vms/custom-nad"
    #     # Any of: MAC, IP, Interface
    #     index: [MAC]
  loki:
    enable: true
    # Change mode to "LokiStack" to use with the Loki Operator
    mode: Monolithic
    monolithic:
      url: 'http://loki.netobserv.svc:3100/'
      tenantID: netobserv
      tls:
        enable: false
        caCert:
          type: configmap
          name: loki-gateway-ca-bundle
          certFile: service-ca.crt
    lokiStack:
      name: loki
      # Change the Loki Operator instance namespace
      # namespace: loki-operator
    # Console plugin read timeout
    readTimeout: 30s
    # Write stage configuration
    writeTimeout: 10s
    writeBatchWait: 1s
    writeBatchSize: 10485760
  prometheus:
    querier:
      enable: true
      mode: Auto
      timeout: 30s
  consolePlugin:
    enable: true
    imagePullPolicy: IfNotPresent
    logLevel: info
    # Scaling configuration
    replicas: 1
    autoscaler:
      status: Disabled
      minReplicas: 1
      maxReplicas: 3
      metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 50
    # Custom optional port-to-service name translation
    portNaming:
      enable: true
      portNames:
        "3100": loki
    # Custom optional filter presets
    quickFilters:
    - name: Applications
      filter:
        flow_layer: '"app"'
      default: true
    - name: Infrastructure
      filter:
        flow_layer: '"infra"'
    - name: Pods network
      filter:
        src_kind: '"Pod"'
        dst_kind: '"Pod"'
      default: true
    - name: Services network
      filter:
        dst_kind: '"Service"'
    # Custom optional resources configuration
    resources:
      requests:
        memory: 50Mi
        cpu: 100m
      limits:
        memory: 100Mi
  exporters: []
  # - type: Kafka
  #   kafka:
  #     address: "kafka-cluster-kafka-bootstrap.netobserv"
  #     topic: netobserv-flows-export
  # or
  # - type: IPFIX
  #   ipfix:
  #     targetHost: "ipfix-collector.ipfix.svc.cluster.local"
  #     targetPort: 4739
  #     transport: TCP or UDP (optional - defaults to TCP)
  # or
  # - type: OpenTelemetry
  #   openTelemetry:
  #     targetHost: "1.2.3.4:443"
  #     targetPort: 4317
  #     protocol: grpc
  #     logs:
  #       enable: true
  #     metrics:
  #       enable: true
  #       prefix: netobserv
  #       pushTimeInterval: 20s
  #       expiryTime: 2m
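# To try this sample, assuming the NetObserv operator is installed and the
# FlowCollector CRD is available on the cluster, it can be applied with:
#   kubectl apply -f flows_v1beta2_flowcollector.yaml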