clusterDomain: "example.com"

ingress-nginx:
  controller:
    config:
      proxy-body-size: 100m

cert-manager:
  # Set your email address here so auto-generated HTTPS certs will work:
  email: "[email protected]"

elasticsearch:
  enabled: false

metricsserver:
  enabled: false

vpa:
  enabled: false

opensearch:
  enabled: false

prometheusstack:
  enabled: false
  grafana:
    enabled: false
    ingress:
      enabled: true
      ingressClassName: nginx
      annotations:
        cert-manager.io/cluster-issuer: "harmony-letsencrypt-global"
        nginx.ingress.kubernetes.io/ssl-redirect: "true"
      hosts:
        - grafana.example.com
      tls:
        - secretName: promstack-ingress-tls
          hosts:
            - grafana.example.com
  # alertmanager:
  #   config: {}  # Set it using `--set-file prometheusstack.alertmanager.config=<path-to-file>`
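  # For example, a sketch of passing an Alertmanager config file at deploy time
  # (the release, chart, and file names here are hypothetical; use your own):
  #   helm upgrade harmony harmony/harmony-chart \
  #     --set-file prometheusstack.alertmanager.config=./alertmanager.yaml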
k8sdashboard:
  enabled: false

velero:
  enabled: false
  configuration:
    backupStorageLocation:
      - name: velero-backup-harmony
        provider: aws
        bucket: se6099-e665a5
        default: true
        config:
          s3Url: https://nyc3.digitaloceanspaces.com
          region: nyc3
    volumeSnapshotLocation:
      - name: velero-volume-snapshot-harmony
        provider: digitalocean.com/velero
        config:
          region: nyc3
  credentials:
    extraEnvVars:
      DIGITALOCEAN_TOKEN: ""  # DigitalOcean API token
    secretContents:
      cloud: |
        [default]
        aws_access_key_id=""  # AWS access key ID
        aws_secret_access_key=""  # AWS secret access key
  initContainers:
    - name: velero-plugin-for-aws
      image: velero/velero-plugin-for-aws:1.8.4
      volumeMounts:
        - mountPath: /target
          name: plugins
    - name: velero-plugin-for-digitalocean
      image: digitalocean/velero-plugin:1.1.0
      volumeMounts:
        - mountPath: /target
          name: plugins
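# Once Velero is enabled and the credentials above are filled in, a backup can
# be created against the storage location defined here, e.g. with the velero
# CLI (the backup name is hypothetical):
#   velero backup create harmony-backup --storage-location velero-backup-harmony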
openfaas:
  enabled: false

# ClickHouse Vector Sink
vector:
  enabled: false
  customConfig:
    transforms:
      # Events should be separated per namespace, and a different sink should be
      # implemented for every namespace with Aspects.
      logs_openedx_demo:
        type: filter
        inputs:
          - kubernetes_tutor_logs
        condition: '.kubernetes.pod_namespace == "openedx_demo"'  # Make sure to update the namespace
      xapi_openedx_demo:
        type: remap
        inputs:
          - logs_openedx_demo
        drop_on_error: true
        drop_on_abort: true
        source: |-
          parsed, err_regex = parse_regex(.message, r'^.* \[xapi_tracking\] [^{}]* (?P<tracking_message>\{.*\})$')
          if err_regex != null {
            abort
          }
          message, err = strip_whitespace(parsed.tracking_message)
          parsed_json, err_json = parse_json(message)
          if err_json != null {
            log("Unable to parse JSON from xapi tracking log message: " + err_json, level: "error")
            abort
          }
          time, err_timestamp = parse_timestamp(parsed_json.time, "%+")
          if err_timestamp != null {
            log("Unable to parse timestamp from tracking log 'time' field: " + err_timestamp, level: "warn")
            time, err_timestamp = parse_timestamp(parsed_json.timestamp, "%+")
            if err_timestamp != null {
              log("Unable to parse timestamp from tracking log 'timestamp' field: " + err_timestamp, level: "error")
              abort
            }
          }
          event_id = parsed_json.id
          . = {"event_id": event_id, "emission_time": format_timestamp!(time, format: "%+"), "event": encode_json(parsed_json)}
    sinks:
      # Example ClickHouse Sink
      clickhouse_openedx_demo:
        type: clickhouse
        auth:
          strategy: basic
          user: 'ch_vector'
          password: 'password'
        encoding:
          timestamp_format: unix
        date_time_best_effort: true
        inputs:
          - xapi_openedx_demo
        # http://{{ CLICKHOUSE_HOST }}.{{ CLICKHOUSE_NAMESPACE }}:{{ CLICKHOUSE_INTERNAL_HTTP_PORT }}
        endpoint: http://clickhouse-clickhouse.openedx-harmony:8123
        # ASPECTS_VECTOR_DATABASE
        database: 'openedx'
        table: 'xapi_events_all'
        healthcheck: true
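      # To check that events are flowing, a query along these lines can be run
      # against ClickHouse, using the database and table configured above:
      #   SELECT count() FROM openedx.xapi_events_all;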
      tracking_logs_to_s3:
        type: aws_s3
        inputs:
          - tracking_logs
        filename_append_uuid: true
        filename_time_format: "log-%Y%m%d-%H"
        # Helm tries to render the .type and .kubernetes variables. We need to escape them to avoid errors.
        # See: https://github.com/helm/helm/issues/2798
        key_prefix: {{ `{{ .kubernetes.pod_namespace }}/{{ .type }}/{{ .kubernetes.container_name }}/date=%F/` }}
        compression: gzip
        encoding:
          codec: text
        bucket: "set_me"
        auth:
          access_key_id: "set_me"
          secret_access_key: "set_me"
        region: "set_me"
        # When using AWS-compatible services like MinIO, set the endpoint and tweak SSL if necessary:
        # endpoint: "http://minio.{namespace}:9000"
        # region: none
        healthcheck:
          enabled: false
      logs_to_cloudwatch:
        type: aws_cloudwatch_logs
        inputs:
          - application_logs
        group_name: my-cluster
        stream_name: {{ `{{ .kubernetes.pod_namespace }}/{{ .kubernetes.container_name }}` }}
        auth:
          access_key_id: "set_me"
          secret_access_key: "set_me"
        encoding:
          codec: json
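# A sketch of applying this values file (the repository, release, and namespace
# names are hypothetical; substitute the ones from your own Harmony deployment):
#   helm upgrade --install harmony harmony/harmony-chart \
#     --namespace harmony --create-namespace -f values-example.yaml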