global:
  config:
    dbName: app #test
crunchy: # enable for TEST and PROD; for PR-based pipelines simply use a single Postgres instance
  enabled: true
  crunchyImage: artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-postgres-gis:ubi8-16.2-3.4-0
  postgresVersion: 16
  postGISVersion: '3.4'
  imagePullPolicy: IfNotPresent

  # Enable the block below to start a new Crunchy cluster from a backed-up location after a disaster; Crunchy will choose the best backup to recover from.
  # Follow https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/
  # "Clone From Backups Stored in S3 / GCS / Azure Blob Storage"
  clone:
    enabled: false
    s3:
      enabled: false
    pvc:
      enabled: false
    path: ~ # path to clone the cluster from, e.g. /backups/cluster/version/1; when creating a new cluster this should normally be the previous cluster version (current version - 1)
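  # A minimal sketch (hypothetical values, not chart defaults) of cloning from a
  # previous cluster version's backups stored on a PVC:
  # clone:
  #   enabled: true
  #   s3:
  #     enabled: false
  #   pvc:
  #     enabled: true
  #   path: /backups/test/cluster/version/1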

  # Enable this to roll the current cluster back to a specific point in time.
  # Follow https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/
  # "Perform an In-Place Point-in-time-Recovery (PITR)"
  restore:
    repoName: ~ # provide the repo name
    enabled: false
    target: ~ # target timestamp to recover to, e.g. 2024-03-24 17:16:00-07
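  # A minimal sketch of an in-place PITR (repo1 is an assumption; use the
  # pgBackRest repo name configured for your cluster):
  # restore:
  #   enabled: true
  #   repoName: repo1
  #   target: "2024-03-24 17:16:00-07"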

  instances:
    name: db # high availability
    replicas: 2 # 2 or 3 for high availability in TEST and PROD.
    metadata:
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9187'
    dataVolumeClaimSpec:
      storage: 200Mi
      storageClassName: netapp-block-standard
      walStorage: 255Mi
    requests:
      cpu: 50m
      memory: 128Mi
    limits:
      cpu: 150m
      memory: 256Mi
    replicaCertCopy:
      requests:
        cpu: 1m
        memory: 32Mi
      limits:
        cpu: 50m
        memory: 64Mi

  pgBackRest:
    enabled: true
    backupPath: /backups/test/cluster/version # change this for PROD by creating a values-prod.yaml
    clusterCounter: 1 # identifies the current generation of the cluster; increment it each time the cluster is cloned.
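    # For example (illustrative; check how the chart templates combine these
    # values): a cluster cloned once from /backups/test/cluster/version/1 would
    # typically run with clusterCounter: 2.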
    image: artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-pgbackrest:ubi8-2.49-0
    # If retentionFullType is set to 'count', the oldest full backups expire once the number of backups exceeds the retention value.
    # If retentionFullType is set to 'time', full backups are kept for that many days before expiring.
    retentionFullType: count
    s3:
      enabled: false # if enabled, the values below must be provided
      retention: 7 # one week of backups in the object store.
      bucket: ~
      endpoint: ~
      accessKey: ~
      secretKey: ~
      fullBackupSchedule: ~ # required when s3 is enabled.
      incrementalBackupSchedule: ~ # required when s3 is enabled.
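    # A sketch of an enabled S3 repo (bucket, endpoint, and credentials are
    # placeholders; the cron schedules mirror the PVC schedules below):
    # s3:
    #   enabled: true
    #   retention: 7
    #   bucket: my-backup-bucket
    #   endpoint: https://s3.example.com
    #   accessKey: <from secret>
    #   secretKey: <from secret>
    #   fullBackupSchedule: 0 8 * * *
    #   incrementalBackupSchedule: 0 0,12 * * *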
    pvc:
      retention: 1 # one day of hot, active backups on the PVC
      retentionFullType: count
      fullBackupSchedule: 0 8 * * *
      incrementalBackupSchedule: 0 0,12 * * * # incremental backup every 12 hours
      volume:
        accessModes: "ReadWriteOnce"
        storage: 100Mi
        storageClassName: netapp-file-backup
    config:
      requests:
        cpu: 5m
        memory: 32Mi
      limits:
        cpu: 20m
        memory: 64Mi
    repoHost:
      requests:
        cpu: 20m
        memory: 128Mi
      limits:
        cpu: 50m
        memory: 256Mi
    sidecars:
      requests:
        cpu: 5m
        memory: 16Mi
      limits:
        cpu: 20m
        memory: 64Mi
    jobs:
      requests:
        cpu: 20m
        memory: 128Mi
      limits:
        cpu: 100m
        memory: 256Mi

  patroni:
    postgresql:
      pg_hba:
        - "host all all 0.0.0.0/0 md5"
        - "host all all ::1/128 md5"
      parameters:
        shared_buffers: 16MB # default is 128MB; a good tuned default for shared_buffers is 25% of the memory allocated to the pod
        wal_buffers: "64kB" # set to -1 to size automatically to 1/32 of shared_buffers or 64kB, whichever is larger
        min_wal_size: 32MB
        max_wal_size: 64MB # default is 1GB
        max_slot_wal_keep_size: 128MB # default is -1, allowing unlimited WAL growth when replicas fall behind
        work_mem: 2MB # per-operation sort/hash memory; default is 4MB
        log_min_duration_statement: 1000ms # log queries that take more than 1 second to respond.
        effective_io_concurrency: 20 # if the underlying disk can handle multiple simultaneous requests, increase this value and test what provides the best application performance. All BCGov clusters run on SSDs.
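        # Worked example of the 25% guideline above: with the 256Mi instance
        # memory limit, shared_buffers would be roughly 64MB; the 16MB set here
        # is a deliberately conservative value for small namespaces.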

  proxy:
    enabled: true
    pgBouncer:
      image: # leave unset; the image configured in the Crunchy Postgres Operator is pulled by default
      replicas: 1
      requests:
        cpu: 5m
        memory: 32Mi
      limits:
        cpu: 20m
        memory: 64Mi
      maxConnections: 10 # make sure this is less than Postgres max_connections
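      # For example: Postgres ships with max_connections = 100, so a pgBouncer
      # pool of 10 leaves ample headroom; if you raise max_connections via the
      # patroni parameters above, keep this value comfortably below it.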

  # Postgres Cluster resource values:
  pgmonitor:
    enabled: false
    exporter:
      image: # leave unset; the image configured in the Crunchy Postgres Operator is pulled by default
      requests:
        cpu: 1m
        memory: 16Mi
      limits:
        cpu: 35m
        memory: 32Mi