refactor: kubernetes infrastructure deployment
1. Use a dummy instead of fetching images from Grafana; opened issue #152
2. Fix linter issues
DmitryTravyan committed Feb 1, 2023
1 parent 26b1ebd commit 5efec05
Showing 14 changed files with 143 additions and 33 deletions.
15 changes: 13 additions & 2 deletions internal/deployment/constants.go
@@ -24,9 +24,20 @@ execute command for set environment variables KUBECONFIG before using:
stroppyBinaryPath = "/usr/local/bin/stroppy"
stroppyHomePath = "/home/stroppy"

-//nolint
-addToHosts = `
+//nolint
+addToHosts = `
%s prometheus.cluster.picodata.io
%s status.cluster.picodata.io
`
)

+const (
+pgDefaultURI = "postgres://stroppy:stroppy@acid-postgres-cluster/stroppy?sslmode=disable"
+fdbDefultURI = "fdb.cluster"
+mongoDefaultURI = "mongodb://stroppy:stroppy@sample-cluster-name-mongos" +
+".default.svc.cluster.local/admin?ssl=false"
+
+crDefaultURI = "postgres://stroppy:stroppy@/stroppy?sslmode=disable"
+cartDefaultURI = "http://routers:8081"
+ydbDefaultURI = "grpc://stroppy-ydb-database-grpc:2135/root/stroppy-ydb-database"
+)
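For context, these new defaults cover one connection string per supported backend. A minimal sketch of how a default URI might be selected by database type; the selector function and its type strings are hypothetical, only the constants come from the diff above (fdbDefultURI keeps the commit's original spelling):

```go
package main

import "fmt"

// Default connection strings from the diff above.
const (
	pgDefaultURI    = "postgres://stroppy:stroppy@acid-postgres-cluster/stroppy?sslmode=disable"
	fdbDefultURI    = "fdb.cluster"
	mongoDefaultURI = "mongodb://stroppy:stroppy@sample-cluster-name-mongos" +
		".default.svc.cluster.local/admin?ssl=false"
	crDefaultURI   = "postgres://stroppy:stroppy@/stroppy?sslmode=disable"
	cartDefaultURI = "http://routers:8081"
	ydbDefaultURI  = "grpc://stroppy-ydb-database-grpc:2135/root/stroppy-ydb-database"
)

// defaultURI is a hypothetical helper, not part of the commit: it maps a
// database kind to its default connection string.
func defaultURI(dbType string) (string, error) {
	switch dbType {
	case "postgres":
		return pgDefaultURI, nil
	case "foundation":
		return fdbDefultURI, nil
	case "mongodb":
		return mongoDefaultURI, nil
	case "cockroach":
		return crDefaultURI, nil
	case "cartridge":
		return cartDefaultURI, nil
	case "ydb":
		return ydbDefaultURI, nil
	default:
		return "", fmt.Errorf("unknown database type %q", dbType)
	}
}

func main() {
	uri, err := defaultURI("ydb")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(uri)
}
```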
2 changes: 1 addition & 1 deletion pkg/engine/kubeengine/constants.go
@@ -28,7 +28,7 @@ const (
ConnectionRetryCount = 3

// path to monitoring script.
-GetPngScriptPath = "./get_png.sh"
+GetPngScriptName = "get_png.sh"

SSHUserName = "ubuntu"
)
8 changes: 6 additions & 2 deletions pkg/engine/kubeengine/deploy_crd.go
@@ -94,16 +94,20 @@ var (
func (storageClient *StorageClient) Get(
ctx context.Context,
name string,
-options *metav1.GetOptions,
+getOptions *metav1.GetOptions,
) (*ydbApi.Storage, error) {
var err error
result := &ydbApi.Storage{} //nolint

if name == "" {
return nil, errStorageNameMustNotBeNull
}

if err = storageClient.client.Get().
Namespace(storageClient.ns).
Resource(storagePluralName).
Name(name).
-VersionedParams(options, scheme.ParameterCodec).
+VersionedParams(getOptions, scheme.ParameterCodec).
Do(ctx).
Into(result); err != nil {
return nil, errors.Wrap(err, "failed to get storage")
13 changes: 2 additions & 11 deletions pkg/engine/kubeengine/deploy_objects.go
@@ -297,9 +297,8 @@ func GetPromtailValues(shellState *state.State) ([]byte, error) {

func GetPrometheusValues(shellState *state.State) ([]byte, error) {
var (
-bytes []byte
-err error
-values map[string]interface{}
+bytes []byte
+err error
)

if bytes, err = os.ReadFile(path.Join(
@@ -311,14 +310,6 @@ func GetPrometheusValues(shellState *state.State) ([]byte, error) {
return nil, merry.Prepend(err, "failed to open prometheus values template")
}

-if err = k8sYaml.Unmarshal(bytes, &values); err != nil {
-return nil, merry.Prepend(err, "failed to deserialize values")
-}
-
-if bytes, err = goYaml.Marshal(&values); err != nil {
-return nil, merry.Prepend(err, "failed to serialize values")
-}
-
return bytes, nil
}

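The dropped unmarshal/marshal pair was a pass-through: deserializing the values template and immediately reserializing it added nothing, so the function can return the file bytes directly. A minimal sketch of the resulting behavior, with a hypothetical file path standing in for the repository's template location:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// getValues mirrors the slimmed-down GetPrometheusValues: read the values
// template and hand the raw bytes to the caller, with no YAML round trip.
func getValues(workDir string) ([]byte, error) {
	// The subdirectory below is an assumed location for illustration.
	valuesPath := path.Join(workDir, "third_party", "extra", "values", "prometheus-values-tpl.yml")

	bytes, err := os.ReadFile(valuesPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open prometheus values template: %w", err)
	}

	return bytes, nil
}

func main() {
	values, err := getValues(".")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d bytes of helm values\n", len(values))
}
```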
5 changes: 3 additions & 2 deletions pkg/engine/kubeengine/main.go
@@ -22,7 +22,8 @@ import (
func CreateSystemShell(settings *config.Settings) (sc ssh.Client, err error) {
kubernetesMasterAddress := settings.TestSettings.KubernetesMasterAddress
commandClientType := ssh.RemoteClient
-if settings.TestSettings.UseCloudStroppy {
+
+if settings.TestSettings.IsController() {
if kubernetesMasterAddress == "" {
err = fmt.Errorf("kubernetes master address is empty")
return
@@ -51,7 +52,7 @@ func createKubernetesObject(
pObj = &Engine{
clusterConfigFile: filepath.Join(shellState.Settings.WorkingDirectory, "config"),
sc: sshClient,
-UseLocalSession: shellState.Settings.Local,
+UseLocalSession: shellState.Settings.TestSettings.IsLocal(),
isSshKeyFileOnMaster: false,
}
return
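The flag checks move behind accessor methods on the test settings, so call sites no longer read struct fields directly. The method bodies are not part of this diff; a sketch of what such accessors might look like, with assumed field names:

```go
package main

import "fmt"

// TestSettings is a stand-in for the repository's settings type; the
// UseCloudStroppy and RunLocal fields are assumptions for illustration.
type TestSettings struct {
	UseCloudStroppy bool
	RunLocal        bool
}

// IsController reports whether stroppy drives the test from a remote
// cluster master (the case the diff guards with a master-address check).
func (s *TestSettings) IsController() bool {
	return s.UseCloudStroppy
}

// IsLocal reports whether commands should run in a local shell session
// instead of over SSH.
func (s *TestSettings) IsLocal() bool {
	return s.RunLocal
}

func main() {
	settings := &TestSettings{UseCloudStroppy: true}
	fmt.Println(settings.IsController(), settings.IsLocal()) // true false
}
```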
4 changes: 3 additions & 1 deletion pkg/engine/kubeengine/nodeops.go
@@ -200,6 +200,8 @@ func (e *Engine) AddNodeLabels(shellState *state.State) error { //nolint
newLabels[NodeNameDBMS] = trueVal
}

+newLabels["topology.kubernetes.io/zone"] = node.Name
+
for key, value := range newLabels {
nodeLabels[key] = value
}
@@ -209,7 +211,7 @@ func (e *Engine) AddNodeLabels(shellState *state.State) error { //nolint
nodesList.Items[index].ResourceVersion = ""
nodesList.Items[index].UID = types.UID("")

-llog.Tracef("Cluster node %s now has new lables: %v", node.Name, nodeLabels)
+llog.Tracef("Cluster node %s now has new labels: %v", node.Name, nodeLabels)
}

if err = applyNodeLabels(clientSet, nodesList); err != nil {
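The new label keys the zone topology off the node's own name before the whole batch is merged into the node's existing label map. A self-contained sketch of that merge, with illustrative label values:

```go
package main

import "fmt"

func main() {
	// Existing labels on a cluster node (illustrative values).
	nodeLabels := map[string]string{
		"kubernetes.io/hostname": "worker-1",
	}

	// Labels computed for this node, as in AddNodeLabels: a zone label
	// derived from the node's own name.
	nodeName := "worker-1"
	newLabels := map[string]string{
		"topology.kubernetes.io/zone": nodeName,
	}

	// Merge: new keys win, existing unrelated labels are preserved.
	for key, value := range newLabels {
		nodeLabels[key] = value
	}

	fmt.Printf("node %s now has labels: %v\n", nodeName, nodeLabels)
}
```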
14 changes: 12 additions & 2 deletions pkg/engine/kubeengine/toolfuncs.go
@@ -201,12 +201,22 @@ func (e Engine) CollectMonitoringData(
"third_party",
"monitoring",
)
+getPngScriptPath := filepath.Join(
+"./third_party", //nolint
+"monitoring",
+GetPngScriptName,
+)
+
+llog.Debugf("Getting monitoring images script path: %s", getPngScriptPath)
+
getImagesCmd := exec.Command(
-GetPngScriptPath,
+getPngScriptPath,
fmt.Sprintf("%v", startTime),
fmt.Sprintf("%v", finishTime),
fmt.Sprintf("%v", monitoringPort),
-monImagesArchName, workersIps)
+monImagesArchName,
+workersIps,
+)
getImagesCmd.Dir = workingDirectory

if result, err := getImagesCmd.CombinedOutput(); err != nil {
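The script path is now built with filepath.Join from the directory and the renamed GetPngScriptName constant, and every argument passed to exec.Command is formatted to its own string. A small sketch of both details; note that filepath.Join cleans the leading "./":

```go
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

func main() {
	// Join the script name onto its directory, as the diff now does.
	// filepath.Join cleans the path, so "./third_party" becomes "third_party".
	getPngScriptName := "get_png.sh"
	scriptPath := filepath.Join("./third_party", "monitoring", getPngScriptName)
	fmt.Println(scriptPath) // third_party/monitoring/get_png.sh

	// exec.Command takes each argument as a separate string, so numeric
	// timestamps must be formatted first (illustrative values).
	start, finish := int64(1637859362000), int64(1637895618000)
	cmd := exec.Command(scriptPath,
		fmt.Sprintf("%v", start),
		fmt.Sprintf("%v", finish),
	)
	fmt.Println(cmd.String())
}
```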
10 changes: 4 additions & 6 deletions pkg/kubernetes/deploy.go
@@ -831,13 +831,13 @@ func (k *Kubernetes) deployPrometheus(shellState *state.State) error { //nolint
bytes []byte
)

-if bytes, err = kubeengine.GetPrometheusValues(shellState); err != nil {
+if bytes, err = kubeengine.GetPrometheusValues(shellState); err != nil || len(bytes) == 0 {
return merry.Prepend(err, "failed to get prometheus values")
}

if err = k.Engine.DeployChart(
&kubeengine.InstallOptions{ //nolint
-ChartName: path.Join(prometheusHelmRepoName, "prometheus"),
+ChartName: path.Join(prometheusHelmRepoName, "kube-prometheus-stack"),
ChartNamespace: "default",
ReleaseName: "prometheus",
RepositoryURL: prometheusHelmRepoURL,
@@ -916,10 +916,7 @@ func (k *Kubernetes) checkMasterDeploymentStatus() bool {
}

func (k *Kubernetes) deployIngress(shellState *state.State) error { //nolint
-var (
-err error
-bytes []byte
-)
+var err error // bytes []byte

if bytes, err = kubeengine.GetIngressValues(shellState); err != nil {
return merry.Prepend(err, "failed to get ingress values")
@@ -928,6 +925,7 @@
if err = k.Engine.DeployChart(
&kubeengine.InstallOptions{ //nolint
ChartName: path.Join(nginxHelmRepoName, "ingress-nginx"),
+ChartVersion: "4.2.5",
ChartNamespace: "default",
ReleaseName: "ingress-nginx",
RepositoryURL: nginxHelmRepoURL,
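Pinning ChartVersion makes the ingress deploy reproducible: without it, Helm resolves whatever version the repository currently marks latest. A sketch of the options shape visible in this diff, with a stand-in struct (the repository's real InstallOptions may carry more fields) and an assumed repository URL:

```go
package main

import "fmt"

// InstallOptions mirrors only the fields visible in the diff; the
// repository's actual struct is defined elsewhere.
type InstallOptions struct {
	ChartName      string
	ChartVersion   string // pinned so repeated deploys resolve the same chart
	ChartNamespace string
	ReleaseName    string
	RepositoryURL  string
}

func main() {
	opts := InstallOptions{
		ChartName:      "ingress-nginx/ingress-nginx",
		ChartVersion:   "4.2.5",
		ChartNamespace: "default",
		ReleaseName:    "ingress-nginx",
		// Assumed value; the diff only references nginxHelmRepoURL.
		RepositoryURL: "https://kubernetes.github.io/ingress-nginx",
	}

	fmt.Printf("helm upgrade --install %s %s --version %s --namespace %s\n",
		opts.ReleaseName, opts.ChartName, opts.ChartVersion, opts.ChartNamespace)
}
```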
1 change: 1 addition & 0 deletions pkg/kubernetes/toolfuncs.go
@@ -191,6 +191,7 @@ func (k *Kubernetes) GenerateMonitoringInventory(shellState *state.State) ([]byt
},
"grafana_address": shellState.NodesInfo.IPs.FirstMasterIP.Internal,
"grafana_port": grafanaPort,
"grafana_manage_repo": false,
"grafana_datasources": []interface{}{
map[string]interface{}{
"name": "Prometheus",
2 changes: 1 addition & 1 deletion third_party/extra/manifests/pv-provisioner/configmap.yml
@@ -14,7 +14,7 @@ data:
hostDir: /mnt/network-ssd-nonreplicated
mountDir: /mnt/network-ssd-nonreplicated
blockCleanerCommand:
- "/scripts/dd_zero.sh"
- "/scripts/shred.sh"
- "2"
volumeMode: Block
fsType: ext4
1 change: 1 addition & 0 deletions third_party/extra/values/ingress-nginx-values-tpl.yml
@@ -1,3 +1,4 @@
+---
controller:
publishService:
enabled: true
82 changes: 82 additions & 0 deletions third_party/monitoring/get_png.sh
@@ -0,0 +1,82 @@
#!/bin/bash
# Example usage:
# ./get_png.sh 1637859362000 1637895618000 stroppy qwerty123! localhost 3000 mongo_100mln_pay_6cpu_16GB_without_operator_with_arb.tar.gz "10.1.20.171;10.1.20.73;10.1.20.210;10.1.20.90;10.1.20.138"
# where:
# $1 - start of the interval, unix time in milliseconds
# $2 - end of the interval, unix time in milliseconds
# $3 - grafana user
# $4 - grafana password
# $5 - grafana address
# $6 - grafana port
# $7 - name of the archive with dashboard images
# $8 - semicolon-separated string of internal cluster machine IP addresses
rm -rf png

start=$1
end=$2
user=$3
password=$4
address=$5
port=$6
arch_name=$7
ip_string=$8
base_url="http://$user:$password@$address:$port/render/d-solo"
tz="tz=Europe%2FMoscow"

# Split the semicolon-separated IP list into an array.
ip_array=($(echo "$ip_string" | tr ";" "\n"))

for theme in "light" "dark"
do
for size in "width=3000&height=1800" "width=1000&height=500"
do
mkdir -p "png/$size/$theme/node-exporter"
mkdir -p "png/$size/$theme/k8s"
i=1
for worker in "${ip_array[@]}"
do

curl -s -o png/$size/$theme/node-exporter/cpu-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=77&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/cpu-details-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=3&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/ram-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=78&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/ram-details-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=24&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/network-traffic-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=74&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/netstat-in-out-octets-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=221&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/network-in-out-udp-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=55&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/network-in-out-tcp-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=299&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-space-used-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=156&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-iops-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=229&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-io-usage-rw-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=42&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-io-utilization-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=127&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-average-wait-time-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=37&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-rw-merged-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=133&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/disk-average-queue-size-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=35&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/system-load-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=7&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/cpu-context-switches-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=8&$size&$tz"
curl -s -o png/$size/$theme/node-exporter/ram-active-inactive-worker-$i.png "$base_url/rYdddlPWk/node-exporter-full?orgId=1&from=$start&to=$end&var-job=node-exporter&var-node=$worker:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=191&$size&$tz"
let i++
done
curl -s -o png/$size/$theme/k8s/k8s-cpu-by-namespaces.png "$base_url/efa86fd1d0c121a26444b636a3f509a8/kubernetes-compute-resources-cluster?orgId=1&from=$start&to=$end&theme=$theme&panelId=7&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-memory-by-namespaces.png "$base_url/efa86fd1d0c121a26444b636a3f509a8/kubernetes-compute-resources-cluster?orgId=1&from=$start&to=$end&theme=$theme&panelId=9&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-cpu-by-pods-in-default-namespace.png "$base_url/85a562078cdf77779eaa1add43ccec1e/kubernetes-compute-resources-namespace-pods?orgId=1&from=$start&to=$end&theme=$theme&panelId=5&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-memory-by-pods-in-default-namespace.png "$base_url/85a562078cdf77779eaa1add43ccec1e/kubernetes-compute-resources-namespace-pods?orgId=1&from=$start&to=$end&theme=$theme&panelId=7&$size&$tz"
# warning: the 'var-node=worker-1&var-node=worker-2&var-node=worker-3' block depends on the cluster's node names
curl -s -o png/$size/$theme/k8s/k8s-cpu-by-all-pods.png "$base_url/200ac8fdbfbb74b39aff88118e4d1c2c/kubernetes-compute-resources-node-pods?orgId=1&var-datasource=default&var-cluster=&var-node=worker-1&var-node=worker-2&var-node=worker-3&from=$start&to=$end&theme=$theme&panelId=1&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-memory-by-all-pods.png "$base_url/200ac8fdbfbb74b39aff88118e4d1c2c/kubernetes-compute-resources-node-pods?orgId=1&var-datasource=default&var-cluster=&var-node=worker-1&var-node=worker-2&var-node=worker-3&from=$start&to=$end&theme=$theme&panelId=3&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-cpu-by-sts.png "$base_url/a164a7f0339f99e89cea5cb47e9be617/kubernetes-compute-resources-workload?orgId=1&from=$start&to=$end&theme=$theme&panelId=1&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-memory-by-sts.png "$base_url/a164a7f0339f99e89cea5cb47e9be617/kubernetes-compute-resources-workload?orgId=1&from=$start&to=$end&theme=$theme&panelId=3&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-receive-by-namespaces.png "$base_url/ff635a025bcfea7bc3dd4f508990a3e9/kubernetes-networking-cluster?orgId=1&var-resolution=30s&var-interval=4h&var-datasource=default&var-cluster=&from=$start&to=$end&theme=$theme&panelId=10&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-transmit-by-namespaces.png "$base_url/ff635a025bcfea7bc3dd4f508990a3e9/kubernetes-networking-cluster?orgId=1&var-resolution=30s&var-interval=4h&var-datasource=default&var-cluster=&from=$start&to=$end&theme=$theme&panelId=11&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-receive-by-all-pods.png "$base_url/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&var-datasource=default&var-cluster=&var-namespace=All&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=7&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-transmit-by-all-pods.png "$base_url/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&var-datasource=default&var-cluster=&var-namespace=All&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=8&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-receive-by-sts.png "$base_url/bbb2a765a623ae38130206c7d94a160f/kubernetes-networking-namespace-workload?orgId=1&var-datasource=default&var-cluster=&var-namespace=default&var-type=statefulset&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=10&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-transmit-by-sts.png "$base_url/bbb2a765a623ae38130206c7d94a160f/kubernetes-networking-namespace-workload?orgId=1&var-datasource=default&var-cluster=&var-namespace=default&var-type=statefulset&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=11&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-receive-by-pods-in-sts.png "$base_url/728bf77cc1166d2f3133bf25846876cc/kubernetes-networking-workload?orgId=1&var-datasource=default&var-cluster=&var-namespace=All&var-workload=acid-postgres-cluster&var-type=statefulset&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=9&$size&$tz"
curl -s -o png/$size/$theme/k8s/k8s-net-transmit-by-pods-in-sts.png "$base_url/728bf77cc1166d2f3133bf25846876cc/kubernetes-networking-workload?orgId=1&var-datasource=default&var-cluster=&var-namespace=All&var-workload=acid-postgres-cluster&var-type=statefulset&var-resolution=30s&var-interval=4h&from=$start&to=$end&theme=$theme&panelId=10&$size&$tz"
done
done

mv 'png/width=1000&height=500' png/1000x500
mv 'png/width=3000&height=1800' png/3000x1800

tar cfvz $arch_name png
9 changes: 9 additions & 0 deletions third_party/monitoring/grafana_get_png.yml
@@ -0,0 +1,9 @@
- hosts: master
become: true
any_errors_fatal: false
tasks:
- name: Get node-exporter-full png
ansible.builtin.get_url:
url: "http://{{ grafana_address }}:{{ grafana_port }}/render/d-solo/rYdddlPWk/node-exporter-full?orgId=1&from={{ start_time }}&to={{ end_time }}&var-job=node-exporter&var-node={{ worker_id }}:9100&var-diskdevices=%5Ba-z%5D%2B%7Cnvme%5B0-9%5D%2Bn%5B0-9%5D%2B&theme=$theme&panelId=77&$size&$tz"
dest: "tmp/png/$size/$theme/node-exporter/cpu-worker-$i.png"

10 changes: 5 additions & 5 deletions third_party/terraform/vars.tf
@@ -40,13 +40,13 @@ variable "masters_count" {
variable "masters_cpu" {
type = number
description = "Yandex Cloud cpu cores per master"
-default = 10
+default = 16
}

variable "masters_memory" {
type = number
description = "Yandex Cloud memory gigabytes per master"
-default = 40
+default = 48
}

variable "masters_boot_disk" {
@@ -65,19 +65,19 @@ variable "masters_secondary_disk" {
variable "workers_count" {
type = number
description = "Yandex Cloud count of workers"
-default = 8
+default = 9
}

variable "workers_cpu" {
type = number
description = "Yandex Cloud cpu in cores per worker"
-default = 10
+default = 16
}

variable "workers_memory" {
type = number
description = "Yandex Cloud memory in GB per worker"
-default = 40
+default = 48
}

variable "workers_boot_disk" {