diff --git a/ydb/tools/cfg/base.py b/ydb/tools/cfg/base.py index 7f6e5a1661e4..14433fec9d6a 100644 --- a/ydb/tools/cfg/base.py +++ b/ydb/tools/cfg/base.py @@ -60,10 +60,11 @@ def merge_with_default(dft, override): class KiKiMRDrive(object): - def __init__(self, type, path, shared_with_os=False, expected_slot_count=None, kind=None): + def __init__(self, type, path, shared_with_os=False, expected_slot_count=None, kind=None, pdisk_config=None): self.type = type self.path = path self.shared_with_os = shared_with_os + self.pdisk_config = pdisk_config self.expected_slot_count = expected_slot_count self.kind = kind @@ -74,10 +75,11 @@ def __eq__(self, other): and self.shared_with_os == other.shared_with_os and self.expected_slot_count == other.expected_slot_count and self.kind == other.kind + and self.pdisk_config == other.pdisk_config ) def __hash__(self): - return hash("\0".join(map(str, (self.type, self.path, self.shared_with_os, self.expected_slot_count, self.kind)))) + return hash("\0".join(map(str, (self.type, self.path, self.shared_with_os, self.expected_slot_count, self.kind, self.pdisk_config)))) Domain = collections.namedtuple( @@ -264,7 +266,7 @@ def normalize_domain(domain_name): class ClusterDetailsProvider(object): - def __init__(self, template, walle_provider, validator=None, database=None, use_new_style_cfg=False): + def __init__(self, template, host_info_provider, validator=None, database=None, use_new_style_cfg=False): if not validator: validator = validation.default_validator() @@ -277,10 +279,14 @@ def __init__(self, template, walle_provider, validator=None, database=None, use_ if database is not None: self.__cluster_description = self.get_subjective_description(self.__cluster_description, database, self.__validator) - self._use_walle = self.__cluster_description.get("use_walle", True) - if not walle_provider: - walle_provider = walle.NopHostsInformationProvider() - self._walle = walle_provider + self._use_walle = 
self.__cluster_description.get("use_walle", False) + self._k8s_settings = self.__cluster_description.get("k8s_settings", {"use": False}) + + if host_info_provider is not None: + self._host_info_provider = host_info_provider + else: + self._host_info_provider = walle.NopHostsInformationProvider() + + self.__translated_storage_pools_deprecated = None self.__translated_hosts = None self.__racks = {} @@ -299,6 +305,7 @@ def __init__(self, template, walle_provider, validator=None, database=None, use_ self.blob_storage_config = self.__cluster_description.get("blob_storage_config") self.memory_controller_config = self.__cluster_description.get("memory_controller_config", {}) self.channel_profile_config = self.__cluster_description.get("channel_profile_config") + self.immediate_controls_config = self.__cluster_description.get("immediate_controls_config") self.pdisk_key_config = self.__cluster_description.get("pdisk_key_config", {}) if not self.need_txt_files and not self.use_new_style_kikimr_cfg: assert "cannot remove txt files without new style kikimr cfg!" 
@@ -344,6 +351,18 @@ def storage_config_generation(self): def use_walle(self): return self._use_walle + @property + def use_k8s_api(self): + return self._k8s_settings.get("use") + + @property + def k8s_rack_label(self): + return self._k8s_settings.get("k8s_rack_label") + + @property + def k8s_dc_label(self): + return self._k8s_settings.get("k8s_dc_label") + @property def security_settings(self): return self.__cluster_description.get("security_settings", {}) @@ -358,7 +377,7 @@ def _get_datacenter(self, host_description): dc = host_description.get("location", {}).get("data_center", None) if dc: return str(dc) - return str(self._walle.get_datacenter(host_description.get("name", host_description.get("host")))) + return str(self._host_info_provider.get_datacenter(host_description.get("name", host_description.get("host")))) def _get_rack(self, host_description): if host_description.get("rack") is not None: @@ -366,7 +385,13 @@ def _get_rack(self, host_description): rack = host_description.get("location", {}).get("rack", None) if rack: return str(rack) - return str(self._walle.get_rack(host_description.get("name", host_description.get("host")))) + + hostname = host_description.get("name", host_description.get("host")) + + if isinstance(self._host_info_provider, walle.NopHostsInformationProvider): + raise RuntimeError(f"there is no 'rack' specified for host {hostname} in template, and no host info provider has been specified") + + return str(self._host_info_provider.get_rack(hostname)) def _get_body(self, host_description): if host_description.get("body") is not None: @@ -374,7 +399,7 @@ def _get_body(self, host_description): body = host_description.get("location", {}).get("body", None) if body: return str(body) - return str(self._walle.get_body(host_description.get("name", host_description.get("host")))) + return str(self._host_info_provider.get_body(host_description.get("name", host_description.get("host")))) def _collect_drives_info(self, host_description): 
host_config_id = host_description.get("host_config_id", None) @@ -670,6 +695,10 @@ def grpc_config(self): def dynamicnameservice_config(self): return merge_with_default(DYNAMIC_NAME_SERVICE, self.__cluster_description.get("dynamicnameservice", {})) + @property + def nameservice_config(self): + return self.__cluster_description.get("nameservice_config") + @property def grpc_port(self): return self.grpc_config.get("port") diff --git a/ydb/tools/cfg/bin/__main__.py b/ydb/tools/cfg/bin/__main__.py index c76379907f9d..50ec51785a85 100644 --- a/ydb/tools/cfg/bin/__main__.py +++ b/ydb/tools/cfg/bin/__main__.py @@ -12,6 +12,7 @@ from ydb.tools.cfg.static import StaticConfigGenerator from ydb.tools.cfg.utils import write_to_file from ydb.tools.cfg.walle import NopHostsInformationProvider, WalleHostsInformationProvider +from ydb.tools.cfg.k8s_api import K8sApiHostsInformationProvider logging_config.dictConfig( { @@ -48,11 +49,28 @@ def cfg_generate(args): with open(args.cluster_description, "r") as yaml_template: cluster_template = yaml.safe_load(yaml_template) - hosts_provider = NopHostsInformationProvider() + host_info_provider = NopHostsInformationProvider() + + k8s_enabled = cluster_template.get("k8s_settings", {}).get("use", False) + walle_enabled = cluster_template.get("use_walle", False) + if args.hosts_provider_url: - hosts_provider = WalleHostsInformationProvider(args.hosts_provider_url) + if not walle_enabled: + raise RuntimeError("you specified --hosts-provider-url, but `use_walle` is false in template.\nSpecify `use_walle: True` to continue") + host_info_provider = WalleHostsInformationProvider(args.hosts_provider_url) + elif k8s_enabled: + host_info_provider = K8sApiHostsInformationProvider(args.kubeconfig) + + if walle_enabled and not isinstance(host_info_provider, WalleHostsInformationProvider): + raise RuntimeError("you specified 'use_walle: True', but didn't specify --hosts-provider-url to initialize walle") + + if walle_enabled and k8s_enabled: + raise 
RuntimeError("you specified 'use_walle: True' and 'k8s_settings.use: True', please select a single host info provider") + + if not walle_enabled and not k8s_enabled: + logger.warning("you didn't specify any host info provider (neither walle nor k8s). Make sure you know what you are doing") - generator = cfg_cls(cluster_template, args.binary_path, args.output_dir, walle_provider=hosts_provider, **kwargs) + generator = cfg_cls(cluster_template, args.binary_path, args.output_dir, host_info_provider=host_info_provider, **kwargs) all_configs = generator.get_all_configs() for cfg_name, cfg_value in all_configs.items(): @@ -60,7 +78,7 @@ def cfg_generate(args): def main(): - parser = get_parser(cfg_generate, [{"name": "--hosts-provider-url", "help": "URL from which information about hosts can be obtained."}]) + parser = get_parser(cfg_generate) args = parser.parse_args() args.func(args) diff --git a/ydb/tools/cfg/configurator_setup.py b/ydb/tools/cfg/configurator_setup.py index 981bdd7b5dd2..92a9ebea4853 100644 --- a/ydb/tools/cfg/configurator_setup.py +++ b/ydb/tools/cfg/configurator_setup.py @@ -1,6 +1,8 @@ import argparse import random +from pathlib import Path + def parse_optional_arguments(args): kwargs = {} @@ -73,6 +75,19 @@ def get_parser(generate_func, extra_cfg_arguments=[]): help=v['help'], ) + parser_cfg.add_argument( + "--hosts-provider-url", + type=str, + help="""URL from which information about hosts can be obtained. + Mutually exclusive with --hosts-provider-k8s""") + + home_directory = str(Path.home()) + defaultKubeconfigLocation = "{0}/.kube/config".format(home_directory) + parser_cfg.add_argument("--kubeconfig", + type=str, + help="path to the kubeconfig file. 
Default `$HOME/.kube/config`, also see --hosts-provider-k8s", + default=defaultKubeconfigLocation) + argument_group = parser_cfg.add_mutually_exclusive_group() argument_group.add_argument( diff --git a/ydb/tools/cfg/dynamic.py b/ydb/tools/cfg/dynamic.py index 0b8a3a753157..69db8120f1a6 100644 --- a/ydb/tools/cfg/dynamic.py +++ b/ydb/tools/cfg/dynamic.py @@ -20,19 +20,19 @@ def __init__( output_dir, grpc_endpoint=None, local_binary_path=None, - walle_provider=None, + host_info_provider=None, **kwargs ): self._template = template self._binary_path = binary_path self._local_binary_path = local_binary_path or binary_path self._output_dir = output_dir - self._walle_provider = walle_provider - self._cluster_details = base.ClusterDetailsProvider(template, walle_provider=self._walle_provider) + self._host_info_provider = host_info_provider + self._cluster_details = base.ClusterDetailsProvider(template, host_info_provider=self._host_info_provider) self._grpc_endpoint = grpc_endpoint self.__configure_request = None self.__static_config = static.StaticConfigGenerator( - template, binary_path, output_dir, walle_provider=walle_provider, local_binary_path=local_binary_path + template, binary_path, output_dir, host_info_provider=host_info_provider, local_binary_path=local_binary_path ) @property diff --git a/ydb/tools/cfg/k8s_api/__init__.py b/ydb/tools/cfg/k8s_api/__init__.py new file mode 100644 index 000000000000..10e69406434a --- /dev/null +++ b/ydb/tools/cfg/k8s_api/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from .k8s_api import K8sApiHostsInformationProvider # noqa + +__all__ = ("k8s_api",) diff --git a/ydb/tools/cfg/k8s_api/k8s_api.py b/ydb/tools/cfg/k8s_api/k8s_api.py new file mode 100644 index 000000000000..238fcede65c7 --- /dev/null +++ b/ydb/tools/cfg/k8s_api/k8s_api.py @@ -0,0 +1,68 @@ +import logging +import threading +import hashlib + +from kubernetes import client, config + +from ydb.tools.cfg.walle import HostsInformationProvider 
+ +logger = logging.getLogger() + + +class K8sApiHostsInformationProvider(HostsInformationProvider): + def __init__(self, kubeconfig): + self._kubeconfig = kubeconfig + self._cache = {} + self._timeout_seconds = 5 + self._retry_count = 10 + self._lock = threading.Lock() + self._k8s_rack_label = None + self._k8s_dc_label = None + + def _init_k8s_labels(self, rack_label, dc_label): + self._k8s_rack_label = rack_label + self._k8s_dc_label = dc_label + logger.info(f"initialized rack with {rack_label}, dc with {dc_label}") + self._populate_cache() + + def _populate_cache(self): + try: + config.load_kube_config(config_file=self._kubeconfig) + with client.ApiClient() as api_client: + v1 = client.CoreV1Api(api_client) + nodes = v1.list_node().items + + with self._lock: + for node in nodes: + hostname = node.metadata.name + self._cache[hostname] = node.metadata.labels + except client.exceptions.ApiException as e: + print(f"Failed to fetch node labels: {e}") + + def get_rack(self, hostname): + if self._k8s_rack_label is None: + return "defaultRack" + + labels = self._cache.get(hostname) + if labels and self._k8s_rack_label in labels: + logging.info(f"get_rack invoked on {hostname}, value {labels[self._k8s_rack_label]}") + return labels[self._k8s_rack_label] + logging.info(f"rack not found for hostname {hostname}") + return "" + + def get_datacenter(self, hostname): + logging.info(f"get_datacenter invoked on {hostname}") + + if self._k8s_dc_label is None: + return "defaultDC" + + labels = self._cache.get(hostname) + if labels and self._k8s_dc_label in labels: + return labels[self._k8s_dc_label] + return "" + + def get_body(self, hostname): + # Just something for now, please present better ideas + hex_digest = hashlib.md5(hostname.encode()).hexdigest() + decimal_value = int(hex_digest, 16) % (1 << 31) + return decimal_value diff --git a/ydb/tools/cfg/k8s_api/ya.make b/ydb/tools/cfg/k8s_api/ya.make new file mode 100644 index 000000000000..46587f7f2113 --- /dev/null +++ 
b/ydb/tools/cfg/k8s_api/ya.make @@ -0,0 +1,12 @@ +PY3_LIBRARY() + +PY_SRCS( + __init__.py + k8s_api.py +) + +PEERDIR( + contrib/python/kubernetes +) + +END() diff --git a/ydb/tools/cfg/static.py b/ydb/tools/cfg/static.py index 4429c7cc402b..d51d969b308a 100644 --- a/ydb/tools/cfg/static.py +++ b/ydb/tools/cfg/static.py @@ -10,6 +10,7 @@ import yaml from ydb.core.fq.libs.config.protos.fq_config_pb2 import TConfig as TFederatedQueryConfig +from ydb.core.protos import blobstorage_pdisk_config_pb2 as pdisk_config_pb from google.protobuf import json_format from ydb.core.protos import ( @@ -45,7 +46,7 @@ def __init__( database=None, node_broker_port=2135, ic_port=19001, - walle_provider=None, + host_info_provider=None, grpc_port=2135, mon_port=8765, cfg_home="/Berkanavt/kikimr", @@ -60,13 +61,17 @@ def __init__( self.__binary_path = binary_path self.__local_binary_path = local_binary_path or binary_path self.__output_dir = output_dir - # collects and provides information about cluster hosts - self.__cluster_details = base.ClusterDetailsProvider(template, walle_provider, validator=schema_validator, database=database) + + self._host_info_provider = host_info_provider + + self.__cluster_details = base.ClusterDetailsProvider(template, host_info_provider, validator=schema_validator, database=database) + if self.__cluster_details.use_k8s_api: + self._host_info_provider._init_k8s_labels(self.__cluster_details.k8s_rack_label, self.__cluster_details.k8s_dc_label) + self._enable_cores = template.get("enable_cores", enable_cores) self._yaml_config_enabled = template.get("yaml_config_enabled", False) self.__is_dynamic_node = True if database is not None else False self._database = database - self._walle_provider = walle_provider self._skip_location = skip_location self.__node_broker_port = node_broker_port self.__grpc_port = grpc_port @@ -103,6 +108,7 @@ def __init__( "pqcd.txt": None, "failure_injection.txt": None, "pdisk_key.txt": None, + "immediate_controls_config.txt": None, } 
self.__optional_config_files = set( ( @@ -112,16 +118,12 @@ def __init__( "fq.txt", "failure_injection.txt", "pdisk_key.txt", + "immediate_controls_config.txt", ) ) tracing = template.get("tracing_config") if tracing is not None: - self.__tracing = ( - tracing["backend"], - tracing.get("uploader"), - tracing.get("sampling", []), - tracing.get("external_throttling", []), - ) + self.__tracing = tracing else: self.__tracing = None self.__write_mbus_settings_to_kikimr_cfg = False @@ -266,6 +268,14 @@ def pdisk_key_txt(self): def pdisk_key_txt_enabled(self): return self.__proto_config("pdisk_key.txt").ByteSize() > 0 + @property + def immediate_controls_config_txt(self): + return self.__proto_config("immediate_controls_config.txt", config_pb2.TImmediateControlsConfig, self.__cluster_details.immediate_controls_config) + + @property + def immediate_controls_config_txt_enabled(self): + return self.__proto_config("immediate_controls_config.txt").ByteSize() > 0 + @property def mbus_enabled(self): mbus_config = self.__cluster_details.get_service("message_bus_config") @@ -407,6 +417,22 @@ def get_normalized_config(self): 'expected_slot_count': drive.pop('expected_slot_count') } + # support type-safe `pdisk_config` directly in `host_configs`, for example: + # - path: /dev/disk/by-partlabel/ydb_disk_hdd_04 + # type: ROT + # pdisk_config: + # expected_slot_count: 8 + # drive_model_trim_speed_bps: 0 + # drive_model_TYPO_speed_bps: 0 # will fail + if 'pdisk_config' in drive: + pd = pdisk_config_pb.TPDiskConfig() + utils.apply_config_changes( + pd, + drive['pdisk_config'], + ) + + drive['pdisk_config'] = self.normalize_dictionary(json_format.MessageToDict(pd)) + if self.table_service_config: normalized_config["table_service_config"] = self.table_service_config @@ -522,6 +548,7 @@ def get_normalized_config(self): if 'pdisk_config' in vdisk_location: if 'expected_slot_count' in vdisk_location['pdisk_config']: vdisk_location['pdisk_config']['expected_slot_count'] = 
int(vdisk_location['pdisk_config']['expected_slot_count']) + if self.__cluster_details.channel_profile_config is not None: normalized_config["channel_profile_config"] = self.__cluster_details.channel_profile_config else: @@ -627,6 +654,8 @@ def get_app_config(self): app_config.MergeFrom(self.tracing_txt) if self.pdisk_key_txt_enabled: app_config.PDiskKeyConfig.CopyFrom(self.pdisk_key_txt) + if self.immediate_controls_config_txt_enabled: + app_config.ImmediateControlsConfig.CopyFrom(self.immediate_controls_config_txt) return app_config def __proto_config(self, config_file, config_class=None, cluster_details_for_field=None): @@ -1196,6 +1225,8 @@ def __generate_log_txt(self): def __generate_names_txt(self): self.__proto_configs["names.txt"] = config_pb2.TStaticNameserviceConfig() + if self.__cluster_details.nameservice_config is not None: + utils.wrap_parse_dict(self.__cluster_details.nameservice_config, self.names_txt) for host in self.__cluster_details.hosts: node = self.names_txt.Node.add( @@ -1210,6 +1241,10 @@ def __generate_names_txt(self): node.WalleLocation.DataCenter = host.datacenter node.WalleLocation.Rack = host.rack node.WalleLocation.Body = int(host.body) + elif self.__cluster_details.use_k8s_api: + node.Location.DataCenter = host.datacenter + node.Location.Rack = host.rack + node.Location.Body = int(host.body) else: node.Location.DataCenter = host.datacenter node.Location.Rack = host.rack @@ -1217,7 +1252,13 @@ def __generate_names_txt(self): if self.__cluster_details.use_cluster_uuid: accepted_uuids = self.__cluster_details.accepted_cluster_uuids - cluster_uuid = self.__cluster_details.cluster_uuid + + # cluster_uuid can be initialized from `nameservice_config` proto, same as `config.yaml`, + # OR in the old manner, through `cluster_uuid: ...` key in `template.yaml` + cluster_uuid = self.names_txt.ClusterUUID # already read from proto + if len(cluster_uuid) == 0: + cluster_uuid = self.__cluster_details.cluster_uuid # fall back to `cluster_uuid: ...` + 
cluster_uuid = "ydb:{}".format(utils.uuid()) if cluster_uuid is None else cluster_uuid self.names_txt.ClusterUUID = cluster_uuid self.names_txt.AcceptUUID.append(cluster_uuid) @@ -1247,133 +1288,10 @@ def __generate_sys_txt(self): self.__generate_sys_txt_advanced() def __generate_tracing_txt(self): - def get_selectors(selectors): - selectors_pb = config_pb2.TTracingConfig.TSelectors() - - request_type = selectors["request_type"] - if request_type is not None: - selectors_pb.RequestType = request_type - - return selectors_pb - - def get_sampling_scope(sampling): - sampling_scope_pb = config_pb2.TTracingConfig.TSamplingRule() - selectors = sampling.get("scope") - if selectors is not None: - sampling_scope_pb.Scope.CopyFrom(get_selectors(selectors)) - sampling_scope_pb.Fraction = sampling['fraction'] - sampling_scope_pb.Level = sampling['level'] - sampling_scope_pb.MaxTracesPerMinute = sampling['max_traces_per_minute'] - sampling_scope_pb.MaxTracesBurst = sampling.get('max_traces_burst', 0) - return sampling_scope_pb - - def get_external_throttling(throttling): - throttling_scope_pb = config_pb2.TTracingConfig.TExternalThrottlingRule() - selectors = throttling.get("scope") - if selectors is not None: - throttling_scope_pb.Scope.CopyFrom(get_selectors(selectors)) - throttling_scope_pb.Level = throttling['level'] - throttling_scope_pb.MaxTracesPerMinute = throttling['max_traces_per_minute'] - throttling_scope_pb.MaxTracesBurst = throttling.get('max_traces_burst', 0) - return throttling_scope_pb - - def get_auth_config(auth): - auth_pb = config_pb2.TTracingConfig.TBackendConfig.TAuthConfig() - tvm = auth.get("tvm") - if tvm is not None: - tvm_pb = auth_pb.Tvm - - if "host" in tvm: - tvm_pb.Host = tvm["host"] - if "port" in tvm: - tvm_pb.Port = tvm["port"] - tvm_pb.SelfTvmId = tvm["self_tvm_id"] - tvm_pb.TracingTvmId = tvm["tracing_tvm_id"] - if "disk_cache_dir" in tvm: - tvm_pb.DiskCacheDir = tvm["disk_cache_dir"] - - if "plain_text_secret" in tvm: - 
tvm_pb.PlainTextSecret = tvm["plain_text_secret"] - elif "secret_file" in tvm: - tvm_pb.SecretFile = tvm["secret_file"] - elif "secret_environment_variable" in tvm: - tvm_pb.SecretEnvironmentVariable = tvm["secret_environment_variable"] - return auth_pb - - def get_opentelemetry(opentelemetry): - opentelemetry_pb = config_pb2.TTracingConfig.TBackendConfig.TOpentelemetryBackend() - - opentelemetry_pb.CollectorUrl = opentelemetry["collector_url"] - opentelemetry_pb.ServiceName = opentelemetry["service_name"] - - return opentelemetry_pb - - def get_backend(backend): - backend_pb = config_pb2.TTracingConfig.TBackendConfig() - - auth = backend.get("auth_config") - if auth is not None: - backend_pb.AuthConfig.CopyFrom(get_auth_config(auth)) - - opentelemetry = backend["opentelemetry"] - if opentelemetry is not None: - backend_pb.Opentelemetry.CopyFrom(get_opentelemetry(opentelemetry)) - - return backend_pb - - def get_uploader(uploader): - uploader_pb = config_pb2.TTracingConfig.TUploaderConfig() - - max_exported_spans_per_second = uploader.get("max_exported_spans_per_second") - if max_exported_spans_per_second is not None: - uploader_pb.MaxExportedSpansPerSecond = max_exported_spans_per_second - - max_spans_in_batch = uploader.get("max_spans_in_batch") - if max_spans_in_batch is not None: - uploader_pb.MaxSpansInBatch = max_spans_in_batch - - max_bytes_in_batch = uploader.get("max_bytes_in_batch") - if max_bytes_in_batch is not None: - uploader_pb.MaxBytesInBatch = max_bytes_in_batch - - max_batch_accumulation_milliseconds = uploader.get("max_batch_accumulation_milliseconds") - if max_batch_accumulation_milliseconds is not None: - uploader_pb.MaxBatchAccumulationMilliseconds = max_batch_accumulation_milliseconds - - span_export_timeout_seconds = uploader.get("span_export_timeout_seconds") - if span_export_timeout_seconds is not None: - uploader_pb.SpanExportTimeoutSeconds = span_export_timeout_seconds - - max_export_requests_inflight = 
uploader.get("max_export_requests_inflight") - if max_export_requests_inflight is not None: - uploader_pb.MaxExportRequestsInflight = max_export_requests_inflight - - return uploader_pb - pb = config_pb2.TAppConfig() if self.__tracing: tracing_pb = pb.TracingConfig - ( - backend, - uploader, - sampling, - external_throttling - ) = self.__tracing - - assert isinstance(sampling, list) - assert isinstance(external_throttling, list) - - tracing_pb.Backend.CopyFrom(get_backend(backend)) - - if uploader is not None: - tracing_pb.Uploader.CopyFrom(get_uploader(uploader)) - - for sampling_scope in sampling: - tracing_pb.Sampling.append(get_sampling_scope(sampling_scope)) - - for throttling_scope in external_throttling: - tracing_pb.ExternalThrottling.append(get_external_throttling(throttling_scope)) - + utils.wrap_parse_dict(self.__tracing, tracing_pb) self.__proto_configs["tracing.txt"] = pb def __generate_sys_txt_advanced(self): diff --git a/ydb/tools/cfg/utils.py b/ydb/tools/cfg/utils.py index 94c053345994..b6635875e145 100644 --- a/ydb/tools/cfg/utils.py +++ b/ydb/tools/cfg/utils.py @@ -5,7 +5,7 @@ import string import six -from google.protobuf import text_format +from google.protobuf import text_format, json_format from google.protobuf.pyext._message import FieldDescriptor from library.python import resource @@ -152,3 +152,29 @@ def apply_config_changes(target, changes, fix_names=None): def random_int(low, high, *seed): random.seed(''.join(map(str, seed))) return random.randint(low, high) + + +def wrap_parse_dict(dictionary, proto): + def get_camel_case_string(snake_str): + components = snake_str.split('_') + camelCased = ''.join(x.capitalize() for x in components) + abbreviations = { + 'Uuid': 'UUID', + 'Pdisk': 'PDisk', + 'Vdisk': 'VDisk', + 'NtoSelect': 'NToSelect', + 'Ssid': 'SSId', + } + for k, v in abbreviations.items(): + camelCased = camelCased.replace(k, v) + return camelCased + + def convert_keys(data): + if isinstance(data, dict): + return 
{get_camel_case_string(k): convert_keys(v) for k, v in data.items()} + elif isinstance(data, list): + return [convert_keys(item) for item in data] + else: + return data + + json_format.ParseDict(convert_keys(dictionary), proto) diff --git a/ydb/tools/cfg/validation.py b/ydb/tools/cfg/validation.py index 33d455588cb3..3ab5c311aabf 100644 --- a/ydb/tools/cfg/validation.py +++ b/ydb/tools/cfg/validation.py @@ -138,81 +138,8 @@ TRACING_SCHEMA = dict( type="object", - properties=dict( - backend=dict( - type="object", - properties=dict( - auth_config=dict( - type="object", - properties=dict( - tvm=dict( - type="object", - properties=dict( - url=dict(type="string"), - self_tvm_id=dict(type="integer"), - tracing_tvm_id=dict(type="integer"), - disc_cache_dir=dict(type="string"), - plain_text_secret=dict(type="string"), - secret_file=dict(type="string"), - secret_environment_variable=dict(type="string"), - ), - required=["self_tvm_id", "tracing_tvm_id"], - ) - ), - required=["tvm"], - ), - opentelemetry=dict( - type="object", - properties=dict( - collector_url=dict(type="string"), - service_name=dict(type="string"), - ) - ), - ), - required=["opentelemetry"], - additionalProperties=False, - ), - uploader=dict( - type="object", - properties=dict( - max_exported_spans_per_second=dict(type="integer", minimum=1), - max_spans_in_batch=dict(type="integer", minimum=1), - max_bytes_in_batch=dict(type="integer"), - max_batch_accumulation_milliseconds=dict(type="integer"), - span_export_timeout_seconds=dict(type="integer", minimum=1), - max_export_requests_inflight=dict(type="integer", minimum=1), - ), - additionalProperties=False, - ), - sampling=dict( - type="array", - items=dict( - type="object", - properties=dict( - scope=SELECTORS_CONFIGS, - fraction=dict(type="number", minimum=0, maximum=1), - level=dict(type="integer", minimum=0, maximum=15), - max_traces_per_minute=dict(type="integer", minimum=0), - max_traces_burst=dict(type="integer", minimum=0), - ), - required=["fraction", 
"level", "max_traces_per_minute"], - ), - ), - external_throttling=dict( - type="array", - items=dict( - type="object", - properties=dict( - scope=SELECTORS_CONFIGS, - max_traces_per_minute=dict(type="integer", minimum=0), - max_traces_burst=dict(type="integer", minimum=0), - ), - required=["max_traces_per_minute"], - ), - ), - ), - required=["backend"], - additionalProperties=False, + properties={}, + additionalProperties=True, ) FAILURE_INJECTION_CONFIG_SCHEMA = { @@ -228,6 +155,11 @@ "path": dict(type="string", minLength=1), "shared_with_os": dict(type="boolean"), "expected_slot_count": dict(type="integer"), + "pdisk_config": { + "type": "object", + "additionalProperties": True, + "properties": {}, + }, "kind": dict(type="integer"), }, "required": ["type", "path"], @@ -485,7 +417,7 @@ "type": "array", "items": { "type": "string", - "enum": utils.get_resources_list("resources/console_initializers/"), + "enum": ["cloud_ssd_table_profile", "cloud_ssdencrypted_table_profile"], }, }, }, diff --git a/ydb/tools/cfg/ya.make b/ydb/tools/cfg/ya.make index ad24d29c07fc..e088dc8b9e95 100644 --- a/ydb/tools/cfg/ya.make +++ b/ydb/tools/cfg/ya.make @@ -2,7 +2,7 @@ RECURSE( bin ) -PY23_LIBRARY() +PY3_LIBRARY() PY_SRCS( __init__.py @@ -16,12 +16,6 @@ PY_SRCS( validation.py ) -IF (PYTHON2) - PEERDIR( - contrib/deprecated/python/enum34 - ) -ENDIF() - PEERDIR( contrib/python/protobuf contrib/python/PyYAML @@ -29,6 +23,7 @@ PEERDIR( contrib/python/requests contrib/python/six ydb/tools/cfg/walle + ydb/tools/cfg/k8s_api library/cpp/resource library/python/resource ydb/core/protos