diff --git a/paasta_tools/setup_tron_namespace.py b/paasta_tools/setup_tron_namespace.py
index b6233f9c1f..58784f90c7 100755
--- a/paasta_tools/setup_tron_namespace.py
+++ b/paasta_tools/setup_tron_namespace.py
@@ -23,6 +23,7 @@
 import argparse
 import logging
 import sys
+from typing import List
 
 import ruamel.yaml as yaml
 
@@ -31,7 +32,9 @@
 from paasta_tools.kubernetes_tools import ensure_service_account
 from paasta_tools.kubernetes_tools import KubeClient
 from paasta_tools.tron_tools import KUBERNETES_NAMESPACE
+from paasta_tools.tron_tools import load_tron_service_config
 from paasta_tools.tron_tools import MASTER_NAMESPACE
+from paasta_tools.tron_tools import TronJobConfig
 from paasta_tools.utils import load_system_paasta_config
 
 log = logging.getLogger(__name__)
@@ -68,32 +71,29 @@ def parse_args():
     return args
 
 
-def ensure_service_accounts(raw_config: str) -> None:
+def ensure_service_accounts(job_configs: List[TronJobConfig]) -> None:
     # NOTE: these are lru_cache'd so it should be fine to call these for every service
     system_paasta_config = load_system_paasta_config()
     kube_client = KubeClient()
-    # this is kinda silly, but the tron create_config functions return strings
-    # we should refactor to pass the dicts around until the we're going to send the config to tron
-    # (where we can finally convert it to a string)
-    config = yaml.safe_load(raw_config)
-    for _, job in config.get("jobs", {}).items():
-        for _, action in job.get("actions", {}).items():
-            if action.get("service_account_name") is not None:
+
+    for job in job_configs:
+        for action in job.get_actions():
+            if action.get_iam_role():
                 ensure_service_account(
-                    action["service_account_name"],
+                    action.get_iam_role(),
                     namespace=KUBERNETES_NAMESPACE,
                     kube_client=kube_client,
                 )
                 # spark executors are special in that we want the SA to exist in two namespaces:
                 # the tron namespace - for the spark driver
                 # and the spark namespace - for the spark executor
-                if action.get("executor") == "spark":
+                if action.get_executor() == "spark":
                     # this kubeclient creation is lru_cache'd so it should be fine to call this for every spark action
                     spark_kube_client = KubeClient(
                         config_file=system_paasta_config.get_spark_kubeconfig()
                     )
                     ensure_service_account(
-                        action["service_account_name"],
+                        action.get_iam_role(),
                         namespace=spark_tools.SPARK_EXECUTOR_NAMESPACE,
                         kube_client=spark_kube_client,
                     )
@@ -171,7 +171,15 @@ def main():
             else:
                 # PaaSTA will not necessarily have created the SAs we want to use
                 # ...so let's go ahead and create them!
-                ensure_service_accounts(new_config)
+                job_configs = load_tron_service_config(
+                    service=service,
+                    cluster=args.cluster,
+                    load_deployments=False,
+                    soa_dir=args.soa_dir,
+                    # XXX: we can remove for_validation now that we've refactored how service account stuff works
+                    for_validation=False,
+                )
+                ensure_service_accounts(job_configs)
 
                 if client.update_namespace(service, new_config):
                     updated.append(service)
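
Not part of the diff itself: a minimal pytest-style sketch of how the refactored `ensure_service_accounts()` could be exercised, using mock stand-ins for the `TronJobConfig`/action accessors the new code relies on (`get_actions`, `get_iam_role`, `get_executor`). The test name and IAM role value are hypothetical; the patched helpers are the ones imported by `setup_tron_namespace` in the diff above.

```python
# Hypothetical test sketch (not included in this change): exercises the new
# ensure_service_accounts() signature with mocked job/action configs.
from unittest import mock

from paasta_tools.setup_tron_namespace import ensure_service_accounts


def test_ensure_service_accounts_creates_sa_in_both_namespaces_for_spark():
    # A fake action that requests an IAM role and runs on the spark executor
    action = mock.Mock()
    action.get_iam_role.return_value = "arn:aws:iam::000000000000:role/example"  # hypothetical role
    action.get_executor.return_value = "spark"

    job = mock.Mock()
    job.get_actions.return_value = [action]

    with mock.patch(
        "paasta_tools.setup_tron_namespace.load_system_paasta_config", autospec=True
    ), mock.patch(
        "paasta_tools.setup_tron_namespace.KubeClient", autospec=True
    ), mock.patch(
        "paasta_tools.setup_tron_namespace.ensure_service_account", autospec=True
    ) as mock_ensure_service_account:
        ensure_service_accounts([job])

    # one SA for the tron namespace (driver) and one for the spark namespace (executor)
    assert mock_ensure_service_account.call_count == 2
```

Passing `TronJobConfig` objects around means callers no longer have to `yaml.safe_load` the rendered config string that the tron create_config helpers produce, which is exactly the workaround the deleted comment was apologizing for.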