From ead42282197179410d669fc38ec3b16b1760e23b Mon Sep 17 00:00:00 2001
From: Evgeny Blokhin
Date: Thu, 19 Oct 2023 01:43:23 +0200
Subject: [PATCH] Polish docs & messages

---
 CLOUD.md                                | 23 +++++++++++++----------
 README.md                               | 18 ++++++++++++++++--
 yascheduler/clouds/cloud_api_manager.py |  3 ++-
 yascheduler/scheduler.py                |  2 +-
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/CLOUD.md b/CLOUD.md
index 5b3ad73..effc6fd 100644
--- a/CLOUD.md
+++ b/CLOUD.md
@@ -16,7 +16,7 @@ az account subscription list
 
 Create a dedicated Resource Group. See [documentation][az_manage_rg].
 For example, consider `yascheduler-rg` in `westeurope` location.
-Write down the resource group and location to the config.
+Save the resource group and location to the cloud config.
 
 ```bash
 az group create -l westeurope -g yascheduler-rg
@@ -24,14 +24,14 @@ az group create -l westeurope -g yascheduler-rg
 
 Create a dedicated _Enterprise Application_ for service.
 See [documentation][az_app_create].
-Write down `appId` as `client_id` to config.
+Save `appId` as `az_client_id` to the cloud config.
 
 ```bash
 az ad app create --display-name yascheduler
 ```
 
 Assign roles _Network Contributor_ and _Virtual Machine Contributor_
-in the _Resource Group_. Use the correct `appId`.
+in the _Resource Group_. Use the correct `appId`:
 
 ```bash
 az role assignment create \
@@ -45,14 +45,15 @@ az role assignment create \
 ```
 
 Create an _Application Registration_.
-Add _Client Secret_ to the Application Registration. Use correct `appId`.
-Write down `tenant` as `tenant_id` and `password` as `client_secret`.
+Add the _Client Secret_ to this Application Registration. Use the correct `appId`:
 
 ```bash
 az ad app credential reset --id 00000000-0000-0000-0000-000000000000 --append
 ```
 
-Create virtual networks.
+Save `tenant` as the `az_tenant_id` and `password` as the `az_client_secret` cloud settings.
+
+Create virtual networks:
 
 ```bash
 az network nsg create \
@@ -71,14 +72,16 @@ az network vnet create \
   --subnet-prefix 10.0.0.0/22
 ```
 
-It is possible to setup a _jump host_. It allows connections from the outside.
-If `yascheduler` is installed in the internal network, it is optional.
+In our experience, Azure allocates new public IP addresses for the nodes slowly and
+reluctantly, so we support **internal IP addresses** only.
+This is not a problem if `yascheduler` is installed in the internal network.
+If this is not the case, one has to set up a _jump host_ that allows connections from the outside:
 
 ```bash
 az vm create \
   -g yascheduler-rg -l westeurope \
   --name yascheduler-jump-host \
-  --image Debian \
+  --image Debian11 \
   --size Standard_B1s \
   --nsg yascheduler-nsg \
   --public-ip-address yascheduler-jump-host-ip \
@@ -90,7 +93,7 @@ az vm create \
   --ssh-key-values "$(ssh-keygen -y -f path/to/private/key)"
 ```
 
-Write down `publicIpAddress` as `jump_host`. `jump_user` will be `yascheduler`.
+Save the `publicIpAddress` as `az_jump_host`, and `az_jump_user` will be `yascheduler`.
 
 [az_cli_install]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
 [az_manage_rg]: https://docs.microsoft.com/en-us/cli/azure/manage-azure-groups-azure-cli
diff --git a/README.md b/README.md
index 8f47036..bd625dd 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,20 @@ result = yac.queue_submit_task(
 print(result)
 ```
 
+Or run the scheduler directly in the console with `yascheduler` (use the `-l DEBUG` flag to change the log level).
+
+An example _Supervisor_ config reads:
+
+```
+[program:scheduler]
+command=/usr/local/bin/yascheduler
+user=root
+autostart=true
+autorestart=true
+stderr_logfile=/data/yascheduler.log
+stdout_logfile=/data/yascheduler.log
+```
+
 File paths can be set using the environment variables:
 
 - `YASCHEDULER_CONF_PATH`
@@ -104,7 +118,7 @@ Connection to a PostgreSQL database.
 Path to root directory of local data files.
 Can be relative to the current working directory.
 
-_Default_: `./data`
+_Default_: `./data` (but it is always a good idea to set it explicitly!)
 
 _Example_: `/srv/yadata`
 
@@ -362,7 +376,7 @@ Settings prefix is `upcloud`.
 
 Password.
 
-#### Engines `[engine.*]`
+### Engines `[engine.*]`
 
 Supported engines should be defined in the section(s) `[engine.name]`.
 The name is alphanumeric string to represent the real engine name.
diff --git a/yascheduler/clouds/cloud_api_manager.py b/yascheduler/clouds/cloud_api_manager.py
index d307824..830c232 100644
--- a/yascheduler/clouds/cloud_api_manager.py
+++ b/yascheduler/clouds/cloud_api_manager.py
@@ -129,8 +129,9 @@ async def select_best_provider(
         self.log.debug("Used providers: %s", used_providers)
 
         if not suitable_providers:
-            self.log.debug("No suitable cloud provides")
+            self.log.debug("No suitable cloud providers")
             return
+
         ok_apis = filter(lambda x: x.name in suitable_providers, self.apis.values())
         ok_apis_sorted = sorted(ok_apis, key=lambda x: x.config.priority, reverse=True)
         api = ok_apis_sorted[0]
diff --git a/yascheduler/scheduler.py b/yascheduler/scheduler.py
index e2bb333..eff505f 100755
--- a/yascheduler/scheduler.py
+++ b/yascheduler/scheduler.py
@@ -497,7 +497,7 @@ async def allocator_producer(
             tasks = await self.db.get_tasks_by_status((TaskStatus.TO_DO,), tlim)
             if tasks:
                 ids = [str(t.task_id) for t in tasks]
-                self.log.debug("Want allocate tasks: %s" % ", ".join(ids))
+                self.log.debug("Want to allocate tasks: %s" % ", ".join(ids))
                 for task in tasks:
                     yield UMessage(task.task_id, task)
 
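For reference, the `az_*` settings introduced in the CLOUD.md changes above all land in the scheduler's cloud config. Below is a minimal sketch of that section, assuming a `[clouds]` section name and placeholder values; the key names are the ones referenced in the patched docs, and the exact layout should be checked against the shipped `yascheduler.conf` template:

```
[clouds]
; Azure service principal obtained via `az ad app create` and `az ad app credential reset`
; (placeholder values; the resource group and location keys are not named in this patch)
az_tenant_id = 00000000-0000-0000-0000-000000000000
az_client_id = 00000000-0000-0000-0000-000000000000
az_client_secret = <password from az ad app credential reset>
; only needed when yascheduler itself runs outside the internal Azure network
az_jump_host = 203.0.113.10
az_jump_user = yascheduler
```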