From 7a11aa967f8f393a5de9264818dee876a6241aef Mon Sep 17 00:00:00 2001
From: David Stancu
Date: Mon, 29 Jan 2024 10:57:00 -0500
Subject: [PATCH] Update Job DTO, specifically handle 404 (#45)

* Revert "support for multi-arch (#44)"

This reverts commit fa7ba4f48cd8fdf1673d3675695141e88e88ac59.

* fix for resource that does not exist when ID is set

* expose data_security_mode for cluster settings, queue for jobs

* move data_security_mode to JobSettings; pass queue through to the API create request

* update crds in chart, bump versions
---
 .github/workflows/rust.yml                    | 42 ++++++-------
 Cargo.lock                                    |  2 +-
 charts/databricks-kube-operator/Chart.yaml    |  4 +-
 .../templates/crds.yaml                       | 40 ++++++++++++++++++
 charts/databricks-kube-operator/values.yaml   |  1 +
 databricks-kube/Cargo.toml                    |  2 +-
 databricks-kube/src/crds/databricks_job.rs    |  1 +
 .../src/traits/remote_api_resource.rs         | 33 ++++++++++++++-
 databricks-rust-jobs/.openapi-generator/FILES |  2 +
 .../.openapi-generator/VERSION                |  2 +-
 databricks-rust-jobs/README.md                |  1 +
 databricks-rust-jobs/docs/JobSettings.md      |  1 +
 databricks-rust-jobs/docs/JobSettingsQueue.md | 11 +++++
 .../docs/JobsCreateRequest.md                 |  1 +
 databricks-rust-jobs/docs/NewCluster.md       |  2 +
 databricks-rust-jobs/docs/NewTaskCluster.md   |  2 +
 .../src/models/job_settings.rs                |  3 ++
 .../src/models/job_settings_queue.rs          | 39 +++++++++++++++++
 .../src/models/jobs_create_request.rs         |  3 ++
 databricks-rust-jobs/src/models/mod.rs        |  2 +
 .../src/models/new_cluster.rs                 | 30 +++++++++++++
 .../src/models/new_task_cluster.rs            | 30 +++++++++++++
 openapi/jobs-2.1-aws.yaml                     | 21 ++++++++++
 23 files changed, 240 insertions(+), 35 deletions(-)
 create mode 100644 databricks-rust-jobs/docs/JobSettingsQueue.md
 create mode 100644 databricks-rust-jobs/src/models/job_settings_queue.rs

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index cadc71d..b4efad2 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -11,33 +11,22 @@ jobs:
     with:
       api-key: ${{secrets.FOSSA_API_KEY}}
   test:
-    name: Test (x86)
+    name: Build, test (x86)
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: Swatinem/rust-cache@v2
-      - uses: houseabsolute/actions-rust-cross@v0
+      - uses: actions-rs/toolchain@v1
         with:
-          command: "test"
           toolchain: stable
-          args: "--test-threads=1"
-  build:
-    name: Build
-    runs-on: ubuntu-latest
-    needs: test
-    strategy:
-      matrix:
-        target: [ aarch64-unknown-linux-gnu, x86_64-unknown-linux-gnu ]
-    steps:
-      - uses: actions/checkout@v4
-      - name: Build binary
-        uses: houseabsolute/actions-rust-cross@v0
+      - uses: Swatinem/rust-cache@v2
+      - uses: actions-rs/cargo@v1
         with:
-          command: "build"
-          target: ${{ matrix.target }}
-          toolchain: stable
-          args: "--locked --release --all-features"
-          strip: true
+          command: test
+          args: -- --test-threads=1
+      - uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --all-features
       - uses: actions/upload-artifact@v4
         with:
           name: crd_gen
       - uses: actions/upload-artifact@v4
         with:
           name: databricks_kube
-          path: target/release/databricks_kube
-  publish:
+          path: target/release/databricks_kube
+  publish_image:
     name: Docker
     runs-on: ubuntu-latest
-    needs: build
+    needs: test
     if: github.ref == 'refs/heads/master'
     steps:
       - uses: actions/checkout@v4
      -
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-        with:
-          platforms: linux/amd64,linux/arm64
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
       - name: Docker meta
@@ -83,6 +68,5 @@ jobs:
         with:
           context: .
           push: true
-          platforms: linux/amd64, linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
diff --git a/Cargo.lock b/Cargo.lock
index 9851236..a5b5293 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -392,7 +392,7 @@ dependencies = [
 
 [[package]]
 name = "databricks_kube"
-version = "0.6.2"
+version = "0.7.0"
 dependencies = [
  "assert-json-diff",
  "async-stream",
diff --git a/charts/databricks-kube-operator/Chart.yaml b/charts/databricks-kube-operator/Chart.yaml
index d0b1901..ec2d8a0 100644
--- a/charts/databricks-kube-operator/Chart.yaml
+++ b/charts/databricks-kube-operator/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
-appVersion: 0.6.2
+appVersion: 0.7.0
 name: databricks-kube-operator
 description: A kube-rs operator for managing Databricks API resources
-version: 0.6.2
+version: 0.7.0
 home: https://github.com/mach-kernel/databricks-kube-operator
 sources:
diff --git a/charts/databricks-kube-operator/templates/crds.yaml b/charts/databricks-kube-operator/templates/crds.yaml
index 1d75cdb..ec84be5 100644
--- a/charts/databricks-kube-operator/templates/crds.yaml
+++ b/charts/databricks-kube-operator/templates/crds.yaml
@@ -262,6 +262,17 @@ spec:
                 description: 'An object with key value pairs. The key length must be between 1 and 127 UTF-8 characters, inclusive. The value length must be less than or equal to 255 UTF-8 characters. For a list of all restrictions, see AWS Tag Restrictions: '
                 nullable: true
                 type: object
+              data_security_mode:
+                description: Data security mode decides what data governance model to use when accessing data from a cluster.
+                enum:
+                - NONE
+                - SINGLE_USER
+                - USER_ISOLATION
+                - LEGACY_TABLE_ACL
+                - LEGACY_PASSTHROUGH
+                - LEGACY_SINGLE_USER
+                nullable: true
+                type: string
               docker_image:
                 nullable: true
                 properties:
@@ -374,6 +385,10 @@ spec:
                 description: 'The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the `spark_version` value. Allowed values include: * `PHOTON`: Use the Photon runtime engine type. * `STANDARD`: Use the standard runtime engine type. This field is optional.'
                 nullable: true
                 type: string
+              single_user_name:
+                description: Single user name if data_security_mode is SINGLE_USER
+                nullable: true
+                type: string
               spark_conf:
                 additionalProperties: true
                 description: An arbitrary object where the object key is a configuration propery name and the value is a configuration property value.
@@ -410,6 +425,14 @@ spec:
           description: An optional name for the job.
           nullable: true
           type: string
+          queue:
+            description: 'JobSettingsQueue : The queue settings of the job.'
+            nullable: true
+            properties:
+              enabled:
+                nullable: true
+                type: boolean
+            type: object
           schedule:
             nullable: true
             properties:
@@ -716,6 +739,17 @@ spec:
                     description: 'An object with key value pairs. The key length must be between 1 and 127 UTF-8 characters, inclusive. The value length must be less than or equal to 255 UTF-8 characters. For a list of all restrictions, see AWS Tag Restrictions: '
                     nullable: true
                     type: object
+                  data_security_mode:
+                    description: Data security mode decides what data governance model to use when accessing data from a cluster.
+                    enum:
+                    - NONE
+                    - SINGLE_USER
+                    - USER_ISOLATION
+                    - LEGACY_TABLE_ACL
+                    - LEGACY_PASSTHROUGH
+                    - LEGACY_SINGLE_USER
+                    nullable: true
+                    type: string
                   docker_image:
                     nullable: true
                     properties:
@@ -828,6 +862,10 @@ spec:
                     description: 'The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the `spark_version` value. Allowed values include: * `PHOTON`: Use the Photon runtime engine type. * `STANDARD`: Use the standard runtime engine type. This field is optional.'
                     nullable: true
                     type: string
+                  single_user_name:
+                    description: Single user name if data_security_mode is SINGLE_USER
+                    nullable: true
+                    type: string
                   spark_conf:
                     additionalProperties: true
                     description: An arbitrary object where the object key is a configuration propery name and the value is a configuration property value.
@@ -1275,4 +1313,6 @@ spec:
       served: true
       storage: true
       subresources: {}
+
+
 {{- end -}}
\ No newline at end of file
diff --git a/charts/databricks-kube-operator/values.yaml b/charts/databricks-kube-operator/values.yaml
index 9e58873..32af581 100644
--- a/charts/databricks-kube-operator/values.yaml
+++ b/charts/databricks-kube-operator/values.yaml
@@ -9,5 +9,6 @@ image:
 podAnnotations: {}
 nodeSelector:
   kubernetes.io/os: linux
+  kubernetes.io/arch: amd64
 resources: {}
 affinity: {}
diff --git a/databricks-kube/Cargo.toml b/databricks-kube/Cargo.toml
index 14a64a4..aa5e477 100644
--- a/databricks-kube/Cargo.toml
+++ b/databricks-kube/Cargo.toml
@@ -5,7 +5,7 @@ path = "src/crdgen.rs"
 [package]
 name = "databricks_kube"
 default-run = "databricks_kube"
-version = "0.6.2"
+version = "0.7.0"
 edition = "2021"
 
 [dependencies]
diff --git a/databricks-kube/src/crds/databricks_job.rs b/databricks-kube/src/crds/databricks_job.rs
index 3b942e7..cbd94a6 100644
--- a/databricks-kube/src/crds/databricks_job.rs
+++ b/databricks-kube/src/crds/databricks_job.rs
@@ -244,6 +244,7 @@ impl RemoteAPIResource<Job> for DatabricksJob {
                     git_source: job_settings.git_source,
                     format: job_settings.format.map(job_settings_to_create_format),
                     continuous: job_settings.continuous,
+                    queue: job_settings.queue,
                     ..JobsCreateRequest::default()
                 }
             ).await?;
diff --git a/databricks-kube/src/traits/remote_api_resource.rs b/databricks-kube/src/traits/remote_api_resource.rs
index a37632d..5df0664 100644
--- a/databricks-kube/src/traits/remote_api_resource.rs
+++ b/databricks-kube/src/traits/remote_api_resource.rs
@@ -1,6 +1,6 @@
 use std::{fmt::Debug, hash::Hash, pin::Pin, sync::Arc, time::Duration};
 
-use crate::{context::Context, error::DatabricksKubeError};
+use crate::{context::Context, error::{DatabricksKubeError, OpenAPIError}};
 
 use assert_json_diff::assert_json_matches_no_panic;
 use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
@@ -55,6 +55,37 @@ where
 
         match latest_remote {
             Err(DatabricksKubeError::IDUnsetError) => {
+                log::info!(
+                    "Resource {} {} does not have an ID set, creating...",
+                    TCRDType::api_resource().kind,
+                    resource.name_unchecked()
+                );
+
+                let created = resource
+                    .remote_create(context.clone())
+                    .next()
+                    .await
+                    .unwrap()?;
+
+                log::info!(
+                    "Created {} {} in Databricks",
+                    TCRDType::api_resource().kind,
+                    resource.name_unchecked()
+                );
+
+                kube_api
+                    .replace(&resource.name_unchecked(), &PostParams::default(), &created)
+                    .await
+                    .map_err(|e| DatabricksKubeError::ResourceUpdateError(e.to_string()))?;
+
+                log::info!(
+                    "Updated {} {} in K8S",
+                    TCRDType::api_resource().kind,
+                    resource.name_unchecked()
+                );
+            },
+            // TODO: stricter assertion that doesn't rely on dbx error presentation
+            Err(DatabricksKubeError::APIError(OpenAPIError::ResponseError(re))) if re.status == 400 && re.content.contains("does not exist") => {
                 log::info!(
                     "Resource {} {} is missing in Databricks, creating",
                     TCRDType::api_resource().kind,
diff --git a/databricks-rust-jobs/.openapi-generator/FILES b/databricks-rust-jobs/.openapi-generator/FILES
index 4bb548f..d7d6e2b 100644
--- a/databricks-rust-jobs/.openapi-generator/FILES
+++ b/databricks-rust-jobs/.openapi-generator/FILES
@@ -45,6 +45,7 @@ docs/Job.md
 docs/JobCluster.md
 docs/JobEmailNotifications.md
 docs/JobSettings.md
+docs/JobSettingsQueue.md
 docs/JobTask.md
 docs/JobTaskSettings.md
 docs/JobsCreate200Response.md
@@ -176,6 +177,7 @@ src/models/job.rs
 src/models/job_cluster.rs
 src/models/job_email_notifications.rs
 src/models/job_settings.rs
+src/models/job_settings_queue.rs
 src/models/job_task.rs
 src/models/job_task_settings.rs
 src/models/jobs_create_200_response.rs
diff --git a/databricks-rust-jobs/.openapi-generator/VERSION b/databricks-rust-jobs/.openapi-generator/VERSION
index 73a86b1..4b49d9b 100644
--- a/databricks-rust-jobs/.openapi-generator/VERSION
+++ b/databricks-rust-jobs/.openapi-generator/VERSION
@@ -1 +1 @@
-7.0.1
\ No newline at end of file
+7.2.0
\ No newline at end of file
diff --git a/databricks-rust-jobs/README.md b/databricks-rust-jobs/README.md
index 20342cb..442c400 100644
--- a/databricks-rust-jobs/README.md
+++ b/databricks-rust-jobs/README.md
@@ -87,6 +87,7 @@ Class | Method | HTTP request | Description
  - [JobCluster](docs/JobCluster.md)
  - [JobEmailNotifications](docs/JobEmailNotifications.md)
  - [JobSettings](docs/JobSettings.md)
+ - [JobSettingsQueue](docs/JobSettingsQueue.md)
  - [JobTask](docs/JobTask.md)
  - [JobTaskSettings](docs/JobTaskSettings.md)
  - [JobsCreate200Response](docs/JobsCreate200Response.md)
diff --git a/databricks-rust-jobs/docs/JobSettings.md b/databricks-rust-jobs/docs/JobSettings.md
index c1f42c8..ddae2e1 100644
--- a/databricks-rust-jobs/docs/JobSettings.md
+++ b/databricks-rust-jobs/docs/JobSettings.md
@@ -6,6 +6,7 @@ Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **name** | Option<**String**> | An optional name for the job. | [optional][default to Untitled]
 **tags** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job. | [optional][default to {}]
+**queue** | Option<[**crate::models::JobSettingsQueue**](JobSettings_queue.md)> |  | [optional]
 **tasks** | Option<[**Vec**](JobTaskSettings.md)> | A list of task specifications to be executed by this job. | [optional]
 **job_clusters** | Option<[**Vec**](JobCluster.md)> | A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. | [optional]
 **email_notifications** | Option<[**crate::models::JobEmailNotifications**](JobEmailNotifications.md)> |  | [optional]
diff --git a/databricks-rust-jobs/docs/JobSettingsQueue.md b/databricks-rust-jobs/docs/JobSettingsQueue.md
new file mode 100644
index 0000000..568ca1d
--- /dev/null
+++ b/databricks-rust-jobs/docs/JobSettingsQueue.md
@@ -0,0 +1,11 @@
+# JobSettingsQueue
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**enabled** | Option<**bool**> |  | [optional]
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/databricks-rust-jobs/docs/JobsCreateRequest.md b/databricks-rust-jobs/docs/JobsCreateRequest.md
index 772956a..0b172bb 100644
--- a/databricks-rust-jobs/docs/JobsCreateRequest.md
+++ b/databricks-rust-jobs/docs/JobsCreateRequest.md
@@ -6,6 +6,7 @@ Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **name** | Option<**String**> | An optional name for the job. | [optional][default to Untitled]
 **tags** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job. | [optional][default to {}]
+**queue** | Option<[**crate::models::JobSettingsQueue**](JobSettings_queue.md)> |  | [optional]
 **tasks** | Option<[**Vec**](JobTaskSettings.md)> | A list of task specifications to be executed by this job. | [optional]
 **job_clusters** | Option<[**Vec**](JobCluster.md)> | A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. | [optional]
 **email_notifications** | Option<[**crate::models::JobEmailNotifications**](JobEmailNotifications.md)> |  | [optional]
diff --git a/databricks-rust-jobs/docs/NewCluster.md b/databricks-rust-jobs/docs/NewCluster.md
index 132764d..2e5a6c4 100644
--- a/databricks-rust-jobs/docs/NewCluster.md
+++ b/databricks-rust-jobs/docs/NewCluster.md
@@ -10,6 +10,8 @@ Name | Type | Description | Notes
 **spark_conf** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | An arbitrary object where the object key is a configuration propery name and the value is a configuration property value. | [optional]
 **aws_attributes** | Option<[**crate::models::AwsAttributes**](AwsAttributes.md)> |  | [optional]
 **node_type_id** | Option<**String**> | This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute intensive workloads A list of available node types can be retrieved by using the [List node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) API call. | [optional]
+**data_security_mode** | Option<**String**> | Data security mode decides what data governance model to use when accessing data from a cluster. | [optional]
+**single_user_name** | Option<**String**> | Single user name if data_security_mode is SINGLE_USER | [optional]
 **driver_node_type_id** | Option<**String**> | The node type of the Spark driver. This field is optional; if unset, the driver node type is set as the same value as `node_type_id` defined above. | [optional]
 **ssh_public_keys** | Option<**Vec**> | SSH public key contents that are added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be specified. | [optional]
 **custom_tags** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | An object with key value pairs. The key length must be between 1 and 127 UTF-8 characters, inclusive. The value length must be less than or equal to 255 UTF-8 characters. For a list of all restrictions, see AWS Tag Restrictions: | [optional]
diff --git a/databricks-rust-jobs/docs/NewTaskCluster.md b/databricks-rust-jobs/docs/NewTaskCluster.md
index 7e203de..102694d 100644
--- a/databricks-rust-jobs/docs/NewTaskCluster.md
+++ b/databricks-rust-jobs/docs/NewTaskCluster.md
@@ -10,6 +10,8 @@ Name | Type | Description | Notes
 **spark_conf** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | An arbitrary object where the object key is a configuration propery name and the value is a configuration property value. | [optional]
 **aws_attributes** | Option<[**crate::models::AwsAttributes**](AwsAttributes.md)> |  | [optional]
 **node_type_id** | Option<**String**> | This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute intensive workloads A list of available node types can be retrieved by using the [List node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) API call. | [optional]
+**data_security_mode** | Option<**String**> | Data security mode decides what data governance model to use when accessing data from a cluster. | [optional]
+**single_user_name** | Option<**String**> | Single user name if data_security_mode is SINGLE_USER | [optional]
 **driver_node_type_id** | Option<**String**> | The node type of the Spark driver. This field is optional; if unset, the driver node type is set as the same value as `node_type_id` defined above. | [optional]
 **ssh_public_keys** | Option<**Vec**> | SSH public key contents that are added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be specified. | [optional]
 **custom_tags** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | An object with key value pairs. The key length must be between 1 and 127 UTF-8 characters, inclusive. The value length must be less than or equal to 255 UTF-8 characters. For a list of all restrictions, see AWS Tag Restrictions: | [optional]
diff --git a/databricks-rust-jobs/src/models/job_settings.rs b/databricks-rust-jobs/src/models/job_settings.rs
index f17785a..6cf3a74 100644
--- a/databricks-rust-jobs/src/models/job_settings.rs
+++ b/databricks-rust-jobs/src/models/job_settings.rs
@@ -20,6 +20,8 @@ pub struct JobSettings {
     /// A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.
     #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
     pub tags: Option<::std::collections::HashMap<String, serde_json::Value>>,
+    #[serde(rename = "queue", skip_serializing_if = "Option::is_none")]
+    pub queue: Option<Box<crate::models::JobSettingsQueue>>,
     /// A list of task specifications to be executed by this job.
     #[serde(rename = "tasks", skip_serializing_if = "Option::is_none")]
     pub tasks: Option<Vec<crate::models::JobTaskSettings>>,
@@ -52,6 +54,7 @@ impl JobSettings {
         JobSettings {
             name: None,
             tags: None,
+            queue: None,
             tasks: None,
             job_clusters: None,
             email_notifications: None,
diff --git a/databricks-rust-jobs/src/models/job_settings_queue.rs b/databricks-rust-jobs/src/models/job_settings_queue.rs
new file mode 100644
index 0000000..f13683e
--- /dev/null
+++ b/databricks-rust-jobs/src/models/job_settings_queue.rs
@@ -0,0 +1,39 @@
+use schemars::JsonSchema;
+/*
+ * Jobs API 2.1
+ *
+ * The Jobs API allows you to create, edit, and delete jobs. You should never hard code secrets or store them in plain text. Use the [Secrets API](https://docs.databricks.com/dev-tools/api/latest/secrets.html) to manage secrets in the [Databricks CLI](https://docs.databricks.com/dev-tools/cli/index.html). Use the [Secrets utility](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets) to reference secrets in notebooks and jobs.
+ *
+ * The version of the OpenAPI document: 2.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+/// JobSettingsQueue : The queue settings of the job.
+
+
+
+#[derive(JsonSchema, Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct JobSettingsQueue {
+    #[serde(rename = "enabled")]
+    pub enabled: Option<bool>,
+}
+
+impl JobSettingsQueue {
+    /// The queue settings of the job.
+    pub fn new() -> JobSettingsQueue {
+        JobSettingsQueue {
+            enabled: Some(false),
+        }
+    }
+}
+
+impl Default for JobSettingsQueue {
+    fn default() -> Self {
+        JobSettingsQueue {
+            enabled: Some(false),
+        }
+    }
+}
+
+
diff --git a/databricks-rust-jobs/src/models/jobs_create_request.rs b/databricks-rust-jobs/src/models/jobs_create_request.rs
index f42bc25..e9e992c 100644
--- a/databricks-rust-jobs/src/models/jobs_create_request.rs
+++ b/databricks-rust-jobs/src/models/jobs_create_request.rs
@@ -20,6 +20,8 @@ pub struct JobsCreateRequest {
     /// A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.
     #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
     pub tags: Option<::std::collections::HashMap<String, serde_json::Value>>,
+    #[serde(rename = "queue", skip_serializing_if = "Option::is_none")]
+    pub queue: Option<Box<crate::models::JobSettingsQueue>>,
     /// A list of task specifications to be executed by this job.
     #[serde(rename = "tasks", skip_serializing_if = "Option::is_none")]
     pub tasks: Option<Vec<crate::models::JobTaskSettings>>,
@@ -55,6 +57,7 @@ impl JobsCreateRequest {
         JobsCreateRequest {
             name: None,
             tags: None,
+            queue: None,
             tasks: None,
             job_clusters: None,
             email_notifications: None,
diff --git a/databricks-rust-jobs/src/models/mod.rs b/databricks-rust-jobs/src/models/mod.rs
index e9fcc7e..30c5afe 100644
--- a/databricks-rust-jobs/src/models/mod.rs
+++ b/databricks-rust-jobs/src/models/mod.rs
@@ -80,6 +80,8 @@ pub mod job_email_notifications;
 pub use self::job_email_notifications::JobEmailNotifications;
 pub mod job_settings;
 pub use self::job_settings::JobSettings;
+pub mod job_settings_queue;
+pub use self::job_settings_queue::JobSettingsQueue;
 pub mod job_task;
 pub use self::job_task::JobTask;
 pub mod job_task_settings;
diff --git a/databricks-rust-jobs/src/models/new_cluster.rs b/databricks-rust-jobs/src/models/new_cluster.rs
index f438c85..64bf978 100644
--- a/databricks-rust-jobs/src/models/new_cluster.rs
+++ b/databricks-rust-jobs/src/models/new_cluster.rs
@@ -30,6 +30,12 @@ pub struct NewCluster {
     /// This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute intensive workloads A list of available node types can be retrieved by using the [List node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) API call.
     #[serde(rename = "node_type_id", skip_serializing_if = "Option::is_none")]
     pub node_type_id: Option<String>,
+    /// Data security mode decides what data governance model to use when accessing data from a cluster.
+    #[serde(rename = "data_security_mode", skip_serializing_if = "Option::is_none")]
+    pub data_security_mode: Option<DataSecurityMode>,
+    /// Single user name if data_security_mode is SINGLE_USER
+    #[serde(rename = "single_user_name", skip_serializing_if = "Option::is_none")]
+    pub single_user_name: Option<String>,
     /// The node type of the Spark driver. This field is optional; if unset, the driver node type is set as the same value as `node_type_id` defined above.
     #[serde(rename = "driver_node_type_id", skip_serializing_if = "Option::is_none")]
     pub driver_node_type_id: Option<String>,
@@ -78,6 +84,8 @@ impl NewCluster {
             spark_conf: None,
             aws_attributes: None,
             node_type_id: None,
+            data_security_mode: None,
+            single_user_name: None,
             driver_node_type_id: None,
             ssh_public_keys: None,
             custom_tags: None,
@@ -95,4 +103,26 @@ impl NewCluster {
     }
 }
 
 
+/// Data security mode decides what data governance model to use when accessing data from a cluster.
+#[derive(JsonSchema, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum DataSecurityMode {
+    #[serde(rename = "NONE")]
+    None,
+    #[serde(rename = "SINGLE_USER")]
+    SingleUser,
+    #[serde(rename = "USER_ISOLATION")]
+    UserIsolation,
+    #[serde(rename = "LEGACY_TABLE_ACL")]
+    LegacyTableAcl,
+    #[serde(rename = "LEGACY_PASSTHROUGH")]
+    LegacyPassthrough,
+    #[serde(rename = "LEGACY_SINGLE_USER")]
+    LegacySingleUser,
+}
+
+impl Default for DataSecurityMode {
+    fn default() -> DataSecurityMode {
+        Self::None
+    }
+}
diff --git a/databricks-rust-jobs/src/models/new_task_cluster.rs b/databricks-rust-jobs/src/models/new_task_cluster.rs
index c26f195..a2f2167 100644
--- a/databricks-rust-jobs/src/models/new_task_cluster.rs
+++ b/databricks-rust-jobs/src/models/new_task_cluster.rs
@@ -31,6 +31,12 @@ pub struct NewTaskCluster {
     /// This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute intensive workloads A list of available node types can be retrieved by using the [List node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) API call.
     #[serde(rename = "node_type_id", skip_serializing_if = "Option::is_none")]
     pub node_type_id: Option<String>,
+    /// Data security mode decides what data governance model to use when accessing data from a cluster.
+    #[serde(rename = "data_security_mode", skip_serializing_if = "Option::is_none")]
+    pub data_security_mode: Option<DataSecurityMode>,
+    /// Single user name if data_security_mode is SINGLE_USER
+    #[serde(rename = "single_user_name", skip_serializing_if = "Option::is_none")]
+    pub single_user_name: Option<String>,
     /// The node type of the Spark driver. This field is optional; if unset, the driver node type is set as the same value as `node_type_id` defined above.
     #[serde(rename = "driver_node_type_id", skip_serializing_if = "Option::is_none")]
     pub driver_node_type_id: Option<String>,
@@ -80,6 +86,8 @@ impl NewTaskCluster {
             spark_conf: None,
             aws_attributes: None,
             node_type_id: None,
+            data_security_mode: None,
+            single_user_name: None,
             driver_node_type_id: None,
             ssh_public_keys: None,
             custom_tags: None,
@@ -97,4 +105,26 @@ impl NewTaskCluster {
     }
 }
 
 
+/// Data security mode decides what data governance model to use when accessing data from a cluster.
+#[derive(JsonSchema, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum DataSecurityMode {
+    #[serde(rename = "NONE")]
+    None,
+    #[serde(rename = "SINGLE_USER")]
+    SingleUser,
+    #[serde(rename = "USER_ISOLATION")]
+    UserIsolation,
+    #[serde(rename = "LEGACY_TABLE_ACL")]
+    LegacyTableAcl,
+    #[serde(rename = "LEGACY_PASSTHROUGH")]
+    LegacyPassthrough,
+    #[serde(rename = "LEGACY_SINGLE_USER")]
+    LegacySingleUser,
+}
+
+impl Default for DataSecurityMode {
+    fn default() -> DataSecurityMode {
+        Self::None
+    }
+}
diff --git a/openapi/jobs-2.1-aws.yaml b/openapi/jobs-2.1-aws.yaml
index 29c12b9..03f5d21 100644
--- a/openapi/jobs-2.1-aws.yaml
+++ b/openapi/jobs-2.1-aws.yaml
@@ -1203,6 +1203,14 @@ components:
             cluster as cluster tags for jobs clusters, and are subject to the
             same limitations as cluster tags. A maximum of 25 tags can be added
             to the job.
+          queue:
+            type: object
+            description: The queue settings of the job.
+            example:
+              enabled: true
+            properties:
+              enabled:
+                type: boolean
           tasks:
             type: array
             maxItems: 100
@@ -1643,6 +1651,19 @@ components:
             by using the [List node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types)
             API call.
+          data_security_mode:
+            description: Data security mode decides what data governance model to use when accessing data from a cluster.
+            enum:
+            - NONE
+            - SINGLE_USER
+            - USER_ISOLATION
+            - LEGACY_TABLE_ACL
+            - LEGACY_PASSTHROUGH
+            - LEGACY_SINGLE_USER
+            type: string
+          single_user_name:
+            description: Single user name if data_security_mode is SINGLE_USER
+            type: string
           driver_node_type_id:
             type: string
             description: >-