Skip to content

Commit

Permalink
Remove ingest behavior, better controller behavior, use finalizers for cleanup (#19)

Browse files Browse the repository at this point in the history

* feedback / deprecate ingest feature
use kube-rs finalizer helper instead of managing delete watchers
remove default_error_policy, since each controller should define its own
requeue behavior based on the controller trait, and that behavior is best
specified alongside the reconciler
refactor and fix tests

* fix delete mock to respond to PATCH requests for removing finalizers
update docs
bump version

* fix log msg

* update docker tag in helm chart
  • Loading branch information
mach-kernel authored Dec 7, 2022
1 parent 6556762 commit 5eac87e
Show file tree
Hide file tree
Showing 13 changed files with 457 additions and 598 deletions.
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 1 addition & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,15 +77,14 @@ metadata:
databricks-operator/owner: operator
```
By default, databricks-kube-operator will also sync existing API resources from Databricks into Kubernetes (goal: surface status). Resources owned by the API are tagged as such with an annotation on ingest:
It is also possible to set a resource's owner to `api`, which will update the Kubernetes resource as it changes on Databricks.

```yaml
apiVersion: com.dstancu.databricks/v1
kind: DatabricksJob
metadata:
annotations:
databricks-operator/owner: api
creationTimestamp: "2022-11-04T21:46:12Z"
generation: 1
name: hello-world
...
Expand Down
2 changes: 1 addition & 1 deletion charts/databricks-kube-operator/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.3
version: 0.3.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
Expand Down
2 changes: 1 addition & 1 deletion charts/databricks-kube-operator/templates/sts.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: dko
image: ghcr.io/mach-kernel/databricks-kube-operator:0.2.3
image: ghcr.io/mach-kernel/databricks-kube-operator:0.3.0
imagePullPolicy: Always
env:
- name: DATABRICKS_KUBE_CONFIGMAP
Expand Down
2 changes: 1 addition & 1 deletion databricks-kube/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ path = "src/crdgen.rs"
[package]
name = "databricks_kube"
default-run = "databricks_kube"
version = "0.2.3"
version = "0.3.0"
edition = "2021"

[dependencies]
Expand Down
8 changes: 0 additions & 8 deletions databricks-kube/src/context.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,11 @@
use std::{collections::BTreeMap, env, sync::Arc};

use crate::error::DatabricksKubeError;

use flurry::HashMap;

use k8s_openapi::api::core::v1::{ConfigMap, Secret};
use kube::{runtime::reflector::Store, Client};
use lazy_static::lazy_static;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use tokio::task::JoinHandle;

lazy_static! {
pub static ref CONFIGMAP_NAME: String =
env::var("DATABRICKS_KUBE_CONFIGMAP").unwrap_or("databricks-kube-operator".to_owned());
Expand All @@ -21,7 +15,6 @@ lazy_static! {
// Shared state handed to every controller: a Kubernetes client plus
// reflector stores for the operator's ConfigMap and API-token Secret.
#[derive(Clone)]
pub struct Context {
pub client: Client,
// NOTE(review): this field is the one removed by this commit — per-resource
// delete-watcher task handles, superseded by kube-rs finalizers. Shown here
// only because the diff view interleaves deleted and kept lines.
pub delete_watchers: Arc<HashMap<String, Box<JoinHandle<Result<(), DatabricksKubeError>>>>>,
configmap_store: Arc<Store<ConfigMap>>,
api_secret_store: Arc<Store<Secret>>,
}
Expand Down Expand Up @@ -72,7 +65,6 @@ impl Context {
api_secret_store,
client,
configmap_store,
delete_watchers: HashMap::new().into(),
}
.into()
}
Expand Down
13 changes: 13 additions & 0 deletions databricks-kube/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use crate::context::CONFIGMAP_NAME;
use databricks_rust_git_credentials::apis::Error as GitCredentialAPIError;
use databricks_rust_jobs::apis::Error as JobsAPIError;
use databricks_rust_repos::apis::Error as ReposAPIError;
use kube::runtime::finalizer::Error as KubeFinalizerError;

impl<T> From<JobsAPIError<T>> for DatabricksKubeError
where
Expand Down Expand Up @@ -34,6 +35,16 @@ where
}
}

impl<T> From<KubeFinalizerError<T>> for DatabricksKubeError
where
T: Debug,
T: Error,
{
fn from(e: KubeFinalizerError<T>) -> Self {
Self::FinalizerError(format!("{:?}", e))
}
}

#[derive(Debug)]
#[allow(dead_code)]
pub enum DatabricksKubeError {
Expand All @@ -46,6 +57,7 @@ pub enum DatabricksKubeError {
Shutdown(String),
ResourceUpdateError(String),
ResourceStatusError(String),
FinalizerError(String),
}

impl Display for DatabricksKubeError {
Expand Down Expand Up @@ -75,6 +87,7 @@ impl Display for DatabricksKubeError {
DatabricksKubeError::SecretMissingError => "The secret referenced by this resource is missing".to_owned(),
DatabricksKubeError::ResourceUpdateError(s) => format!("Unable to update K8S Resource {}", s),
DatabricksKubeError::ResourceStatusError(s) => format!("Unable to get status {}", s),
DatabricksKubeError::FinalizerError(s) => format!("Finalizer failed {}", s),
};
write!(f, "{}", msg)
}
Expand Down
6 changes: 1 addition & 5 deletions databricks-kube/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ async fn log_controller_event<TCRDType>(
{
match event {
Ok((object, _)) => log::info!("{} reconciled", object.name),
Err(e) => log::error!("{}", e),
Err(e) => log::error!("{:?}", e),
}
}

Expand Down Expand Up @@ -93,7 +93,6 @@ async fn main() -> Result<(), DatabricksKubeError> {

let job_controller = DatabricksJob::controller(ctx.clone());
let job_status_controller = DatabricksJob::status_controller(ctx.clone());
let job_ingest = DatabricksJob::ingest_task(ctx.clone());

let git_credential_controller = GitCredential::controller(ctx.clone());
let repo_controller = Repo::controller(ctx.clone());
Expand All @@ -119,9 +118,6 @@ async fn main() -> Result<(), DatabricksKubeError> {
})
},
)
.start("job_ingest", |_: SubsystemHandle<DatabricksKubeError>| {
job_ingest
})
.start(
"git_credential_controller",
|_: SubsystemHandle<DatabricksKubeError>| {
Expand Down
Loading

0 comments on commit 5eac87e

Please sign in to comment.