
Commit

fix todos
Signed-off-by: Adem Baccara <[email protected]>
Adembc committed Jul 13, 2024
1 parent 5f2597f commit 262a435
Showing 4 changed files with 61 additions and 55 deletions.
@@ -12,7 +12,7 @@ spec:
deferUpdates: false

## the WorkspaceKind to use
kind: "juptyerlab"
kind: "jupyterlab"

## options for "podTemplate"-type WorkspaceKinds
##
20 changes: 9 additions & 11 deletions workspaces/controller/config/samples/v1beta1_workspacekind.yaml
@@ -110,10 +110,10 @@ spec:
## - spec for Probe:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#probe-v1-core
##
probes:
startupProbe: {}
livenessProbe: {}
readinessProbe: {}
# probes:
# startupProbe: {}
# livenessProbe: {}
# readinessProbe: {}

## volume mount paths
##
@@ -191,11 +191,11 @@ spec:
## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#securitycontext-v1-core
##
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
allowPrivilegeEscalation: true
# capabilities:
# drop:
# - ALL
runAsNonRoot: false

## ==============================================================
## WORKSPACE OPTIONS
@@ -334,7 +334,6 @@ spec:
- key: "memory"
value: "2Gi"
hidden: false
redirect: {}
spec:
## affinity configs for the pod
## - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
@@ -374,7 +373,6 @@ spec:
- key: "gpu"
value: "1"
hidden: false
redirect: {}
spec:
affinity: {}
nodeSelector: {}
93 changes: 51 additions & 42 deletions workspaces/controller/internal/controller/workspace_controller.go
@@ -19,26 +19,27 @@ package controller
import (
"context"
"fmt"
"reflect"
"strings"

kubefloworgv1beta1 "github.com/kubeflow/notebooks/workspaces/controller/api/v1beta1"
"github.com/kubeflow/notebooks/workspaces/controller/internal/helper"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"strings"
)

const (
@@ -118,6 +119,7 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
workspaceKind := &kubefloworgv1beta1.WorkspaceKind{}
if err := r.Get(ctx, client.ObjectKey{Name: workspaceKindName}, workspaceKind); err != nil {
if apierrors.IsNotFound(err) {
log.Error(err, "workspaceKind not found in the cluster")
workspace.Status.State = kubefloworgv1beta1.WorkspaceStateError
workspace.Status.StateMessage = fmt.Sprintf(stateMsgErrorInvalidWorkspaceKind, workspaceKindName)
if err := r.Status().Update(ctx, workspace); err != nil {
@@ -194,24 +196,17 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
if *workspace.Spec.Paused && workspace.Status.PendingRestart && !*workspace.Spec.DeferUpdates {
workspace.Spec.PodTemplate.Options.ImageConfig = workspace.Status.PodTemplateOptions.ImageConfig.Desired
workspace.Spec.PodTemplate.Options.PodConfig = workspace.Status.PodTemplateOptions.PodConfig.Desired
workspace.Status.PendingRestart = false
//
// TODO: does `r.Update(` also update the `status`, if not, this needs to be done separately
//
if err := r.Update(ctx, workspace); err != nil {
log.Error(err, "unable to update Workspace")
//
// TODO: do we actually need to set `Requeue: true`?
//
return ctrl.Result{Requeue: true}, err
return ctrl.Result{}, err
}
workspace.Status.PendingRestart = false
if err := r.Status().Update(ctx, workspace); err != nil {
log.Error(err, "unable to update Workspace status")
return ctrl.Result{}, err
}
}

//
// TODO: reconcile the Istio VirtualService to expose the Workspace
// and implement the `spec.podTemplate.httpProxy` options
//

// generate StatefulSet
statefulSet, err := generateStatefulSet(workspace, workspaceKind, currentImageConfig.Spec, currentPodConfig.Spec)
if err != nil {
@@ -261,12 +256,8 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
} else {
foundStatefulSet := &ownedStatefulSets.Items[0]
statefulSetName = foundStatefulSet.ObjectMeta.Name
//
// TODO: confirm if this is the correct way to compare and update StatefulSets
//
if !reflect.DeepEqual(statefulSet.Spec, foundStatefulSet.Spec) {
if helper.CopyStatefulSetFields(statefulSet, foundStatefulSet) {
log.V(1).Info("Updating StatefulSet", "statefulSet", statefulSetName)
foundStatefulSet.Spec = statefulSet.Spec
if err := r.Update(ctx, foundStatefulSet); err != nil {
log.Error(err, "unable to update StatefulSet")
return ctrl.Result{}, err
@@ -320,12 +311,8 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
} else {
foundService := &ownedServices.Items[0]
serviceName = foundService.ObjectMeta.Name
//
// TODO: confirm if this is the correct way to compare and update Services
//
if !reflect.DeepEqual(service.Spec, foundService.Spec) {
if helper.CopyServiceFields(service, foundService) {
log.V(1).Info("Updating Service", "service", serviceName)
foundService.Spec = service.Spec
if err := r.Update(ctx, foundService); err != nil {
log.Error(err, "unable to update Service")
return ctrl.Result{}, err
@@ -334,6 +321,19 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
}

// reconcile VirtualService
//virtualService, err := generateVirtualService(workspace)
//if err != nil {
// log.Info("Unable to generate VirtualService...", err)
// return ctrl.Result{}, err
//}
//if err := ctrl.SetControllerReference(workspace, virtualService, r.Scheme); err != nil {
// log.Error(err, "unable to set controller reference on VirtualService")
// return ctrl.Result{}, err
//}
foundVirtual := &unstructured.Unstructured{}
foundVirtual.SetAPIVersion("networking.istio.io/v1alpha3")
foundVirtual.SetKind("VirtualService")
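The three lines above prepare an unstructured object because the Istio types are not registered in this controller's scheme. The creation path is still commented out, but the lookup would typically continue along the lines of the sketch below, which reuses the client, apierrors, and unstructured imports already shown in the diff; the object name and namespace are assumptions for illustration, not taken from the commit.

```go
// Sketch only: continue from the foundVirtual declared above.
err := r.Get(ctx, client.ObjectKey{Name: workspace.Name, Namespace: workspace.Namespace}, foundVirtual)
if apierrors.IsNotFound(err) {
    // no VirtualService yet; creation would follow once generateVirtualService is wired up
} else if err != nil {
    log.Error(err, "unable to fetch VirtualService")
    return ctrl.Result{}, err
}
```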
// fetch Pod
// NOTE: the Pod will be named "{statefulSetName}-0"
pod := &corev1.Pod{}
@@ -350,16 +350,14 @@ func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// TODO: figure out how to set `status.pauseTime`, it will probably have to be done in a webhook
//

//
// TODO: reduce the number of status update API calls by only updating the status when it changes
//

// update Workspace status
// update Workspace status only if it has changed
workspaceStatus := generateWorkspaceStatus(workspace, pod)
workspace.Status = workspaceStatus
if err := r.Status().Update(ctx, workspace); err != nil {
log.Error(err, "unable to update Workspace status")
return ctrl.Result{}, err
if !reflect.DeepEqual(workspace.Status, workspaceStatus) {
workspace.Status = workspaceStatus
if err := r.Status().Update(ctx, workspace); err != nil {
log.Error(err, "unable to update Workspace status")
return ctrl.Result{}, err
}
}
log.V(1).Info("finished reconciling Workspace")
return ctrl.Result{}, nil
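A side note rather than part of this commit: `reflect.DeepEqual` works for this status comparison, but controllers often use apimachinery's semantic equality instead, since it compares values such as resource quantities and timestamps by meaning rather than by struct layout. A hedged sketch of that variant, assuming the `k8s.io/apimachinery/pkg/api/equality` package is imported as `apiequality`:

```go
// Sketch: semantic comparison instead of reflect.DeepEqual.
if !apiequality.Semantic.DeepEqual(workspace.Status, workspaceStatus) {
    workspace.Status = workspaceStatus
    if err := r.Status().Update(ctx, workspace); err != nil {
        log.Error(err, "unable to update Workspace status")
        return ctrl.Result{}, err
    }
}
```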
@@ -670,6 +668,21 @@ func generateStatefulSet(workspace *kubefloworgv1beta1.Workspace, workspaceKind
volumes = append(volumes, dataVolume)
volumeMounts = append(volumeMounts, dataVolumeMount)
}
// Probes
var readinessProbe *corev1.Probe
var livenessProbe *corev1.Probe
var startupProbe *corev1.Probe
if workspaceKind.Spec.PodTemplate.Probes != nil {
if workspaceKind.Spec.PodTemplate.Probes.ReadinessProbe != nil {
readinessProbe = workspaceKind.Spec.PodTemplate.Probes.ReadinessProbe
}
if workspaceKind.Spec.PodTemplate.Probes.LivenessProbe != nil {
livenessProbe = workspaceKind.Spec.PodTemplate.Probes.LivenessProbe
}
if workspaceKind.Spec.PodTemplate.Probes.StartupProbe != nil {
startupProbe = workspaceKind.Spec.PodTemplate.Probes.StartupProbe
}
}
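For context on what flows through this nil guard, a WorkspaceKind author might configure a probe roughly as below. The values are purely illustrative assumptions, not taken from the samples in this commit, and the snippet reuses the corev1 and intstr imports already present in the file.

```go
// Illustrative only: an HTTP startup probe a WorkspaceKind could specify.
startupProbe = &corev1.Probe{
    ProbeHandler: corev1.ProbeHandler{
        HTTPGet: &corev1.HTTPGetAction{
            Path: "/api",               // hypothetical path
            Port: intstr.FromInt(8888), // hypothetical port
        },
    },
    FailureThreshold: 30,
    PeriodSeconds:    10,
}
```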

// add extra volumes
for _, extraVolume := range workspaceKind.Spec.PodTemplate.ExtraVolumes {
@@ -728,9 +741,9 @@ func generateStatefulSet(workspace *kubefloworgv1beta1.Workspace, workspaceKind
Image: imageConfigSpec.Image,
ImagePullPolicy: imagePullPolicy,
Ports: containerPorts,
ReadinessProbe: workspaceKind.Spec.PodTemplate.Probes.ReadinessProbe,
LivenessProbe: workspaceKind.Spec.PodTemplate.Probes.LivenessProbe,
StartupProbe: workspaceKind.Spec.PodTemplate.Probes.StartupProbe,
ReadinessProbe: readinessProbe,
LivenessProbe: livenessProbe,
StartupProbe: startupProbe,
SecurityContext: workspaceKind.Spec.PodTemplate.ContainerSecurityContext,
VolumeMounts: volumeMounts,
Env: containerEnv,
@@ -786,10 +799,6 @@ func generateService(workspace *kubefloworgv1beta1.Workspace, imageConfigSpec ku
func generateWorkspaceStatus(workspace *kubefloworgv1beta1.Workspace, pod *corev1.Pod) kubefloworgv1beta1.WorkspaceStatus {
status := workspace.Status

//
// TODO: review this logic, and ensure that the checks are ordered correctly so that the correct status is set
//

// STATUS: Terminating
if pod.GetDeletionTimestamp() != nil {
status.State = kubefloworgv1beta1.WorkspaceStateTerminating
1 change: 0 additions & 1 deletion workspaces/controller/internal/helper/helper.go
@@ -9,7 +9,6 @@ import (
corev1 "k8s.io/api/core/v1"
)

// TODO: if we no longer need these functions, feel free to remove
func CopyStatefulSetFields(from, to *appsv1.StatefulSet) bool {
requireUpdate := false
for k, v := range to.Labels {
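The helper.go fragment is collapsed after the labels loop. For readers unfamiliar with the pattern, a copy-and-report helper of this kind usually looks something like the sketch below; this is an assumed, simplified shape for illustration (using the appsv1 and reflect imports from the surrounding files), not the repository's actual implementation. The reconciler above then calls `r.Update` only when the helper reports a change, which avoids no-op writes and the reconcile churn they cause.

```go
// Sketch of the copy-and-detect-drift pattern (assumed, simplified; the real
// helper also reconciles labels, as the fragment above shows).
func CopyStatefulSetFieldsSketch(from, to *appsv1.StatefulSet) bool {
    requireUpdate := false

    // copy only mutable spec fields; immutable ones (e.g. the selector) are left alone
    if !reflect.DeepEqual(to.Spec.Replicas, from.Spec.Replicas) {
        to.Spec.Replicas = from.Spec.Replicas
        requireUpdate = true
    }
    if !reflect.DeepEqual(to.Spec.Template, from.Spec.Template) {
        to.Spec.Template = from.Spec.Template
        requireUpdate = true
    }

    return requireUpdate
}
```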
