diff --git a/cmd/broker/broker_suite_test.go b/cmd/broker/broker_suite_test.go
index 0e5248e4ac..8491e2c682 100644
--- a/cmd/broker/broker_suite_test.go
+++ b/cmd/broker/broker_suite_test.go
@@ -266,7 +266,9 @@ func NewBrokerSuiteTestWithConfig(t *testing.T, cfg *Config, version ...string)
 	expirationHandler := expiration.NewHandler(db.Instances(), db.Operations(), deprovisioningQueue, logs)
 	expirationHandler.AttachRoutes(ts.router)
 
-	runtimeHandler := kebRuntime.NewHandler(db.Instances(), db.Operations(), db.RuntimeStates(), db.InstancesArchived(), cfg.MaxPaginationPage, cfg.DefaultRequestRegion, provisionerClient, logs)
+	runtimeHandler := kebRuntime.NewHandler(db.Instances(), db.Operations(), db.RuntimeStates(), db.InstancesArchived(), cfg.MaxPaginationPage, cfg.DefaultRequestRegion, provisionerClient, cli, broker.KimConfig{
+		Enabled: false,
+	}, logs)
 	runtimeHandler.AttachRoutes(ts.router)
 
 	ts.httpServer = httptest.NewServer(ts.router)
diff --git a/cmd/broker/main.go b/cmd/broker/main.go
index e2cced6745..1d14157fe3 100644
--- a/cmd/broker/main.go
+++ b/cmd/broker/main.go
@@ -385,7 +385,10 @@ func main() {
 	// create list runtimes endpoint
 	runtimeHandler := runtime.NewHandler(db.Instances(), db.Operations(), db.RuntimeStates(), db.InstancesArchived(), cfg.MaxPaginationPage,
-		cfg.DefaultRequestRegion, provisionerClient, logs)
+		cfg.DefaultRequestRegion, provisionerClient,
+		cli,
+		cfg.Broker.KimConfig,
+		logs)
 	runtimeHandler.AttachRoutes(router)
 
 	// create expiration endpoint
diff --git a/common/runtime/model.go b/common/runtime/model.go
index 4cc0578183..a0b5eefd35 100644
--- a/common/runtime/model.go
+++ b/common/runtime/model.go
@@ -53,6 +53,7 @@ type RuntimeDTO struct {
 	AVSInternalEvaluationID int64                          `json:"avsInternalEvaluationID"`
 	KymaConfig              *gqlschema.KymaConfigInput     `json:"kymaConfig,omitempty"`
 	ClusterConfig           *gqlschema.GardenerConfigInput `json:"clusterConfig,omitempty"`
+	RuntimeConfig           *map[string]interface{}        `json:"runtimeConfig,omitempty"`
 }
 
 type RuntimeStatus struct {
@@ -119,6 +120,7 @@ const (
 	ClusterConfigParam  = "cluster_config"
 	ExpiredParam        = "expired"
 	GardenerConfigParam = "gardener_config"
+	RuntimeConfigParam  = "runtime_config"
 )
 
 type OperationDetail string
 
@@ -139,6 +141,8 @@ type ListParameters struct {
 	KymaConfig bool
 	// ClusterConfig specifies whether Gardener cluster configuration details should be included in the response for each runtime
 	ClusterConfig bool
+	// RuntimeResourceConfig specifies whether current Runtime Custom Resource details should be included in the response for each runtime
+	RuntimeResourceConfig bool
 	// GardenerConfig specifies whether current Gardener cluster configuration details from provisioner should be included in the response for each runtime
 	GardenerConfig bool
 	// GlobalAccountIDs parameter filters runtimes by specified global account IDs
diff --git a/internal/broker/broker.go b/internal/broker/broker.go
index 0422b22c02..3d5ab1f717 100644
--- a/internal/broker/broker.go
+++ b/internal/broker/broker.go
@@ -10,7 +10,6 @@ import (
 	"github.com/kyma-project/control-plane/components/provisioner/pkg/gqlschema"
 
 	"github.com/kyma-project/kyma-environment-broker/internal"
-	kim "github.com/kyma-project/kyma-environment-broker/internal/kim"
 )
 
 const (
@@ -50,7 +49,7 @@ type Config struct {
 	EnableShootAndSeedSameRegion bool `envconfig:"default=false"`
 	Binding                      BindingConfig
 
-	KimConfig kim.Config
+	KimConfig KimConfig
 
 	UseSmallerMachineTypes bool `envconfig:"default=false"`
 }
diff --git a/internal/kim/config.go b/internal/broker/kim_config.go
similarity index 55%
rename from internal/kim/config.go
rename to internal/broker/kim_config.go
index 1b51b82a85..f41cc4a048 100644
--- a/internal/kim/config.go
+++ b/internal/broker/kim_config.go
@@ -1,6 +1,6 @@
-package kim
+package broker
 
-type Config struct {
+type KimConfig struct {
 	Enabled      bool     `envconfig:"default=false"` // if true, KIM will be used
 	DryRun       bool     `envconfig:"default=true"`  // if true, only yamls are generated, no resources are created
 	ViewOnly     bool     `envconfig:"default=true"`  // if true, provisioner will control the process
@@ -8,7 +8,7 @@ type Config struct {
 	KimOnlyPlans []string `envconfig:"default=,"`
 }
 
-func (c *Config) IsEnabledForPlan(planName string) bool {
+func (c *KimConfig) IsEnabledForPlan(planName string) bool {
 	if c.Enabled == false {
 		return false
 	}
@@ -20,7 +20,7 @@ func (c *Config) IsEnabledForPlan(planName string) bool {
 	return false
 }
 
-func (c *Config) IsDrivenByKimOnly(planName string) bool {
+func (c *KimConfig) IsDrivenByKimOnly(planName string) bool {
 	if !c.IsEnabledForPlan(planName) {
 		return false
 	}
@@ -32,6 +32,21 @@ func (c *Config) IsDrivenByKimOnly(planName string) bool {
 	return false
 }
 
-func (c *Config) IsDrivenByKim(planName string) bool {
+func (c *KimConfig) IsPlanIdDrivenByKimOnly(planID string) bool {
+	planName := PlanIDsMapping[planID]
+	return c.IsDrivenByKimOnly(planName)
+}
+
+func (c *KimConfig) IsPlanIdDrivenByKim(planID string) bool {
+	planName := PlanIDsMapping[planID]
+	return c.IsDrivenByKim(planName)
+}
+
+func (c *KimConfig) IsDrivenByKim(planName string) bool {
 	return (c.IsEnabledForPlan(planName) && !c.ViewOnly && !c.DryRun) || c.IsDrivenByKimOnly(planName)
 }
+
+func (c *KimConfig) IsEnabledForPlanID(planID string) bool {
+	planName := PlanIDsMapping[planID]
+	return c.IsEnabledForPlan(planName)
+}
diff --git a/internal/kim/config_test.go b/internal/broker/kim_config_test.go
similarity index 82%
rename from internal/kim/config_test.go
rename to internal/broker/kim_config_test.go
index 7fa8c6eb29..492bb77053 100644
--- a/internal/kim/config_test.go
+++ b/internal/broker/kim_config_test.go
@@ -1,4 +1,4 @@
-package kim
+package broker
 
 import (
 	"testing"
@@ -7,7 +7,7 @@ import (
 )
 
 func TestIsEnabled_KimDisabled(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:  false,
 		Plans:    []string{"gcp", "preview"},
 		ViewOnly: false,
@@ -22,7 +22,7 @@ func TestIsEnabled_KimDisabled(t *testing.T) {
 }
 
 func TestIsEnabled_KimEnabledForPreview(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:  true,
 		Plans:    []string{"preview"},
 		ViewOnly: false,
@@ -38,7 +38,7 @@ func TestIsEnabled_KimEnabledForPreview(t *testing.T) {
 }
 
 func TestIsEnabled_KimEnabledForPreview_DryRun(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:  true,
 		Plans:    []string{"preview"},
 		ViewOnly: false,
@@ -54,7 +54,7 @@ func TestIsEnabled_KimEnabledForPreview_DryRun(t *testing.T) {
 }
 
 func TestDrivenByKimOnly_KimDisabled(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:      false,
 		Plans:        []string{"gcp", "preview"},
 		KimOnlyPlans: []string{"preview"},
@@ -67,10 +67,16 @@ func TestDrivenByKimOnly_KimDisabled(t *testing.T) {
 	assert.False(t, config.IsDrivenByKim("gcp"))
 	assert.False(t, config.IsDrivenByKim("preview"))
 	assert.False(t, config.IsDrivenByKimOnly("gcp"))
 	assert.False(t, config.IsDrivenByKimOnly("preview"))
+	assert.False(t, config.IsPlanIdDrivenByKimOnly("ca6e5357-707f-4565-bbbd-b3ab732597c6"))
+	assert.False(t, config.IsPlanIdDrivenByKimOnly("5cb3d976-b85c-42ea-a636-79cadda109a9"))
+	assert.False(t, config.IsPlanIdDrivenByKim("ca6e5357-707f-4565-bbbd-b3ab732597c6"))
+	assert.False(t, config.IsPlanIdDrivenByKim("5cb3d976-b85c-42ea-a636-79cadda109a9"))
+	assert.False(t, config.IsPlanIdDrivenByKimOnly("ca6e5357-707f-4565-bbbd-b3ab732597c6"))
+	assert.False(t, config.IsPlanIdDrivenByKimOnly("5cb3d976-b85c-42ea-a636-79cadda109a9"))
 }
 
 func TestDrivenByKimOnly_PreviewByKimOnly(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:      true,
 		Plans:        []string{"preview"},
 		KimOnlyPlans: []string{"preview"},
@@ -86,7 +92,7 @@ func TestDrivenByKimOnly_PreviewByKimOnly(t *testing.T) {
 }
 
 func TestDrivenByKimOnly_PreviewByKimOnlyButNotEnabled(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:      true,
 		KimOnlyPlans: []string{"preview"},
 		ViewOnly:     false,
@@ -101,7 +107,7 @@ func TestDrivenByKimOnly_PreviewByKimOnlyButNotEnabled(t *testing.T) {
 }
 
 func TestDrivenByKim_ButNotByKimOnly(t *testing.T) {
-	config := &Config{
+	config := &KimConfig{
 		Enabled:      true,
 		KimOnlyPlans: []string{"no-plan"},
 		Plans:        []string{"preview"},
diff --git a/internal/process/deprovisioning/remove_runtime.go b/internal/process/deprovisioning/remove_runtime.go
index b00d7aeafb..0c27b08847 100644
--- a/internal/process/deprovisioning/remove_runtime.go
+++ b/internal/process/deprovisioning/remove_runtime.go
@@ -4,8 +4,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/kyma-environment-broker/internal/storage/dberr"
 
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
@@ -23,10 +21,10 @@ type RemoveRuntimeStep struct {
 	instanceStorage    storage.Instances
 	provisionerClient  provisioner.Client
 	provisionerTimeout time.Duration
-	kimConfig          kim.Config
+	kimConfig          broker.KimConfig
 }
 
-func NewRemoveRuntimeStep(os storage.Operations, is storage.Instances, cli provisioner.Client, provisionerTimeout time.Duration, kimConfig kim.Config) *RemoveRuntimeStep {
+func NewRemoveRuntimeStep(os storage.Operations, is storage.Instances, cli provisioner.Client, provisionerTimeout time.Duration, kimConfig broker.KimConfig) *RemoveRuntimeStep {
 	return &RemoveRuntimeStep{
 		operationManager: process.NewOperationManager(os),
 		instanceStorage:  is,
@@ -41,7 +39,7 @@ func (s *RemoveRuntimeStep) Name() string {
 }
 
 func (s *RemoveRuntimeStep) Run(operation internal.Operation, log logrus.FieldLogger) (internal.Operation, time.Duration, error) {
-	if s.kimConfig.IsDrivenByKimOnly(broker.PlanNamesMapping[operation.ProvisioningParameters.PlanID]) {
+	if s.kimConfig.IsDrivenByKim(broker.PlanNamesMapping[operation.ProvisioningParameters.PlanID]) {
 		log.Infof("KIM is driving the process for plan %s, skipping", broker.PlanNamesMapping[operation.ProvisioningParameters.PlanID])
 		return operation, 0, nil
 	}
diff --git a/internal/process/provisioning/check_runtime_step.go b/internal/process/provisioning/check_runtime_step.go
index 502b09b548..1d7f3d5fa8 100644
--- a/internal/process/provisioning/check_runtime_step.go
+++ b/internal/process/provisioning/check_runtime_step.go
@@ -5,7 +5,6 @@ import (
 	"time"
 
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
 
 	"github.com/sirupsen/logrus"
 
@@ -21,13 +20,13 @@ type CheckRuntimeStep struct {
 	provisionerClient   provisioner.Client
 	operationManager    *process.OperationManager
 	provisioningTimeout time.Duration
-	kimConfig           kim.Config
+	kimConfig           broker.KimConfig
 }
 
 func NewCheckRuntimeStep(os storage.Operations,
 	provisionerClient provisioner.Client,
 	provisioningTimeout time.Duration,
-	kimConfig kim.Config) *CheckRuntimeStep {
+	kimConfig broker.KimConfig) *CheckRuntimeStep {
 	return &CheckRuntimeStep{
 		provisionerClient: provisionerClient,
 		operationManager:  process.NewOperationManager(os),
diff --git a/internal/process/provisioning/check_runtime_step_test.go b/internal/process/provisioning/check_runtime_step_test.go
index 55823d536a..e88344b6c5 100644
--- a/internal/process/provisioning/check_runtime_step_test.go
+++ b/internal/process/provisioning/check_runtime_step_test.go
@@ -4,8 +4,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/control-plane/components/provisioner/pkg/gqlschema"
 	"github.com/kyma-project/kyma-environment-broker/internal"
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
@@ -45,7 +43,7 @@ func TestCheckRuntimeStep_RunProvisioningSucceeded(t *testing.T) {
 		RuntimeID: ptr.String(statusRuntimeID),
 	})
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled: false,
 	}
 
@@ -92,7 +90,7 @@ func TestCheckRuntimeStep_RunProvisioningSucceeded_WithKimOnly(t *testing.T) {
 		RuntimeID: ptr.String(statusRuntimeID),
 	})
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:      true,
 		Plans:        []string{"gcp"},
 		KimOnlyPlans: []string{"gcp"},
diff --git a/internal/process/provisioning/create_runtime_resource_step.go b/internal/process/provisioning/create_runtime_resource_step.go
index 982796ab34..ad5580c4a6 100644
--- a/internal/process/provisioning/create_runtime_resource_step.go
+++ b/internal/process/provisioning/create_runtime_resource_step.go
@@ -26,8 +26,6 @@ import (
 	imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/kyma-environment-broker/internal"
 	"github.com/kyma-project/kyma-environment-broker/internal/process"
 	"github.com/kyma-project/kyma-environment-broker/internal/storage"
@@ -39,14 +37,14 @@ type CreateRuntimeResourceStep struct {
 	instanceStorage            storage.Instances
 	runtimeStateStorage        storage.RuntimeStates
 	k8sClient                  client.Client
-	kimConfig                  kim.Config
+	kimConfig                  broker.KimConfig
 	config                     input.Config
 	trialPlatformRegionMapping map[string]string
 	useSmallerMachineTypes     bool
 	oidcDefaultValues          internal.OIDCConfigDTO
 }
 
-func NewCreateRuntimeResourceStep(os storage.Operations, is storage.Instances, k8sClient client.Client, kimConfig kim.Config, cfg input.Config,
+func NewCreateRuntimeResourceStep(os storage.Operations, is storage.Instances, k8sClient client.Client, kimConfig broker.KimConfig, cfg input.Config,
 	trialPlatformRegionMapping map[string]string, useSmallerMachines bool, oidcDefaultValues internal.OIDCConfigDTO) *CreateRuntimeResourceStep {
 	return &CreateRuntimeResourceStep{
 		operationManager: process.NewOperationManager(os),
diff --git a/internal/process/provisioning/create_runtime_resource_step_test.go b/internal/process/provisioning/create_runtime_resource_step_test.go
index 47c14115a2..bef162d1b8 100644
--- a/internal/process/provisioning/create_runtime_resource_step_test.go
+++ b/internal/process/provisioning/create_runtime_resource_step_test.go
@@ -27,8 +27,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	"github.com/kyma-project/kyma-environment-broker/internal/fixture"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/kyma-environment-broker/internal/storage"
 
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
@@ -816,8 +814,8 @@ func getClientForTests(t *testing.T) client.Client {
 	return cli
 }
 
-func fixKimConfig(planName string, dryRun bool) kim.Config {
-	return kim.Config{
+func fixKimConfig(planName string, dryRun bool) broker.KimConfig {
+	return broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{planName},
 		ViewOnly: false,
@@ -825,8 +823,8 @@ func fixKimConfig(planName string, dryRun bool) kim.Config {
 	}
 }
 
-func fixKimConfigWithAllPlans(dryRun bool) kim.Config {
-	return kim.Config{
+func fixKimConfigWithAllPlans(dryRun bool) broker.KimConfig {
+	return broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure", "gcp", "azure_lite", "trial", "aws", "free", "preview", "sap-converged-cloud"},
 		ViewOnly: false,
@@ -834,15 +832,6 @@ func fixKimConfigWithAllPlans(dryRun bool) kim.Config {
 	}
 }
 
-func fixKimConfigProvisionerDriven(planName string, dryRun bool) kim.Config {
-	return kim.Config{
-		Enabled:  true,
-		Plans:    []string{planName},
-		ViewOnly: true,
-		DryRun:   dryRun,
-	}
-}
-
 func fixInstanceAndOperation(planID, region, platformRegion string) (internal.Instance, internal.Operation) {
 	instance := fixInstance()
 	operation := fixOperationForCreateRuntimeResourceStep(OperationID, instance.InstanceID, planID, region, platformRegion)
@@ -884,22 +873,3 @@ modules: []
 `
 	return operation
 }
-
-func fixProvisionerParameters(cloudProvider internal.CloudProvider, region string) internal.ProvisioningParametersDTO {
-	return internal.ProvisioningParametersDTO{
-		Name:         "cluster-test",
-		VolumeSizeGb: ptr.Integer(50),
-		MachineType:  ptr.String("Standard_D8_v3"),
-		Region:       ptr.String(region),
-		Purpose:      ptr.String("Purpose"),
-		LicenceType:  ptr.String("LicenceType"),
-		Zones:        []string{"1"},
-		AutoScalerParameters: internal.AutoScalerParameters{
-			AutoScalerMin:  ptr.Integer(3),
-			AutoScalerMax:  ptr.Integer(10),
-			MaxSurge:       ptr.Integer(4),
-			MaxUnavailable: ptr.Integer(1),
-		},
-		Provider: &cloudProvider,
-	}
-}
diff --git a/internal/process/provisioning/create_runtime_without_kyma_step.go b/internal/process/provisioning/create_runtime_without_kyma_step.go
index fb72179203..c1a7166734 100644
--- a/internal/process/provisioning/create_runtime_without_kyma_step.go
+++ b/internal/process/provisioning/create_runtime_without_kyma_step.go
@@ -4,11 +4,9 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/broker"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/control-plane/components/provisioner/pkg/gqlschema"
 	"github.com/kyma-project/kyma-environment-broker/internal"
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 	kebError "github.com/kyma-project/kyma-environment-broker/internal/error"
 	"github.com/kyma-project/kyma-environment-broker/internal/process"
 	"github.com/kyma-project/kyma-environment-broker/internal/provisioner"
@@ -30,10 +28,10 @@ type CreateRuntimeWithoutKymaStep struct {
 	instanceStorage     storage.Instances
 	runtimeStateStorage storage.RuntimeStates
 	provisionerClient   provisioner.Client
-	kimConfig           kim.Config
+	kimConfig           broker.KimConfig
 }
 
-func NewCreateRuntimeWithoutKymaStep(os storage.Operations, runtimeStorage storage.RuntimeStates, is storage.Instances, cli provisioner.Client, kimConfig kim.Config) *CreateRuntimeWithoutKymaStep {
+func NewCreateRuntimeWithoutKymaStep(os storage.Operations, runtimeStorage storage.RuntimeStates, is storage.Instances, cli provisioner.Client, kimConfig broker.KimConfig) *CreateRuntimeWithoutKymaStep {
 	return &CreateRuntimeWithoutKymaStep{
 		operationManager: process.NewOperationManager(os),
 		instanceStorage:  is,
diff --git a/internal/process/provisioning/create_runtime_without_kyma_test.go b/internal/process/provisioning/create_runtime_without_kyma_test.go
index 9877263a5c..18916d984a 100644
--- a/internal/process/provisioning/create_runtime_without_kyma_test.go
+++ b/internal/process/provisioning/create_runtime_without_kyma_test.go
@@ -5,8 +5,6 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/control-plane/components/provisioner/pkg/gqlschema"
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 	"github.com/kyma-project/kyma-environment-broker/internal/provider"
@@ -32,7 +30,7 @@ func TestCreateRuntimeWithoutKyma_Run(t *testing.T) {
 	err = memoryStorage.Instances().Insert(fixInstance())
 	assert.NoError(t, err)
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled: false,
 	}
 
@@ -82,7 +80,7 @@ func TestCreateRuntimeWithoutKyma_SkipForKIM(t *testing.T) {
 	err = memoryStorage.Instances().Insert(fixInstance())
 	assert.NoError(t, err)
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:      true,
 		Plans:        []string{"gcp"},
 		KimOnlyPlans: []string{"gcp"},
@@ -130,7 +128,7 @@ func TestCreateRuntimeWithoutKyma_RunWithEuAccess(t *testing.T) {
 	err = memoryStorage.Instances().Insert(fixInstance())
 	assert.NoError(t, err)
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled: false,
 	}
 
@@ -242,7 +240,7 @@ func TestCreateRuntimeWithoutKymaStep_RunWithBadRequestError(t *testing.T) {
 	err := memoryStorage.Operations().InsertOperation(operation)
 	assert.NoError(t, err)
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled: false,
 	}
diff --git a/internal/process/provisioning/get_kubeconfig_step.go b/internal/process/provisioning/get_kubeconfig_step.go
index 9b9f57f2ed..a8e7a8ff81 100644
--- a/internal/process/provisioning/get_kubeconfig_step.go
+++ b/internal/process/provisioning/get_kubeconfig_step.go
@@ -3,8 +3,6 @@ package provisioning
 import (
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 
 	"github.com/sirupsen/logrus"
@@ -18,12 +16,12 @@ type GetKubeconfigStep struct {
 	provisionerClient   provisioner.Client
 	operationManager    *process.OperationManager
 	provisioningTimeout time.Duration
-	kimConfig           kim.Config
+	kimConfig           broker.KimConfig
 }
 
 func NewGetKubeconfigStep(os storage.Operations,
 	provisionerClient provisioner.Client,
-	kimConfig kim.Config) *GetKubeconfigStep {
+	kimConfig broker.KimConfig) *GetKubeconfigStep {
 	return &GetKubeconfigStep{
 		provisionerClient: provisionerClient,
 		operationManager:  process.NewOperationManager(os),
diff --git a/internal/process/provisioning/get_kubeconfig_test.go b/internal/process/provisioning/get_kubeconfig_test.go
index f1b352a0e1..9952f40c97 100644
--- a/internal/process/provisioning/get_kubeconfig_test.go
+++ b/internal/process/provisioning/get_kubeconfig_test.go
@@ -3,8 +3,6 @@ package provisioning
 import (
 	"testing"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	"github.com/kyma-project/kyma-environment-broker/internal"
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 	"github.com/kyma-project/kyma-environment-broker/internal/fixture"
@@ -24,7 +22,7 @@ const (
 
 func TestGetKubeconfigStep(t *testing.T) {
 
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled: false,
 	}
diff --git a/internal/process/steps/gardener_cluster.go b/internal/process/steps/gardener_cluster.go
index 97e39841c1..53d5c1bd5a 100644
--- a/internal/process/steps/gardener_cluster.go
+++ b/internal/process/steps/gardener_cluster.go
@@ -7,10 +7,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/broker"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
 
-	"github.com/kyma-project/kyma-environment-broker/internal"
+	"github.com/kyma-project/kyma-environment-broker/internal"
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 	"github.com/kyma-project/kyma-environment-broker/internal/process"
 	"github.com/kyma-project/kyma-environment-broker/internal/storage"
 	"github.com/sirupsen/logrus"
@@ -23,7 +21,7 @@ import (
 
 const GardenerClusterStateReady = "Ready"
 
-func NewSyncGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig kim.Config) *syncGardenerCluster {
+func NewSyncGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig broker.KimConfig) *syncGardenerCluster {
 	return &syncGardenerCluster{
 		k8sClient: k8sClient,
 		kimConfig: kimConfig,
@@ -31,7 +29,7 @@ func NewSyncGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig kim.Config) *syncGardenerCluster {
 	}
 }
 
-func NewCheckGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig kim.Config, gardenerClusterStepTimeout time.Duration) *checkGardenerCluster {
+func NewCheckGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig broker.KimConfig, gardenerClusterStepTimeout time.Duration) *checkGardenerCluster {
 	return &checkGardenerCluster{
 		k8sClient: k8sClient,
 		kimConfig: kimConfig,
@@ -43,7 +41,7 @@ func NewCheckGardenerCluster(os storage.Operations, k8sClient client.Client, kimConfig kim.Config, gardenerClusterStepTimeout time.Duration) *checkGardenerCluster {
 type checkGardenerCluster struct {
 	k8sClient                  client.Client
 	operationManager           *process.OperationManager
-	kimConfig                  kim.Config
+	kimConfig                  broker.KimConfig
 	gardenerClusterStepTimeout time.Duration
 }
 
@@ -96,7 +94,7 @@ func (s *checkGardenerCluster) GetGardenerCluster(name string, namespace string)
 
 type syncGardenerCluster struct {
 	k8sClient        client.Client
-	kimConfig        kim.Config
+	kimConfig        broker.KimConfig
 	operationManager *process.OperationManager
 }
 
diff --git a/internal/process/steps/gardener_cluster_test.go b/internal/process/steps/gardener_cluster_test.go
index ffcc489b3a..08c0015c04 100644
--- a/internal/process/steps/gardener_cluster_test.go
+++ b/internal/process/steps/gardener_cluster_test.go
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 
 	"github.com/pivotal-cf/brokerapi/v8/domain"
 
@@ -53,7 +53,7 @@ spec:
 func TestSyncGardenerCluster_RunWithExistingResource(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: true,
@@ -107,7 +107,7 @@ spec:
 func TestSyncGardenerCluster_Run(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: true,
@@ -149,13 +149,12 @@ spec:
 func TestCheckGardenerCluster_RunWhenReady(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: true,
 		DryRun:   false,
 	}
-
 	existingGC := NewGardenerCluster("runtime-id-000", "kcp-system")
 	err := existingGC.SetState("Ready")
 	assert.NoError(t, err)
@@ -179,7 +178,7 @@ func TestCheckGardenerCluster_RunWhenReady(t *testing.T) {
 
 func TestCheckGardenerCluster_RunWhenNotReady_OperationFail(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: true,
@@ -212,7 +211,7 @@ func TestCheckGardenerCluster_RunWhenNotReady_OperationFail(t *testing.T) {
 func TestCheckGardenerCluster_IgnoreWhenNotReadyButKimDrives(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: false,
@@ -244,7 +243,7 @@ func TestCheckGardenerCluster_IgnoreWhenNotReadyButKimDrives(t *testing.T) {
 func TestCheckGardenerCluster_IgnoreWhenNotReadyButKimOnlyPlanUsed(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:      true,
 		Plans:        []string{"azure"},
 		KimOnlyPlans: []string{"azure"},
@@ -277,7 +276,7 @@ func TestCheckGardenerCluster_IgnoreWhenNotReadyButKimOnlyPlanUsed(t *testing.T)
 func TestCheckGardenerCluster_RunWhenNotReady_Retry(t *testing.T) {
 	// given
 	os := storage.NewMemoryStorage().Operations()
-	kimConfig := kim.Config{
+	kimConfig := broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: true,
diff --git a/internal/process/steps/lifecycle_manager.go b/internal/process/steps/lifecycle_manager.go
index 3bd8093d47..8ca24366d7 100644
--- a/internal/process/steps/lifecycle_manager.go
+++ b/internal/process/steps/lifecycle_manager.go
@@ -56,11 +56,15 @@ func KymaName(operation internal.Operation) string {
 }
 
 func KymaRuntimeResourceName(operation internal.Operation) string {
-	return strings.ToLower(operation.RuntimeID)
+	return KymaRuntimeResourceNameFromID(operation.RuntimeID)
 }
 
 func KymaNameFromInstance(instance *internal.Instance) string {
-	return strings.ToLower(instance.RuntimeID)
+	return KymaRuntimeResourceNameFromID(instance.RuntimeID)
+}
+
+func KymaRuntimeResourceNameFromID(ID string) string {
+	return strings.ToLower(ID)
 }
 
 func CreateKymaNameFromOperation(operation internal.Operation) string {
diff --git a/internal/process/steps/runtime_resource.go b/internal/process/steps/runtime_resource.go
index 1c58458d78..ab2ced81bb 100644
--- a/internal/process/steps/runtime_resource.go
+++ b/internal/process/steps/runtime_resource.go
@@ -5,10 +5,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kyma-project/kyma-environment-broker/internal/broker"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
-
 	imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
 
 	"github.com/kyma-project/kyma-environment-broker/internal"
 	"github.com/kyma-project/kyma-environment-broker/internal/process"
@@ -19,7 +17,7 @@ import (
 
 const RuntimeResourceStateReady = "Ready"
 
-func NewCheckRuntimeResourceStep(os storage.Operations, k8sClient client.Client, kimConfig kim.Config, runtimeResourceStepTimeout time.Duration) *checkRuntimeResource {
+func NewCheckRuntimeResourceStep(os storage.Operations, k8sClient client.Client, kimConfig broker.KimConfig, runtimeResourceStepTimeout time.Duration) *checkRuntimeResource {
 	return &checkRuntimeResource{
 		k8sClient:        k8sClient,
 		operationManager: process.NewOperationManager(os),
@@ -30,7 +28,7 @@ func NewCheckRuntimeResourceStep(os storage.Operations, k8sClient client.Client,
 
 type checkRuntimeResource struct {
 	k8sClient                  client.Client
-	kimConfig                  kim.Config
+	kimConfig                  broker.KimConfig
 	operationManager           *process.OperationManager
 	runtimeResourceStepTimeout time.Duration
 }
diff --git a/internal/process/steps/runtime_resource_test.go b/internal/process/steps/runtime_resource_test.go
index 9f9a3efdad..d26ecd75ff 100644
--- a/internal/process/steps/runtime_resource_test.go
+++ b/internal/process/steps/runtime_resource_test.go
@@ -4,8 +4,9 @@ import (
 	"testing"
 	"time"
 
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
+
 	imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
-	"github.com/kyma-project/kyma-environment-broker/internal/kim"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/pivotal-cf/brokerapi/v8/domain"
@@ -103,8 +104,8 @@ func TestCheckRuntimeResource_RunWhenNotReady_Retry(t *testing.T) {
 	assert.NotZero(t, backoff)
 }
 
-func fixKimConfigForAzure() kim.Config {
-	return kim.Config{
+func fixKimConfigForAzure() broker.KimConfig {
+	return broker.KimConfig{
 		Enabled:  true,
 		Plans:    []string{"azure"},
 		ViewOnly: false,
diff --git a/internal/provisioner/fake_client.go b/internal/provisioner/fake_client.go
index 6df2c9e2c7..4aedf73f1d 100644
--- a/internal/provisioner/fake_client.go
+++ b/internal/provisioner/fake_client.go
@@ -20,6 +20,8 @@ type runtime struct {
 	runtimeInput schema.ProvisionRuntimeInput
 }
 
+type runtimesSet map[string]interface{}
+
 type FakeClient struct {
 	mu          sync.Mutex
 	graphqlizer Graphqlizer
@@ -32,12 +34,27 @@ type FakeClient struct {
 
 	gardenerClient    dynamic.Interface
 	gardenerNamespace string
+
+	// needed in the transition period
+	kimOnlyDrivenRuntimes runtimesSet
 }
 
 func NewFakeClient() *FakeClient {
 	return NewFakeClientWithGardener(nil, "")
 }
 
+func NewFakeClientWithKimOnlyDrivenRuntimes(kimOnlyDrivenRuntimes runtimesSet) *FakeClient {
+	return &FakeClient{
+		graphqlizer:           Graphqlizer{},
+		runtimes:              []runtime{},
+		operations:            make(map[string]schema.OperationStatus),
+		upgrades:              make(map[string]schema.UpgradeRuntimeInput),
+		shootUpgrades:         make(map[string]schema.UpgradeShootInput),
+		gardenerClient:        nil,
+		kimOnlyDrivenRuntimes: kimOnlyDrivenRuntimes,
+	}
+}
+
 func NewFakeClientWithGardener(gc dynamic.Interface, ns string) *FakeClient {
 	return &FakeClient{
 		graphqlizer: Graphqlizer{},
@@ -205,6 +222,11 @@ func (c *FakeClient) RuntimeStatus(accountID, runtimeID string) (schema.RuntimeS
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
+	// simulating provisioner behavior when the runtime is KIM-only driven
+	if _, ok := c.kimOnlyDrivenRuntimes[runtimeID]; ok {
+		return schema.RuntimeStatus{}, fmt.Errorf("not found")
+	}
+
 	for _, ops := range c.operations {
 		if *ops.RuntimeID == runtimeID {
 			return schema.RuntimeStatus{
diff --git a/internal/runtime/handler.go b/internal/runtime/handler.go
index bb40eb6f6b..ef2711c37e 100644
--- a/internal/runtime/handler.go
+++ b/internal/runtime/handler.go
@@ -1,9 +1,17 @@
 package runtime
 
 import (
+	"context"
 	"fmt"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"net/http"
 
+	"github.com/kyma-project/kyma-environment-broker/internal/process/steps"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/sirupsen/logrus"
 
 	"github.com/kyma-project/kyma-environment-broker/internal/broker"
@@ -32,12 +40,16 @@ type Handler struct {
 	converter         Converter
 	defaultMaxPage    int
 	provisionerClient provisioner.Client
+	k8sClient         client.Client
+	kimConfig         broker.KimConfig
 	logger            logrus.FieldLogger
 }
 
 func NewHandler(instanceDb storage.Instances, operationDb storage.Operations, runtimeStatesDb storage.RuntimeStates,
 	instancesArchived storage.InstancesArchived, defaultMaxPage int, defaultRequestRegion string,
-	provisionerClient provisioner.Client, logger logrus.FieldLogger) *Handler {
+	provisionerClient provisioner.Client,
+	k8sClient client.Client, kimConfig broker.KimConfig,
+	logger logrus.FieldLogger) *Handler {
 	return &Handler{
 		instancesDb:         instanceDb,
 		operationsDb:        operationDb,
@@ -46,6 +58,8 @@ func NewHandler(instanceDb storage.Instances, operationDb storage.Operations, runtimeStatesDb storage.RuntimeStates,
 		defaultMaxPage:      defaultMaxPage,
 		provisionerClient:   provisionerClient,
 		instancesArchivedDb: instancesArchived,
+		kimConfig:           kimConfig,
+		k8sClient:           k8sClient,
 		logger:              logger.WithField("service", "RuntimeHandler"),
 	}
 }
@@ -177,6 +191,7 @@ func (h *Handler) getRuntimes(w http.ResponseWriter, req *http.Request) {
 	kymaConfig := getBoolParam(pkg.KymaConfigParam, req)
 	clusterConfig := getBoolParam(pkg.ClusterConfigParam, req)
 	gardenerConfig := getBoolParam(pkg.GardenerConfigParam, req)
+	runtimeResourceConfig := getBoolParam(pkg.RuntimeConfigParam, req)
 
 	instances, count, totalCount, err := h.listInstances(filter)
 	if err != nil {
@@ -205,13 +220,34 @@ func (h *Handler) getRuntimes(w http.ResponseWriter, req *http.Request) {
 			httputil.WriteErrorResponse(w, http.StatusInternalServerError, err)
 			return
 		}
-		err = h.setRuntimeOptionalAttributes(&dto, kymaConfig, clusterConfig, gardenerConfig)
+
+		instanceDrivenByKimOnly := h.kimConfig.IsDrivenByKimOnly(dto.ServicePlanName)
+
+		err = h.setRuntimeOptionalAttributes(&dto, kymaConfig, clusterConfig, gardenerConfig, instanceDrivenByKimOnly)
 		if err != nil {
 			h.logger.Warn(fmt.Sprintf("unable to set optional attributes: %s", err.Error()))
 			httputil.WriteErrorResponse(w, http.StatusInternalServerError, err)
 			return
 		}
 
+		if runtimeResourceConfig && dto.RuntimeID != "" {
+			runtimeResourceName, runtimeNamespaceName := h.getRuntimeNamesFromLastOperation(dto)
+
+			runtimeResourceObject := &unstructured.Unstructured{}
+			runtimeResourceObject.SetGroupVersionKind(RuntimeResourceGVK())
+			err = h.k8sClient.Get(context.Background(), client.ObjectKey{
+				Namespace: runtimeNamespaceName,
+				Name:      runtimeResourceName,
+			}, runtimeResourceObject)
+			switch {
+			case errors.IsNotFound(err):
+				h.logger.Info(fmt.Sprintf("Runtime resource %s/%s: is not found: %s", dto.InstanceID, dto.RuntimeID, err.Error()))
+			case err != nil:
+				h.logger.Warn(fmt.Sprintf("unable to get Runtime resource %s/%s: %s", dto.InstanceID, dto.RuntimeID, err.Error()))
+			default:
+				dto.RuntimeConfig = &runtimeResourceObject.Object
+			}
+		}
+
 		toReturn = append(toReturn, dto)
 	}
 
@@ -223,6 +259,20 @@ func (h *Handler) getRuntimes(w http.ResponseWriter, req *http.Request) {
 	httputil.WriteResponse(w, http.StatusOK, runtimePage)
 }
 
+func (h *Handler) getRuntimeNamesFromLastOperation(dto pkg.RuntimeDTO) (string, string) {
+	// TODO get rid of additional DB query - we have this info fetched from DB but it is tedious to pass it through
+	op, err := h.operationsDb.GetLastOperation(dto.InstanceID)
+	runtimeResourceName := steps.KymaRuntimeResourceNameFromID(dto.RuntimeID)
+	runtimeNamespaceName := "kcp-system"
+	if err == nil && op.RuntimeResourceName != "" {
+		runtimeResourceName = op.RuntimeResourceName
+	}
+	if err == nil && op.KymaResourceNamespace != "" {
+		runtimeNamespaceName = op.KymaResourceNamespace
+	}
+	return runtimeResourceName, runtimeNamespaceName
+}
+
 func (h *Handler) takeLastNonDryRunClusterOperations(oprs []internal.UpgradeClusterOperation) ([]internal.UpgradeClusterOperation, int) {
 	toReturn := make([]internal.UpgradeClusterOperation, 0)
 	totalCount := 0
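(Editor's note, not part of the patch: the hunk above fetches the Runtime CR generically, as unstructured data keyed by GroupVersionKind, rather than linking the infrastructure-manager API types. Below is a minimal, self-contained sketch of that lookup pattern against controller-runtime's fake client, mirroring what the handler and its tests do; the resource name and namespace are illustrative only.)

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	gvk := schema.GroupVersionKind{
		Group:   "infrastructuremanager.kyma-project.io",
		Version: "v1",
		Kind:    "Runtime",
	}

	// Seed a fake cluster with one Runtime resource, as the handler tests do.
	seeded := &unstructured.Unstructured{}
	seeded.SetGroupVersionKind(gvk)
	seeded.SetName("runtime-test1")
	seeded.SetNamespace("kcp-system")
	k8s := fake.NewClientBuilder().WithRuntimeObjects(seeded).Build()

	// The handler performs the same GVK-typed, unstructured Get; on success the
	// raw object map becomes the runtimeConfig field of the RuntimeDTO.
	obj := &unstructured.Unstructured{}
	obj.SetGroupVersionKind(gvk)
	err := k8s.Get(context.Background(), client.ObjectKey{Namespace: "kcp-system", Name: "runtime-test1"}, obj)
	fmt.Println(err, obj.GetName()) // <nil> runtime-test1
}
```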
@@ -353,7 +403,8 @@ func (h *Handler) setRuntimeLastOperation(dto *pkg.RuntimeDTO) error {
 	return nil
 }
 
-func (h *Handler) setRuntimeOptionalAttributes(dto *pkg.RuntimeDTO, kymaConfig, clusterConfig, gardenerConfig bool) error {
+func (h *Handler) setRuntimeOptionalAttributes(dto *pkg.RuntimeDTO, kymaConfig, clusterConfig, gardenerConfig, drivenByKimOnly bool) error {
+
 	if kymaConfig || clusterConfig {
 		states, err := h.runtimeStatesDb.ListByRuntimeID(dto.RuntimeID)
 		if err != nil && !dberr.IsNotFound(err) {
@@ -374,12 +425,14 @@ func (h *Handler) setRuntimeOptionalAttributes(dto *pkg.RuntimeDTO, kymaConfig,
 		}
 	}
 
-	if gardenerConfig {
+	if gardenerConfig && dto.RuntimeID != "" && !drivenByKimOnly {
 		runtimeStatus, err := h.provisionerClient.RuntimeStatus(dto.GlobalAccountID, dto.RuntimeID)
 		if err != nil {
-			return fmt.Errorf("while fetching runtime status from provisioner for instance %s: %w", dto.InstanceID, err)
+			dto.Status.GardenerConfig = nil
+			h.logger.Warnf("unable to fetch runtime status for instance %s: %s", dto.InstanceID, err.Error())
+		} else {
+			dto.Status.GardenerConfig = runtimeStatus.RuntimeConfiguration.ClusterConfig
 		}
-		dto.Status.GardenerConfig = runtimeStatus.RuntimeConfiguration.ClusterConfig
 	}
 
 	return nil
@@ -466,3 +519,11 @@ func getBoolParam(param string, req *http.Request) bool {
 
 	return requested
 }
+
+func RuntimeResourceGVK() schema.GroupVersionKind {
+	return schema.GroupVersionKind{
+		Group:   "infrastructuremanager.kyma-project.io",
+		Version: "v1",
+		Kind:    "Runtime",
+	}
+}
diff --git a/internal/runtime/handler_test.go b/internal/runtime/handler_test.go
index 3f7c90b598..0bea51da0e 100644
--- a/internal/runtime/handler_test.go
+++ b/internal/runtime/handler_test.go
@@ -8,6 +8,11 @@ import (
 	"testing"
 	"time"
 
+	"github.com/kyma-project/kyma-environment-broker/internal/broker"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
 	"github.com/kyma-project/kyma-environment-broker/internal/storage"
 
 	"github.com/gorilla/mux"
@@ -26,9 +31,15 @@ import (
 )
 
 func TestRuntimeHandler(t *testing.T) {
+	k8sClient := fake.NewClientBuilder().Build()
+	kimConfig := broker.KimConfig{
+		Enabled: false,
+	}
+
 	t.Run("test pagination should work", func(t *testing.T) {
 		// given
 		provisionerClient := provisioner.NewFakeClient()
+
 		db := storage.NewMemoryStorage()
 		operations := db.Operations()
 		instances := db.Instances()
@@ -54,7 +65,7 @@ func TestRuntimeHandler(t *testing.T) {
 		err = instances.Insert(testInstance2)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", "/runtimes?page_size=1", nil)
 		require.NoError(t, err)
@@ -102,13 +113,14 @@ func TestRuntimeHandler(t *testing.T) {
 	t.Run("test validation should work", func(t *testing.T) {
 		// given
 		provisionerClient := provisioner.NewFakeClient()
+
 		db := storage.NewMemoryStorage()
 		operations := db.Operations()
 		instances := db.Instances()
 		states := db.RuntimeStates()
 		archived := db.InstancesArchived()
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "region", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "region", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", "/runtimes?page_size=a", nil)
 		require.NoError(t, err)
@@ -141,6 +153,7 @@ func TestRuntimeHandler(t *testing.T) {
 	t.Run("test filtering should work", func(t *testing.T) {
 		// given
 		provisionerClient := provisioner.NewFakeClient()
+
 		db := storage.NewMemoryStorage()
 		operations := db.Operations()
 		instances := db.Instances()
@@ -166,7 +179,7 @@ func TestRuntimeHandler(t *testing.T) {
 		err = operations.InsertOperation(testOp2)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", fmt.Sprintf("/runtimes?account=%s&subaccount=%s&instance_id=%s&runtime_id=%s&region=%s&shoot=%s", testID1, testID1, testID1, testID1, testID1, fmt.Sprintf("Shoot-%s", testID1)), nil)
 		require.NoError(t, err)
@@ -242,7 +255,7 @@ func TestRuntimeHandler(t *testing.T) {
 		err = operations.InsertDeprovisioningOperation(deprovOp3)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		rr := httptest.NewRecorder()
 		router := mux.NewRouter()
@@ -351,7 +364,7 @@ func TestRuntimeHandler(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", "/runtimes", nil)
 		require.NoError(t, err)
@@ -424,7 +437,7 @@ func TestRuntimeHandler(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", "/runtimes", nil)
 		require.NoError(t, err)
@@ -496,7 +509,7 @@ func TestRuntimeHandler(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		req, err := http.NewRequest("GET", "/runtimes", nil)
 		require.NoError(t, err)
@@ -551,7 +564,7 @@ func TestRuntimeHandler(t *testing.T) {
 		err = operations.InsertUpdatingOperation(updOp)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		rr := httptest.NewRecorder()
 		router := mux.NewRouter()
@@ -682,7 +695,7 @@ func TestRuntimeHandler(t *testing.T) {
 		err = states.Insert(fixOpgClusterState)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		rr := httptest.NewRecorder()
 		router := mux.NewRouter()
@@ -736,7 +749,7 @@ func TestRuntimeHandler(t *testing.T) {
 		_, err = provisionerClient.ProvisionRuntimeWithIDs(operation.GlobalAccountID, operation.SubAccountID, operation.RuntimeID, operation.ID, input)
 		require.NoError(t, err)
 
-		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, logrus.New())
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
 
 		rr := httptest.NewRecorder()
 		router := mux.NewRouter()
@@ -764,6 +777,376 @@ func TestRuntimeHandler(t *testing.T) {
 }
 
+func TestRuntimeHandler_WithKimOnlyDrivenInstances(t *testing.T) {
+	runtimeObj := fixRuntimeResource(t, "runtime-test1", "kcp-system")
+	k8sClient := fake.NewClientBuilder().WithRuntimeObjects(runtimeObj.obj).Build()
+	kimConfig := broker.KimConfig{
+		Enabled:      true,
+		Plans:        []string{"preview"},
+		KimOnlyPlans: []string{"preview"},
+	}
+	runtimesNotKnownToProvisioner := map[string]interface{}{"runtime-test1": nil}
+	provisionerClient := provisioner.NewFakeClientWithKimOnlyDrivenRuntimes(runtimesNotKnownToProvisioner)
+
+	t.Run("test operation detail parameter and runtime state", func(t *testing.T) {
+		// given
+		db := storage.NewMemoryStorage()
+		operations := db.Operations()
+		instances := db.Instances()
+		states := db.RuntimeStates()
+		archived := db.InstancesArchived()
+		testID := "Test1"
+		testTime := time.Now()
+		testInstance := fixInstanceForPreview(testID, testTime)
+
+		err := instances.Insert(testInstance)
+		require.NoError(t, err)
+
+		provOp := fixture.FixProvisioningOperation(fixRandomID(), testID)
+		err = operations.InsertOperation(provOp)
+		require.NoError(t, err)
+		updOp := fixture.FixUpdatingOperation(fixRandomID(), testID)
+		updOp.State = domain.Succeeded
+		updOp.CreatedAt = updOp.CreatedAt.Add(time.Minute)
+		err = operations.InsertUpdatingOperation(updOp)
+		require.NoError(t, err)
+
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
+
+		rr := httptest.NewRecorder()
+		router := mux.NewRouter()
+		runtimeHandler.AttachRoutes(router)
+
+		// when
+		req, err := http.NewRequest("GET", fmt.Sprintf("/runtimes?op_detail=%s", pkg.AllOperation), nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		var out pkg.RuntimesPage
+
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		assert.NotNil(t, out.Data[0].Status.Provisioning)
+		assert.Nil(t, out.Data[0].Status.Deprovisioning)
+		assert.Equal(t, pkg.StateSucceeded, out.Data[0].Status.State)
+
+		// when
+		rr = httptest.NewRecorder()
+		req, err = http.NewRequest("GET", fmt.Sprintf("/runtimes?op_detail=%s", pkg.LastOperation), nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		out = pkg.RuntimesPage{}
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		assert.Nil(t, out.Data[0].Status.Provisioning)
+		assert.Nil(t, out.Data[0].Status.Deprovisioning)
+		assert.Equal(t, pkg.StateSucceeded, out.Data[0].Status.State)
+	})
+
+	t.Run("test kyma_config and cluster_config optional attributes", func(t *testing.T) {
+		// given
+		db := storage.NewMemoryStorage()
+		operations := db.Operations()
+		instances := db.Instances()
+		states := db.RuntimeStates()
+		archived := db.InstancesArchived()
+		testID := "Test1"
+		testTime := time.Now()
+		testInstance := fixInstanceForPreview(testID, testTime)
+
+		err := instances.Insert(testInstance)
+		require.NoError(t, err)
+
+		provOp := fixture.FixProvisioningOperation(fixRandomID(), testID)
+		err = operations.InsertOperation(provOp)
+		require.NoError(t, err)
+		updOp := fixture.FixUpdatingOperation(fixRandomID(), testID)
+		updOp.State = domain.Failed
+		updOp.CreatedAt = updOp.CreatedAt.Add(time.Minute)
+		err = operations.InsertUpdatingOperation(updOp)
+		require.NoError(t, err)
+		upgClOp := fixture.FixUpgradeClusterOperation(fixRandomID(), testID)
+		upgClOp.CreatedAt = updOp.CreatedAt.Add(2 * time.Minute)
+		err = operations.InsertUpgradeClusterOperation(upgClOp)
+		require.NoError(t, err)
+
+		fixProvState := internal.RuntimeState{
+			ID:          fixRandomID(),
+			CreatedAt:   provOp.CreatedAt,
+			RuntimeID:   testInstance.RuntimeID,
+			OperationID: provOp.ID,
+			KymaConfig: gqlschema.KymaConfigInput{
+				Version: "1.22.0",
+			},
+			ClusterConfig: gqlschema.GardenerConfigInput{
+				Name:              testID,
+				KubernetesVersion: "1.18.18",
+				Provider:          string(internal.AWS),
+			},
+		}
+		err = states.Insert(fixProvState)
+		require.NoError(t, err)
+		fixUpgKymaState := internal.RuntimeState{
+			ID:          fixRandomID(),
+			CreatedAt:   updOp.CreatedAt,
+			RuntimeID:   testInstance.RuntimeID,
+			OperationID: updOp.Operation.ID,
+			KymaConfig: gqlschema.KymaConfigInput{
+				Version: "1.23.0",
+				Profile: (*gqlschema.KymaProfile)(ptr.String("production")),
+				Components: []*gqlschema.ComponentConfigurationInput{
+					{
+						Component: "istio",
+						Namespace: "istio-system",
+						Configuration: []*gqlschema.ConfigEntryInput{
+							{
+								Key:   "test_key",
+								Value: "test_value",
+							},
+						},
+					},
+				},
+			},
+		}
+		err = states.Insert(fixUpgKymaState)
+		require.NoError(t, err)
+		fixOpgClusterState := internal.RuntimeState{
+			ID:          fixRandomID(),
+			CreatedAt:   upgClOp.CreatedAt,
+			RuntimeID:   testInstance.RuntimeID,
+			OperationID: upgClOp.Operation.ID,
+			ClusterConfig: gqlschema.GardenerConfigInput{
+				Name:                testID,
+				KubernetesVersion:   "1.19.19",
+				Provider:            string(internal.AWS),
+				MachineImage:        ptr.String("gardenlinux"),
+				MachineImageVersion: ptr.String("1.0.0"),
+			},
+		}
+		err = states.Insert(fixOpgClusterState)
+		require.NoError(t, err)
+
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
+
+		rr := httptest.NewRecorder()
+		router := mux.NewRouter()
+		runtimeHandler.AttachRoutes(router)
+
+		// when
+		req, err := http.NewRequest("GET", "/runtimes?kyma_config=true&cluster_config=true", nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		var out pkg.RuntimesPage
+
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		require.NotNil(t, out.Data[0].KymaConfig)
+		assert.Equal(t, "1.23.0", out.Data[0].KymaConfig.Version)
+		require.NotNil(t, out.Data[0].ClusterConfig)
+		assert.Equal(t, "1.19.19", out.Data[0].ClusterConfig.KubernetesVersion)
+	})
+
+	t.Run("test gardener_config optional attribute", func(t *testing.T) {
+		// given
+		db := storage.NewMemoryStorage()
+		operations := db.Operations()
+		instances := db.Instances()
+		states := db.RuntimeStates()
+		archived := db.InstancesArchived()
+		testID := "Test1"
+		testTime := time.Now()
+		testInstance := fixInstanceForPreview(testID, testTime)
+		testInstance.Provider = "aws"
+		testInstance.RuntimeID = fmt.Sprintf("runtime-%s", testID)
+		err := instances.Insert(testInstance)
+		require.NoError(t, err)
+
+		operation := fixture.FixProvisioningOperation(fixRandomID(), testID)
+		operation.KymaResourceNamespace = "kcp-system"
+		err = operations.InsertOperation(operation)
+		require.NoError(t, err)
+
+		input, err := operation.InputCreator.CreateProvisionRuntimeInput()
+		require.NoError(t, err)
+
+		_, err = provisionerClient.ProvisionRuntimeWithIDs(operation.GlobalAccountID, operation.SubAccountID, operation.RuntimeID, operation.ID, input)
+		require.NoError(t, err)
+
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
+
+		rr := httptest.NewRecorder()
+		router := mux.NewRouter()
+		runtimeHandler.AttachRoutes(router)
+
+		// when
+		req, err := http.NewRequest("GET", "/runtimes?gardener_config=true", nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		var out pkg.RuntimesPage
+
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		require.Nil(t, out.Data[0].Status.GardenerConfig)
+		require.Nil(t, out.Data[0].RuntimeConfig)
+	})
+
+	t.Run("test gardener_config optional attribute with provisioner not knowing the runtime", func(t *testing.T) {
+		// given
+		db := storage.NewMemoryStorage()
+		operations := db.Operations()
+		instances := db.Instances()
+		states := db.RuntimeStates()
+		archived := db.InstancesArchived()
+		testID := "test1"
+		testTime := time.Now()
+		testInstance := fixInstanceForPreview(testID, testTime)
+		testInstance.Provider = "aws"
+		testInstance.RuntimeID = fmt.Sprintf("runtime-%s", testID)
+		err := instances.Insert(testInstance)
+		require.NoError(t, err)
+
+		operation := fixture.FixProvisioningOperation(fixRandomID(), testID)
+		operation.KymaResourceNamespace = "kcp-system"
+		err = operations.InsertOperation(operation)
+		require.NoError(t, err)
+
+		input, err := operation.InputCreator.CreateProvisionRuntimeInput()
+		require.NoError(t, err)
+
+		_, err = provisionerClient.ProvisionRuntimeWithIDs(operation.GlobalAccountID, operation.SubAccountID, operation.RuntimeID, operation.ID, input)
+		require.NoError(t, err)
+
+		kimDisabledForPreview := broker.KimConfig{
+			Enabled:      true,
+			Plans:        []string{"no-plan"},
+			KimOnlyPlans: []string{"no-plan"},
+		}
+
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimDisabledForPreview, logrus.New())
+
+		rr := httptest.NewRecorder()
+		router := mux.NewRouter()
+		runtimeHandler.AttachRoutes(router)
+
+		// when
+		req, err := http.NewRequest("GET", "/runtimes?gardener_config=true", nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		var out pkg.RuntimesPage
+
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		require.Nil(t, out.Data[0].Status.GardenerConfig)
+		require.Nil(t, out.Data[0].RuntimeConfig)
+	})
+
+	t.Run("test runtime_config optional attribute", func(t *testing.T) {
+		// given
+		db := storage.NewMemoryStorage()
+		operations := db.Operations()
+		instances := db.Instances()
+		states := db.RuntimeStates()
+		archived := db.InstancesArchived()
+		testID := "Test1"
+		testTime := time.Now()
+		testInstance := fixInstanceForPreview(testID, testTime)
+		testInstance.Provider = "aws"
+		testInstance.RuntimeID = fmt.Sprintf("runtime-%s", testID)
+		err := instances.Insert(testInstance)
+		require.NoError(t, err)
+
+		operation := fixture.FixProvisioningOperation(fixRandomID(), testID)
+		operation.KymaResourceNamespace = "kcp-system"
+
+		err = operations.InsertOperation(operation)
+		require.NoError(t, err)
+
+		input, err := operation.InputCreator.CreateProvisionRuntimeInput()
+		require.NoError(t, err)
+
+		_, err = provisionerClient.ProvisionRuntimeWithIDs(operation.GlobalAccountID, operation.SubAccountID, operation.RuntimeID, operation.ID, input)
+		require.NoError(t, err)
+
+		runtimeHandler := runtime.NewHandler(instances, operations, states, archived, 2, "", provisionerClient, k8sClient, kimConfig, logrus.New())
+
+		rr := httptest.NewRecorder()
+		router := mux.NewRouter()
+		runtimeHandler.AttachRoutes(router)
+
+		// when
+		req, err := http.NewRequest("GET", "/runtimes?runtime_config=true", nil)
+		require.NoError(t, err)
+		router.ServeHTTP(rr, req)
+
+		// then
+		require.Equal(t, http.StatusOK, rr.Code)
+
+		var out pkg.RuntimesPage
+
+		err = json.Unmarshal(rr.Body.Bytes(), &out)
+		require.NoError(t, err)
+
+		require.Equal(t, 1, out.TotalCount)
+		require.Equal(t, 1, out.Count)
+		assert.Equal(t, testID, out.Data[0].InstanceID)
+		require.NotNil(t, out.Data[0].RuntimeConfig)
+		require.Nil(t, out.Data[0].Status.GardenerConfig)
+
+		shootName, ok, err := unstructured.NestedString(*out.Data[0].RuntimeConfig, "spec", "shoot", "name")
+		assert.NoError(t, err)
+		assert.True(t, ok)
+		assert.Equal(t, "kim-driven-shoot", shootName)
+
+		workers, ok, err := unstructured.NestedSlice(*out.Data[0].RuntimeConfig, "spec", "shoot", "provider", "workers")
+		assert.True(t, ok)
+		assert.NoError(t, err)
+		worker, ok, err := unstructured.NestedString(workers[0].(map[string]interface{}), "name")
+		assert.True(t, ok)
+		assert.NoError(t, err)
+		assert.Equal(t, "worker-0", worker)
+	})
+}
+
 func fixInstance(id string, t time.Time) internal.Instance {
 	return internal.Instance{
 		InstanceID: id,
@@ -781,6 +1164,50 @@ func fixInstance(id string, t time.Time) internal.Instance {
 	}
 }
 
+func fixInstanceForPreview(id string, t time.Time) internal.Instance {
+	instance := fixInstance(id, t)
+	instance.ServicePlanName = broker.PreviewPlanName
+	instance.ServicePlanID = broker.PreviewPlanID
+	return instance
+}
+
 func fixRandomID() string {
 	return rand.String(16)
 }
+
+type RuntimeResourceType struct {
+	obj *unstructured.Unstructured
+}
+
+func fixRuntimeResource(t *testing.T, name, namespace string) *RuntimeResourceType {
+	runtimeResource := &unstructured.Unstructured{}
+	runtimeResource.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "infrastructuremanager.kyma-project.io",
+		Version: "v1",
+		Kind:    "Runtime",
+	})
+	runtimeResource.SetName(name)
+	runtimeResource.SetNamespace(namespace)
+
+	worker := map[string]interface{}{}
+	err := unstructured.SetNestedField(worker, "worker-0", "name")
+	assert.NoError(t, err)
+	err = unstructured.SetNestedField(worker, "m6i.large", "machine", "type")
+	assert.NoError(t, err)
+
+	err = unstructured.SetNestedSlice(runtimeResource.Object, []interface{}{worker}, "spec", "shoot", "provider", "workers")
+	assert.NoError(t, err)
+
+	err = unstructured.SetNestedField(runtimeResource.Object, "kim-driven-shoot", "spec", "shoot", "name")
+	assert.NoError(t, err)
+	err = unstructured.SetNestedField(runtimeResource.Object, "test-client-id", "spec", "shoot", "kubernetes", "kubeAPIServer", "oidcConfig", "clientID")
+	assert.NoError(t, err)
+	err = unstructured.SetNestedField(runtimeResource.Object, "aws", "spec", "shoot", "provider", "type")
+	assert.NoError(t, err)
+	err = unstructured.SetNestedField(runtimeResource.Object, false, "spec", "security", "networking", "filter", "egress", "enabled")
+	assert.NoError(t, err)
+	err = unstructured.SetNestedField(runtimeResource.Object, "Ready", "status", "state")
+	assert.NoError(t, err)
+
+	return &RuntimeResourceType{obj: runtimeResource}
+}