diff --git a/.github/ISSUE_TEMPLATE/docs-issue.md b/.github/ISSUE_TEMPLATE/docs-issue.md index a3976c5e16..d30bf39518 100644 --- a/.github/ISSUE_TEMPLATE/docs-issue.md +++ b/.github/ISSUE_TEMPLATE/docs-issue.md @@ -16,10 +16,12 @@ This template is for both adding enhancement as well as pointing out issues with ### Expected Details -### List of things to potentially add/remove: +### List of things to potentially add/remove + This is a list of things to manipulate in the docs: + - [ ] First item to change -- [ ] Second item to change +- [ ] Second item to change ### Important Factoids diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 911c068e41..57d5b8b59f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -11,4 +11,3 @@ How is this tested? Please see the checklist below and also describe any other r - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK - diff --git a/CHANGELOG.md b/CHANGELOG.md index 049c7e0211..82efe91e2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,122 @@ # Version changelog +## 1.37.1 + +### New Features and Improvements + * Removed `CustomizeDiff` and Client Side Validation for [databricks_grants](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grants) ([#3290](https://github.com/databricks/terraform-provider-databricks/pull/3290)). + * Added terraform support for restrict ws admins setting ([#3243](https://github.com/databricks/terraform-provider-databricks/pull/3243)). + +### Internal Changes + * Migrated [databricks_global_init_script](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/global_init_script) to Go SDK ([#2036](https://github.com/databricks/terraform-provider-databricks/pull/2036)). + * Bump github.com/hashicorp/terraform-plugin-sdk/v2 from 2.31.0 to 2.32.0 ([#3177](https://github.com/databricks/terraform-provider-databricks/pull/3177)). + + +## 1.37.0 + +### New Features and Improvements + * Add [databricks_file](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/file) resource ([#3265](https://github.com/databricks/terraform-provider-databricks/pull/3265)). + * Add [databricks_storage_credential](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/storage_credential) and [databricks_storage_credentials](https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/storage_credentials) data sources ([#3254](https://github.com/databricks/terraform-provider-databricks/pull/3254)). + * Add `source` attribute to `dbt_task` and `sql_task.file` tasks to support files from workspace ([#3208](https://github.com/databricks/terraform-provider-databricks/pull/3208)). + * Add computed `volume_path` attribute to [databricks_volume](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/volume) resource ([#3272](https://github.com/databricks/terraform-provider-databricks/pull/3272)). + * Add support for Vector Search Endpoints ([#3191](https://github.com/databricks/terraform-provider-databricks/pull/3191)). + * [JOBS-16324] Terraform support for Foreach tasks (private preview) ([#3252](https://github.com/databricks/terraform-provider-databricks/pull/3252)). + * fix: properly propagate auth_type to the databricks client ([#3273](https://github.com/databricks/terraform-provider-databricks/pull/3273)). 
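Among the 1.37.0 items above, the computed `volume_path` attribute is derived directly from the volume's three-level name; the `resource_volume.go` changes further down in this diff build it with `strings.ReplaceAll`. A minimal standalone sketch of that derivation (the helper name here is illustrative, not part of the provider):

```go
package main

import (
	"fmt"
	"strings"
)

// volumePath shows how the computed volume_path relates to the full name:
// "catalog.schema.volume" becomes "/Volumes/catalog/schema/volume".
func volumePath(fullName string) string {
	return "/Volumes/" + strings.ReplaceAll(fullName, ".", "/")
}

func main() {
	fmt.Println(volumePath("main.default.landing")) // prints /Volumes/main/default/landing
}
```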
+ +### Documentation Changes + * Fix images and add note on setting multiple authorizations for workspace setup ([#3259](https://github.com/databricks/terraform-provider-databricks/pull/3259)). + * Remove `data_object_type=TABLE` only restriction in [databricks_share](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/share) ([#3108](https://github.com/databricks/terraform-provider-databricks/pull/3108)). + * Remove legacy guides ([#3282](https://github.com/databricks/terraform-provider-databricks/pull/3282)). + * Update `for_each_task` docs. ([#3271](https://github.com/databricks/terraform-provider-databricks/pull/3271)). + +### Exporter + * Support for some Unity Catalog resources ([#3242](https://github.com/databricks/terraform-provider-databricks/pull/3242)). + * Rework handling of listings and interactive prompting ([#3241](https://github.com/databricks/terraform-provider-databricks/pull/3241)). + * UC exporter databricks storage credential feature ([#3219](https://github.com/databricks/terraform-provider-databricks/pull/3219)). + +### Internal Changes + * Add CustomDiffFunc for health in sql_endpoint resources ([#3227](https://github.com/databricks/terraform-provider-databricks/pull/3227)). + * Bump github.com/databricks/databricks-sdk-go 0.33.0 ([#3275](https://github.com/databricks/terraform-provider-databricks/pull/3275)). + * Suppress diff on whitespace change for resources that often use HERE-docs ([#3251](https://github.com/databricks/terraform-provider-databricks/pull/3251)). + + +## 1.36.3 + +### New Features and Improvements + * Explicitly set securable field when reading [databricks_grants](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grants) or [databricks_grant](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grant) ([#3246](https://github.com/databricks/terraform-provider-databricks/pull/3246)). + +### Documentation Changes + * Added information on `id` and other exposed attributes where appropriate ([#3237](https://github.com/databricks/terraform-provider-databricks/pull/3237)). + * Fixed docs in metastore `databricks_grants` example ([#3239](https://github.com/databricks/terraform-provider-databricks/pull/3239)). + +### Exporter + * Detect & handle deleted workspace objects (notebooks/files/directories) when running in incremental mode ([#3225](https://github.com/databricks/terraform-provider-databricks/pull/3225)). + +### Internal Changes + * Make IterFields take in aliases ([#3207](https://github.com/databricks/terraform-provider-databricks/pull/3207)). + + +## 1.36.2 + +### New Features and Improvements +* Added [databricks_aws_unity_catalog_policy](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/aws_unity_catalog_policy) data source ([#2483](https://github.com/databricks/terraform-provider-databricks/pull/2483)). +* Removed `omitempty` in `destination` fields in `clustes_api.go` ([#3232](https://github.com/databricks/terraform-provider-databricks/pull/3232)), to address ([#3231](https://github.com/databricks/terraform-provider-databricks/issues/3231)) + +### Exporter +* Omitted `git_provider` only for well-known Git URLs ([#3216](https://github.com/databricks/terraform-provider-databricks/pull/3216)). + +### Internal Changes +* Bumped github.com/zclconf/go-cty from 1.14.1 to 1.14.2 ([#3144](https://github.com/databricks/terraform-provider-databricks/pull/3144)). 
+* Bumped golang.org/x/mod from 0.14.0 to 0.15.0 ([#3229](https://github.com/databricks/terraform-provider-databricks/pull/3229)). + + +## 1.36.1 + +### New Features and Improvements +* Fixed create storage credentials with owner for account ([#3184](https://github.com/databricks/terraform-provider-databricks/pull/3184)). + +### Documentation Changes +* Removed AWS-only note for [databricks_service_principal_secret](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/service_principal_secret) resource ([#3213](https://github.com/databricks/terraform-provider-databricks/pull/3213)). + +### Internal Changes +* Fixed test: TestUcAccResourceSqlTable_Managed ([#3226](https://github.com/databricks/terraform-provider-databricks/pull/3226)). + +## 1.36.0 + +### New Features and Improvements +* Added `databricks_volumes` as data source ([#3150](https://github.com/databricks/terraform-provider-databricks/pull/3150)). +* Fixed updating owners for UC resources ([#3189](https://github.com/databricks/terraform-provider-databricks/pull/3189)). +* Validated metastore id for databricks_grant and databricks_grants resources ([#3159](https://github.com/databricks/terraform-provider-databricks/pull/3159)). +* Fixed `databricks_connection` regression when creating without owner ([#3186](https://github.com/databricks/terraform-provider-databricks/pull/3186)). +* Allow using empty strings as job parameters ([#3158](https://github.com/databricks/terraform-provider-databricks/pull/3158)). +* Changed type of value field of `JobsHealthRule` to `int64` ([#3215](https://github.com/databricks/terraform-provider-databricks/pull/3215)). + + +### Documentation Changes +* Various documentation updates ([#3198](https://github.com/databricks/terraform-provider-databricks/pull/3198)). +* Fixed typo in docs ([#3166](https://github.com/databricks/terraform-provider-databricks/pull/3166)). + +### Exporter +* Timestamps are now added to log entries ([#3146](https://github.com/databricks/terraform-provider-databricks/pull/3146)). +* Add retries for `Search`, `ReadContext` and `Import` operations when importing the resource ([#3202](https://github.com/databricks/terraform-provider-databricks/pull/3202)). +* Performance improvements for big workspaces ([#3167](https://github.com/databricks/terraform-provider-databricks/pull/3167)). +* Fix generation of cluster policy resources ([#3185](https://github.com/databricks/terraform-provider-databricks/pull/3185)). +* Skip emitting of clusters that come from more cluster sources ([#3161](https://github.com/databricks/terraform-provider-databricks/pull/3161)). + +### Internal Changes +* Migrated cluster schema to use the go-sdk struct ([#3076](https://github.com/databricks/terraform-provider-databricks/pull/3076)). +* Updated actions/setup-go to v5 ([#3154](https://github.com/databricks/terraform-provider-databricks/pull/3154)). +* Changed default branch from `master` to `main` ([#3174](https://github.com/databricks/terraform-provider-databricks/pull/3174)). +* Added .codegen.json configuration ([#3180](https://github.com/databricks/terraform-provider-databricks/pull/3180)). +* Used common.Resource consistently throughout the provider ([#3193](https://github.com/databricks/terraform-provider-databricks/pull/3193)). +* Fixed unit test ([#3201](https://github.com/databricks/terraform-provider-databricks/pull/3201)). +* Added test code for job task order ([#3183](https://github.com/databricks/terraform-provider-databricks/pull/3183)). 
+* Added unit test for `customizable_schema.go` ([#3192](https://github.com/databricks/terraform-provider-databricks/pull/3192)). +* Extended customizable schema with `AtLeastOneOf`, `ExactlyOneOf`, `RequiredWith` ([#3182](https://github.com/databricks/terraform-provider-databricks/pull/3182)). +* Fixed notebook parameters in acceptance test ([#3205](https://github.com/databricks/terraform-provider-databricks/pull/3205)). +* Introduced Generic Settings Resource ([#2997](https://github.com/databricks/terraform-provider-databricks/pull/2997)). +* Suppress diff should apply to new fields added in the same chained call to CustomizableSchema ([#3200](https://github.com/databricks/terraform-provider-databricks/pull/3200)). + + ## 1.35.0 ### New Features and Improvements: diff --git a/README.md b/README.md index 31f9812d78..7ba3fb5b0e 100644 --- a/README.md +++ b/README.md @@ -167,7 +167,7 @@ To make Databricks Terraform Provider generally available, we've moved it from [ You should have [`.terraform.lock.hcl`](https://github.com/databrickslabs/terraform-provider-databricks/blob/v0.6.2/scripts/versions-lock.hcl) file in your state directory that is checked into source control. terraform init will give you the following warning. -``` +```text Warning: Additional provider information from registry The remote registry returned warnings for registry.terraform.io/databrickslabs/databricks: @@ -178,6 +178,6 @@ After you replace `databrickslabs/databricks` with `databricks/databricks` in th If you didn't check-in [`.terraform.lock.hcl`](https://www.terraform.io/language/files/dependency-lock#lock-file-location) to the source code version control, you may you may see `Failed to install provider` error. Please follow the simple steps described in the [troubleshooting guide](docs/guides/troubleshooting.md). -``` +```text Warning: Exporter is experimental and provided as is. It has an evolving interface, which may change or be removed in future versions of the provider. 
``` diff --git a/aws/data_aws_unity_catalog_policy.go b/aws/data_aws_unity_catalog_policy.go new file mode 100644 index 0000000000..b700ad6eca --- /dev/null +++ b/aws/data_aws_unity_catalog_policy.go @@ -0,0 +1,107 @@ +package aws + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error { + bucket := d.Get("bucket_name").(string) + awsAccountId := d.Get("aws_account_id").(string) + roleName := d.Get("role_name").(string) + policy := awsIamPolicy{ + Version: "2012-10-17", + Statements: []*awsIamPolicyStatement{ + { + Effect: "Allow", + Actions: []string{ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation", + }, + Resources: []string{ + fmt.Sprintf("arn:aws:s3:::%s/*", bucket), + fmt.Sprintf("arn:aws:s3:::%s", bucket), + }, + }, + { + Effect: "Allow", + Actions: []string{ + "sts:AssumeRole", + }, + Resources: []string{ + fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountId, roleName), + }, + }, + }, + } + if kmsKey, ok := d.GetOk("kms_name"); ok { + policy.Statements = append(policy.Statements, &awsIamPolicyStatement{ + Effect: "Allow", + Actions: []string{ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey*", + }, + Resources: []string{ + fmt.Sprintf("arn:aws:kms:%s", kmsKey), + }, + }) + } + policyJSON, err := json.MarshalIndent(policy, "", " ") + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s-%s-%s", bucket, awsAccountId, roleName)) + err = d.Set("json", string(policyJSON)) + if err != nil { + return err + } + return nil +} + +func validateSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "kms_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile(`^[0-9a-zA-Z/_-]+$`), + "must contain only alphanumeric, hyphens, forward slashes, and underscores characters"), + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile(`^[0-9a-zA-Z_-]+$`), + "must contain only alphanumeric, underscore, and hyphen characters"), + }, + "role_name": { + Type: schema.TypeString, + Required: true, + }, + "aws_account_id": { + Type: schema.TypeString, + Required: true, + }, + "json": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func DataAwsUnityCatalogPolicy() common.Resource { + return common.Resource{ + Read: generateReadContext, + Schema: validateSchema(), + } +} diff --git a/aws/data_aws_unity_catalog_policy_test.go b/aws/data_aws_unity_catalog_policy_test.go new file mode 100644 index 0000000000..840a212f26 --- /dev/null +++ b/aws/data_aws_unity_catalog_policy_test.go @@ -0,0 +1,121 @@ +package aws + +import ( + "encoding/json" + "testing" + + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" +) + +func TestDataAwsUnityCatalogPolicy(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + bucket_name = "databricks-bucket" + role_name = "databricks-role" + kms_name = "databricks-kms" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": 
[ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws:s3:::databricks-bucket/*", + "arn:aws:s3:::databricks-bucket" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": [ + "arn:aws:iam::123456789098:role/databricks-role" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey*" + ], + "Resource": [ + "arn:aws:kms:databricks-kms" + ] + } + ] + }` + compareJSON(t, j, p) +} + +func TestDataAwsUnityCatalogPolicyWithoutKMS(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + bucket_name = "databricks-bucket" + role_name = "databricks-role" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws:s3:::databricks-bucket/*", + "arn:aws:s3:::databricks-bucket" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": [ + "arn:aws:iam::123456789098:role/databricks-role" + ] + } + ] + }` + compareJSON(t, j, p) +} + +func compareJSON(t *testing.T, json1 string, json2 string) { + var i1 interface{} + var i2 interface{} + err := json.Unmarshal([]byte(json1), &i1) + assert.NoError(t, err, "error while unmarshalling") + err = json.Unmarshal([]byte(json2), &i2) + assert.NoError(t, err, "error while unmarshalling") + assert.Equal(t, i1, i2) +} diff --git a/catalog/data_storage_credential.go b/catalog/data_storage_credential.go new file mode 100644 index 0000000000..2b54a85da2 --- /dev/null +++ b/catalog/data_storage_credential.go @@ -0,0 +1,24 @@ +package catalog + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" +) + +func DataSourceStorageCredential() common.Resource { + type AccountMetastoreByID struct { + Name string `json:"name"` + StorageCredential *catalog.StorageCredentialInfo `json:"storage_credential_info,omitempty" tf:"computed" ` + } + return common.WorkspaceData(func(ctx context.Context, data *AccountMetastoreByID, w *databricks.WorkspaceClient) error { + credential, err := w.StorageCredentials.GetByName(ctx, data.Name) + if err != nil { + return err + } + data.StorageCredential = credential + return nil + }) +} diff --git a/catalog/data_storage_credential_test.go b/catalog/data_storage_credential_test.go new file mode 100644 index 0000000000..9be9fcb002 --- /dev/null +++ b/catalog/data_storage_credential_test.go @@ -0,0 +1,55 @@ +package catalog + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +func TestStorageCredentialDataVerify(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockStorageCredentialsAPI().EXPECT() + e.GetByName(mock.Anything, "abc").Return( + &catalog.StorageCredentialInfo{ + Name: "abc", + Owner: "admin", + AwsIamRole: &catalog.AwsIamRole{ + RoleArn: "test", + }, + AzureManagedIdentity: 
&catalog.AzureManagedIdentity{ + AccessConnectorId: "test", + }, + DatabricksGcpServiceAccount: &catalog.DatabricksGcpServiceAccountResponse{ + Email: "test", + }, + }, + nil) + }, + Resource: DataSourceStorageCredential(), + Read: true, + NonWritable: true, + ID: "_", + HCL: ` + name = "abc" + `, + }.ApplyAndExpectData(t, map[string]any{ + "storage_credential_info.0.owner": "admin", + "storage_credential_info.0.aws_iam_role.0.role_arn": "test", + "storage_credential_info.0.azure_managed_identity.0.access_connector_id": "test", + "storage_credential_info.0.databricks_gcp_service_account.0.email": "test", + }) +} + +func TestStorageCredentialDataError(t *testing.T) { + qa.ResourceFixture{ + Fixtures: qa.HTTPFailures, + Resource: DataSourceStorageCredential(), + Read: true, + NonWritable: true, + ID: "_", + }.ExpectError(t, "i'm a teapot") +} diff --git a/catalog/data_storage_credentials.go b/catalog/data_storage_credentials.go new file mode 100644 index 0000000000..228610e874 --- /dev/null +++ b/catalog/data_storage_credentials.go @@ -0,0 +1,26 @@ +package catalog + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" +) + +func DataSourceStorageCredentials() common.Resource { + type storageCredentialsData struct { + Names []string `json:"names,omitempty" tf:"computed"` + } + return common.WorkspaceData(func(ctx context.Context, data *storageCredentialsData, w *databricks.WorkspaceClient) error { + credentials, err := w.StorageCredentials.ListAll(ctx, catalog.ListStorageCredentialsRequest{}) + if err != nil { + return err + } + data.Names = []string{} + for _, v := range credentials { + data.Names = append(data.Names, v.Name) + } + return nil + }) +} diff --git a/catalog/data_storage_credentials_test.go b/catalog/data_storage_credentials_test.go new file mode 100644 index 0000000000..7cc4ed09b3 --- /dev/null +++ b/catalog/data_storage_credentials_test.go @@ -0,0 +1,39 @@ +package catalog + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +func TestStorageCredentialsData(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockStorageCredentialsAPI().EXPECT() + e.ListAll(mock.Anything, catalog.ListStorageCredentialsRequest{}).Return( + []catalog.StorageCredentialInfo{ + {Name: "a"}, {Name: "b"}, + }, + nil) + }, + Resource: DataSourceStorageCredentials(), + Read: true, + NonWritable: true, + ID: "_", + }.ApplyAndExpectData(t, map[string]any{ + "names": []interface{}{"a", "b"}, + }) +} + +func TestStorageCredentialsData_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: qa.HTTPFailures, + Resource: DataSourceStorageCredentials(), + Read: true, + NonWritable: true, + ID: "_", + }.ExpectError(t, "i'm a teapot") +} diff --git a/catalog/data_volumes.go b/catalog/data_volumes.go new file mode 100644 index 0000000000..903e7f0bb9 --- /dev/null +++ b/catalog/data_volumes.go @@ -0,0 +1,26 @@ +package catalog + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" +) + +func DataSourceVolumes() common.Resource { + return common.WorkspaceData(func(ctx context.Context, data 
*struct { + CatalogName string `json:"catalog_name"` + SchemaName string `json:"schema_name"` + Ids []string `json:"ids,omitempty" tf:"computed,slice_set"` + }, w *databricks.WorkspaceClient) error { + volumes, err := w.Volumes.ListAll(ctx, catalog.ListVolumesRequest{CatalogName: data.CatalogName, SchemaName: data.SchemaName}) + if err != nil { + return err + } + for _, v := range volumes { + data.Ids = append(data.Ids, v.FullName) + } + return nil + }) +} diff --git a/catalog/data_volumes_test.go b/catalog/data_volumes_test.go new file mode 100644 index 0000000000..d570dba2f1 --- /dev/null +++ b/catalog/data_volumes_test.go @@ -0,0 +1,44 @@ +package catalog + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestDataSourceVolumes(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/volumes?catalog_name=a&schema_name=b", + Response: catalog.ListVolumesResponseContent{ + Volumes: []catalog.VolumeInfo{ + { + FullName: "a.b.c", + Name: "a", + }, + }, + }, + }, + }, + Resource: DataSourceVolumes(), + HCL: ` + catalog_name = "a" + schema_name = "b"`, + Read: true, + NonWritable: true, + ID: "_", + }.ApplyNoError(t) +} + +func TestDataSourceVolumes_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: qa.HTTPFailures, + Resource: DataSourceVolumes(), + Read: true, + NonWritable: true, + ID: "_", + }.ExpectError(t, "i'm a teapot") +} diff --git a/catalog/permissions/permissions.go b/catalog/permissions/permissions.go index efd1f962b2..c0c21b45e9 100644 --- a/catalog/permissions/permissions.go +++ b/catalog/permissions/permissions.go @@ -92,6 +92,7 @@ func (sm SecurableMapping) KeyValue(d attributeGetter) (string, string) { } return field, v } + log.Printf("[WARN] Unexpected resource or permissions. 
Please proceed at your own risk.") return "unknown", "unknown" } func (sm SecurableMapping) Id(d *schema.ResourceData) string { diff --git a/catalog/resource_catalog.go b/catalog/resource_catalog.go index ddb113a16c..f114fc9e9c 100644 --- a/catalog/resource_catalog.go +++ b/catalog/resource_catalog.go @@ -148,6 +148,10 @@ func ResourceCatalog() common.Resource { } } + if !d.HasChangeExcept("owner") { + return nil + } + updateCatalogRequest.Owner = "" ci, err := w.Catalogs.Update(ctx, updateCatalogRequest) diff --git a/catalog/resource_connection.go b/catalog/resource_connection.go index 498cf177ff..289f20fe9e 100644 --- a/catalog/resource_connection.go +++ b/catalog/resource_connection.go @@ -56,20 +56,19 @@ func ResourceConnection() common.Resource { } var createConnectionRequest catalog.CreateConnection common.DataToStructPointer(d, s, &createConnectionRequest) - _, err = w.Connections.Create(ctx, createConnectionRequest) + conn, err := w.Connections.Create(ctx, createConnectionRequest) if err != nil { return err } // Update owner if it is provided - if d.Get("owner") == "" { - return nil - } - var updateConnectionRequest catalog.UpdateConnection - common.DataToStructPointer(d, s, &updateConnectionRequest) - updateConnectionRequest.NameArg = updateConnectionRequest.Name - conn, err := w.Connections.Update(ctx, updateConnectionRequest) - if err != nil { - return err + if d.Get("owner") != "" { + var updateConnectionRequest catalog.UpdateConnection + common.DataToStructPointer(d, s, &updateConnectionRequest) + updateConnectionRequest.Name = createConnectionRequest.Name + conn, err = w.Connections.Update(ctx, updateConnectionRequest) + if err != nil { + return err + } } d.Set("metastore_id", conn.MetastoreId) pi.Pack(d) @@ -84,13 +83,17 @@ func ResourceConnection() common.Resource { if err != nil { return err } - conn, err := w.Connections.GetByNameArg(ctx, connName) + conn, err := w.Connections.GetByName(ctx, connName) if err != nil { return err } // We need to preserve original sensitive options as API doesn't return them var cOrig catalog.CreateConnection common.DataToStructPointer(d, s, &cOrig) + // If there are no options returned, need to initialize the map + if conn.Options == nil { + conn.Options = map[string]string{} + } for key, element := range cOrig.Options { if slices.Contains(sensitiveOptions, key) { conn.Options[key] = element @@ -113,13 +116,12 @@ func ResourceConnection() common.Resource { if err != nil { return err } - updateConnectionRequest.NameArg = connName + updateConnectionRequest.Name = connName if d.HasChange("owner") { _, err = w.Connections.Update(ctx, catalog.UpdateConnection{ - Name: updateConnectionRequest.Name, - NameArg: updateConnectionRequest.Name, - Owner: updateConnectionRequest.Owner, + Name: updateConnectionRequest.Name, + Owner: updateConnectionRequest.Owner, }) if err != nil { return err @@ -133,9 +135,8 @@ func ResourceConnection() common.Resource { // Rollback old, new := d.GetChange("owner") _, rollbackErr := w.Connections.Update(ctx, catalog.UpdateConnection{ - Name: updateConnectionRequest.Name, - NameArg: updateConnectionRequest.Name, - Owner: old.(string), + Name: updateConnectionRequest.Name, + Owner: old.(string), }) if rollbackErr != nil { return common.OwnerRollbackError(err, rollbackErr, old.(string), new.(string)) @@ -154,7 +155,7 @@ func ResourceConnection() common.Resource { if err != nil { return err } - return w.Connections.DeleteByNameArg(ctx, connName) + return w.Connections.DeleteByName(ctx, connName) }, } } diff --git 
a/catalog/resource_external_location.go b/catalog/resource_external_location.go index f992349f97..a9572d5341 100644 --- a/catalog/resource_external_location.go +++ b/catalog/resource_external_location.go @@ -108,6 +108,10 @@ func ResourceExternalLocation() common.Resource { } } + if !d.HasChangeExcept("owner") { + return nil + } + updateExternalLocationRequest.Owner = "" _, err = w.ExternalLocations.Update(ctx, updateExternalLocationRequest) if err != nil { diff --git a/catalog/resource_grant.go b/catalog/resource_grant.go index 78bf5f1bb5..2c8cd174cd 100644 --- a/catalog/resource_grant.go +++ b/catalog/resource_grant.go @@ -184,7 +184,11 @@ func ResourceGrant() common.Resource { if err != nil { return err } - return common.StructToData(*grantsForPrincipal, s, d) + err = common.StructToData(*grantsForPrincipal, s, d) + if err != nil { + return err + } + return d.Set(securable, name) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() diff --git a/catalog/resource_grants.go b/catalog/resource_grants.go index 829c0a5ca3..a33fac2535 100644 --- a/catalog/resource_grants.go +++ b/catalog/resource_grants.go @@ -89,171 +89,6 @@ func replaceAllPermissions(a permissions.UnityCatalogPermissionsAPI, securable s }) } -type securableMapping map[string]map[string]bool - -// reuse ResourceDiff and ResourceData -type attributeGetter interface { - Get(key string) any -} - -func (sm securableMapping) kv(d attributeGetter) (string, string) { - for field := range sm { - v := d.Get(field).(string) - if v == "" { - continue - } - return field, v - } - return "unknown", "unknown" -} - -func (sm securableMapping) id(d *schema.ResourceData) string { - securable, name := sm.kv(d) - return fmt.Sprintf("%s/%s", securable, name) -} - -func (sm securableMapping) validate(d attributeGetter, pl PermissionsList) error { - securable, _ := sm.kv(d) - allowed, ok := sm[securable] - if !ok { - return fmt.Errorf(`%s is not fully supported yet`, securable) - } - for _, v := range pl.Assignments { - for _, priv := range v.Privileges { - if !allowed[strings.ToUpper(priv)] { - // check if user uses spaces instead of underscores - if allowed[strings.ReplaceAll(priv, " ", "_")] { - return fmt.Errorf(`%s is not allowed on %s. 
Did you mean %s?`, priv, securable, strings.ReplaceAll(priv, " ", "_")) - } - return fmt.Errorf(`%s is not allowed on %s`, priv, securable) - } - } - } - return nil -} - -var mapping = securableMapping{ - // add other securable mappings once needed - "table": { - "MODIFY": true, - "SELECT": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "BROWSE": true, - }, - "catalog": { - "CREATE": true, - "USAGE": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "USE_CATALOG": true, - "USE_SCHEMA": true, - "CREATE_SCHEMA": true, - "CREATE_TABLE": true, - "CREATE_FUNCTION": true, - "CREATE_MATERIALIZED_VIEW": true, - "CREATE_MODEL": true, - "CREATE_VOLUME": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - "EXECUTE": true, - "MODIFY": true, - "SELECT": true, - "REFRESH": true, - "BROWSE": true, - }, - "schema": { - "CREATE": true, - "USAGE": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "USE_SCHEMA": true, - "CREATE_TABLE": true, - "CREATE_FUNCTION": true, - "CREATE_MATERIALIZED_VIEW": true, - "CREATE_MODEL": true, - "CREATE_VOLUME": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - "EXECUTE": true, - "MODIFY": true, - "SELECT": true, - "REFRESH": true, - "BROWSE": true, - }, - "storage_credential": { - "CREATE_TABLE": true, - "READ_FILES": true, - "WRITE_FILES": true, - "CREATE_EXTERNAL_LOCATION": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "CREATE_EXTERNAL_TABLE": true, - }, - "external_location": { - "CREATE_TABLE": true, - "READ_FILES": true, - "WRITE_FILES": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "CREATE_EXTERNAL_TABLE": true, - "CREATE_MANAGED_STORAGE": true, - "CREATE_EXTERNAL_VOLUME": true, - "BROWSE": true, - }, - "metastore": { - // v1.0 - "CREATE_CATALOG": true, - "CREATE_CLEAN_ROOM": true, - "CREATE_CONNECTION": true, - "CREATE_EXTERNAL_LOCATION": true, - "CREATE_STORAGE_CREDENTIAL": true, - "CREATE_SHARE": true, - "CREATE_RECIPIENT": true, - "CREATE_PROVIDER": true, - "MANAGE_ALLOWLIST": true, - "USE_CONNECTION": true, - "USE_PROVIDER": true, - "USE_SHARE": true, - "USE_RECIPIENT": true, - "USE_MARKETPLACE_ASSETS": true, - "SET_SHARE_PERMISSION": true, - }, - "function": { - "ALL_PRIVILEGES": true, - "EXECUTE": true, - }, - "model": { - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "EXECUTE": true, - }, - "share": { - "SELECT": true, - }, - "volume": { - "ALL_PRIVILEGES": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - }, - // avoid reserved field - "foreign_connection": { - "ALL_PRIVILEGES": true, - "CREATE_FOREIGN_CATALOG": true, - "CREATE_FOREIGN_SCHEMA": true, - "CREATE_FOREIGN_TABLE": true, - "USE_CONNECTION": true, - }, -} - func (pl PermissionsList) toSdkPermissionsList() (out catalog.PermissionsList) { for _, v := range pl.Assignments { privileges := []catalog.Privilege{} @@ -294,7 +129,7 @@ func ResourceGrants() common.Resource { s := common.StructToSchema(PermissionsList{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { alof := []string{} - for field := range mapping { + for field := range permissions.Mappings { s[field] = &schema.Schema{ Type: schema.TypeString, ForceNew: true, @@ -302,22 +137,13 @@ func ResourceGrants() common.Resource { } alof = append(alof, field) } - for field := range mapping { + for field := range permissions.Mappings { s[field].AtLeastOneOf = alof } return s }) return common.Resource{ Schema: s, - CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error { - if d.Id() == "" { - // unfortunately we cannot do validation before dependent resources 
exist with tfsdkv2 - return nil - } - var grants PermissionsList - common.DiffToStructPointer(d, s, &grants) - return mapping.validate(d, grants) - }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() if err != nil { @@ -329,13 +155,13 @@ func ResourceGrants() common.Resource { } var grants PermissionsList common.DataToStructPointer(d, s, &grants) - securable, name := mapping.kv(d) + securable, name := permissions.Mappings.KeyValue(d) unityCatalogPermissionsAPI := permissions.NewUnityCatalogPermissionsAPI(ctx, c) err = replaceAllPermissions(unityCatalogPermissionsAPI, securable, name, grants.toSdkPermissionsList()) if err != nil { return err } - d.SetId(mapping.id(d)) + d.SetId(permissions.Mappings.Id(d)) return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { @@ -351,7 +177,12 @@ func ResourceGrants() common.Resource { if len(grants.PrivilegeAssignments) == 0 { return apierr.NotFound("got empty permissions list") } - return common.StructToData(sdkPermissionsListToPermissionsList(*grants), s, d) + + err = common.StructToData(sdkPermissionsListToPermissionsList(*grants), s, d) + if err != nil { + return err + } + return d.Set(securable, name) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() diff --git a/catalog/resource_grants_test.go b/catalog/resource_grants_test.go index 01660863b0..81fa93f88f 100644 --- a/catalog/resource_grants_test.go +++ b/catalog/resource_grants_test.go @@ -358,31 +358,6 @@ func TestGrantReadMalformedId(t *testing.T) { }.ExpectError(t, "ID must be two elements split by `/`: foo.bar") } -type data map[string]string - -func (a data) Get(k string) any { - return a[k] -} - -func TestMappingUnsupported(t *testing.T) { - d := data{"nothing": "here"} - err := mapping.validate(d, PermissionsList{}) - assert.EqualError(t, err, "unknown is not fully supported yet") -} - -func TestInvalidPrivilege(t *testing.T) { - d := data{"table": "me"} - err := mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - Principal: "me", - Privileges: []string{"EVERYTHING"}, - }, - }, - }) - assert.EqualError(t, err, "EVERYTHING is not allowed on table") -} - func TestPermissionsList_Diff_ExternallyAddedPrincipal(t *testing.T) { diff := diffPermissions( catalog.PermissionsList{ // config @@ -600,30 +575,6 @@ func TestShareGrantUpdate(t *testing.T) { }.ApplyNoError(t) } -func TestPrivilegeWithSpace(t *testing.T) { - d := data{"table": "me"} - err := mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - Principal: "me", - Privileges: []string{"ALL PRIVILEGES"}, - }, - }, - }) - assert.EqualError(t, err, "ALL PRIVILEGES is not allowed on table. Did you mean ALL_PRIVILEGES?") - - d = data{"external_location": "me"} - err = mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - Principal: "me", - Privileges: []string{"CREATE TABLE"}, - }, - }, - }) - assert.EqualError(t, err, "CREATE TABLE is not allowed on external_location. 
Did you mean CREATE_TABLE?") -} - func TestConnectionGrantCreate(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/catalog/resource_metastore.go b/catalog/resource_metastore.go index 66ccf8b620..7edc86a301 100644 --- a/catalog/resource_metastore.go +++ b/catalog/resource_metastore.go @@ -138,6 +138,11 @@ func ResourceMetastore() common.Resource { return err } } + + if !d.HasChangeExcept("owner") { + return nil + } + update.Owner = "" _, err := acc.Metastores.Update(ctx, catalog.AccountsUpdateMetastore{ MetastoreId: d.Id(), @@ -171,6 +176,11 @@ func ResourceMetastore() common.Resource { return err } } + + if !d.HasChangeExcept("owner") { + return nil + } + update.Owner = "" _, err := w.Metastores.Update(ctx, update) if err != nil { diff --git a/catalog/resource_metastore_test.go b/catalog/resource_metastore_test.go index 0a4e1940cf..5f851de2ba 100644 --- a/catalog/resource_metastore_test.go +++ b/catalog/resource_metastore_test.go @@ -27,8 +27,7 @@ func TestCreateMetastore(t *testing.T) { MetastoreId: "abc", }, nil) e.Update(mock.Anything, catalog.UpdateMetastore{ - Id: "abc", - Name: "a", + Id: "abc", }).Return(&catalog.MetastoreInfo{ Name: "a", }, nil) @@ -58,7 +57,6 @@ func TestCreateMetastoreWithOwner(t *testing.T) { }, nil) e.Update(mock.Anything, catalog.UpdateMetastore{ Id: "abc", - Name: "a", Owner: "administrators", }).Return(&catalog.MetastoreInfo{ Name: "a", @@ -92,7 +90,6 @@ func TestCreateMetastore_DeltaSharing(t *testing.T) { }, nil) e.Update(mock.Anything, catalog.UpdateMetastore{ Id: "abc", - Name: "a", Owner: "administrators", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", DeltaSharingRecipientTokenLifetimeInSeconds: 0, @@ -172,14 +169,42 @@ func TestUpdateMetastore_NoChanges(t *testing.T) { StorageRoot: "s3://b/abc", Name: "abc", }, nil) + }, + Resource: ResourceMetastore(), + ID: "abc", + Update: true, + RequiresNew: true, + InstanceState: map[string]string{ + "name": "abc", + "storage_root": "s3:/a", + "owner": "admin", + "delta_sharing_scope": "INTERNAL_AND_EXTERNAL", + "delta_sharing_recipient_token_lifetime_in_seconds": "1002", + }, + HCL: ` + name = "abc" + storage_root = "s3:/a" + owner = "admin" + delta_sharing_scope = "INTERNAL_AND_EXTERNAL" + delta_sharing_recipient_token_lifetime_in_seconds = 1002 + `, + }.ApplyNoError(t) +} + +func TestUpdateMetastore_OnlyOwnerChanges(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockMetastoresAPI().EXPECT() e.Update(mock.Anything, catalog.UpdateMetastore{ - Id: "abc", - Name: "abc", - DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, - ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, + Id: "abc", + Owner: "updatedOwner", }).Return(&catalog.MetastoreInfo{ - Name: "a", + Name: "abc", + Owner: "updatedOwner", + }, nil) + e.GetById(mock.Anything, "abc").Return(&catalog.MetastoreInfo{ + Name: "abc", + Owner: "updatedOwner", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", DeltaSharingRecipientTokenLifetimeInSeconds: 1002, }, nil) @@ -198,14 +223,14 @@ func TestUpdateMetastore_NoChanges(t *testing.T) { HCL: ` name = "abc" storage_root = "s3:/a" - owner = "admin" + owner = "updatedOwner" delta_sharing_scope = "INTERNAL_AND_EXTERNAL" delta_sharing_recipient_token_lifetime_in_seconds = 1002 `, }.ApplyNoError(t) } -func TestUpdateMetastore_OwnerChanges(t *testing.T) { +func TestUpdateMetastore_OwnerAndOtherChanges(t *testing.T) { qa.ResourceFixture{ MockWorkspaceClientFunc: func(w 
*mocks.MockWorkspaceClient) { e := w.GetMockMetastoresAPI().EXPECT() @@ -218,21 +243,19 @@ func TestUpdateMetastore_OwnerChanges(t *testing.T) { }, nil) e.Update(mock.Anything, catalog.UpdateMetastore{ Id: "abc", - Name: "abc", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, + DeltaSharingRecipientTokenLifetimeInSeconds: 1004, ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, }).Return(&catalog.MetastoreInfo{ - Name: "a", Owner: "updatedOwner", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, + DeltaSharingRecipientTokenLifetimeInSeconds: 1004, }, nil) e.GetById(mock.Anything, "abc").Return(&catalog.MetastoreInfo{ Name: "abc", Owner: "updatedOwner", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, + DeltaSharingRecipientTokenLifetimeInSeconds: 1004, }, nil) }, Resource: ResourceMetastore(), @@ -251,7 +274,7 @@ func TestUpdateMetastore_OwnerChanges(t *testing.T) { storage_root = "s3:/a" owner = "updatedOwner" delta_sharing_scope = "INTERNAL_AND_EXTERNAL" - delta_sharing_recipient_token_lifetime_in_seconds = 1002 + delta_sharing_recipient_token_lifetime_in_seconds = 1004 `, }.ApplyNoError(t) } @@ -269,9 +292,8 @@ func TestUpdateMetastore_Rollback(t *testing.T) { }, nil) e.Update(mock.Anything, catalog.UpdateMetastore{ Id: "abc", - Name: "abc", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, + DeltaSharingRecipientTokenLifetimeInSeconds: 1004, ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, }).Return(nil, errors.New("Something unexpected happened")) e.Update(mock.Anything, catalog.UpdateMetastore{ @@ -298,7 +320,7 @@ func TestUpdateMetastore_Rollback(t *testing.T) { storage_root = "s3:/a" owner = "updatedOwner" delta_sharing_scope = "INTERNAL_AND_EXTERNAL" - delta_sharing_recipient_token_lifetime_in_seconds = 1002 + delta_sharing_recipient_token_lifetime_in_seconds = 1004 `, }.Apply(t) qa.AssertErrorStartsWith(t, err, "Something unexpected happened") @@ -310,7 +332,6 @@ func TestUpdateMetastore_DeltaSharingScopeOnly(t *testing.T) { e := w.GetMockMetastoresAPI().EXPECT() e.Update(mock.Anything, catalog.UpdateMetastore{ Id: "abc", - Name: "abc", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", DeltaSharingRecipientTokenLifetimeInSeconds: 1002, ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, @@ -360,16 +381,6 @@ func TestCreateAccountMetastore(t *testing.T) { MetastoreId: "abc", }, }, nil) - e.Update(mock.Anything, catalog.AccountsUpdateMetastore{ - MetastoreId: "abc", - MetastoreInfo: &catalog.UpdateMetastore{ - Name: "a", - }, - }).Return(&catalog.AccountsMetastoreInfo{ - MetastoreInfo: &catalog.MetastoreInfo{ - Name: "a", - }, - }, nil) e.GetByMetastoreId(mock.Anything, "abc").Return(&catalog.AccountsMetastoreInfo{ MetastoreInfo: &catalog.MetastoreInfo{ StorageRoot: "s3://b/abc", @@ -404,7 +415,6 @@ func TestCreateAccountMetastoreWithOwner(t *testing.T) { e.Update(mock.Anything, catalog.AccountsUpdateMetastore{ MetastoreId: "abc", MetastoreInfo: &catalog.UpdateMetastore{ - Name: "a", Owner: "administrators", }, }).Return(&catalog.AccountsMetastoreInfo{ @@ -449,7 +459,6 @@ func TestCreateAccountMetastore_DeltaSharing(t *testing.T) { e.Update(mock.Anything, catalog.AccountsUpdateMetastore{ MetastoreId: "abc", MetastoreInfo: &catalog.UpdateMetastore{ - Name: "a", Owner: "administrators", DeltaSharingOrganizationName: "acme", 
DeltaSharingScope: "INTERNAL_AND_EXTERNAL", @@ -507,22 +516,6 @@ func TestUpdateAccountMetastore_NoChanges(t *testing.T) { qa.ResourceFixture{ MockAccountClientFunc: func(a *mocks.MockAccountClient) { e := a.GetMockAccountMetastoresAPI().EXPECT() - e.Update(mock.Anything, catalog.AccountsUpdateMetastore{ - MetastoreId: "abc", - MetastoreInfo: &catalog.UpdateMetastore{ - Id: "abc", - Name: "abc", - DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, - ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, - }, - }).Return(&catalog.AccountsMetastoreInfo{ - MetastoreInfo: &catalog.MetastoreInfo{ - Name: "abc", - DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, - }, - }, nil) e.GetByMetastoreId(mock.Anything, "abc").Return(&catalog.AccountsMetastoreInfo{ MetastoreInfo: &catalog.MetastoreInfo{ StorageRoot: "s3://b/abc", @@ -569,22 +562,6 @@ func TestUpdateAccountMetastore_OwnerChanges(t *testing.T) { Owner: "updatedOwner", }, }, nil) - e.Update(mock.Anything, catalog.AccountsUpdateMetastore{ - MetastoreId: "abc", - MetastoreInfo: &catalog.UpdateMetastore{ - Id: "abc", - Name: "abc", - DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, - ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, - }, - }).Return(&catalog.AccountsMetastoreInfo{ - MetastoreInfo: &catalog.MetastoreInfo{ - Name: "abc", - DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, - }, - }, nil) e.GetByMetastoreId(mock.Anything, "abc").Return(&catalog.AccountsMetastoreInfo{ MetastoreInfo: &catalog.MetastoreInfo{ StorageRoot: "s3://b/abc", @@ -635,9 +612,8 @@ func TestUpdateAccountMetastore_Rollback(t *testing.T) { MetastoreId: "abc", MetastoreInfo: &catalog.UpdateMetastore{ Id: "abc", - Name: "abc", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", - DeltaSharingRecipientTokenLifetimeInSeconds: 1002, + DeltaSharingRecipientTokenLifetimeInSeconds: 1004, ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, }, }).Return(nil, errors.New("Something unexpected happened")) @@ -671,7 +647,7 @@ func TestUpdateAccountMetastore_Rollback(t *testing.T) { storage_root = "s3:/a" owner = "updatedOwner" delta_sharing_scope = "INTERNAL_AND_EXTERNAL" - delta_sharing_recipient_token_lifetime_in_seconds = 1002 + delta_sharing_recipient_token_lifetime_in_seconds = 1004 `, }.Apply(t) qa.AssertErrorStartsWith(t, err, "Something unexpected happened") @@ -685,7 +661,6 @@ func TestUpdateAccountMetastore_DeltaSharingScopeOnly(t *testing.T) { MetastoreId: "abc", MetastoreInfo: &catalog.UpdateMetastore{ Id: "abc", - Name: "abc", DeltaSharingScope: "INTERNAL_AND_EXTERNAL", DeltaSharingRecipientTokenLifetimeInSeconds: 1002, ForceSendFields: []string{"DeltaSharingRecipientTokenLifetimeInSeconds"}, diff --git a/catalog/resource_registered_model_test.go b/catalog/resource_registered_model_test.go index 129bb12d7e..93e2f671c0 100644 --- a/catalog/resource_registered_model_test.go +++ b/catalog/resource_registered_model_test.go @@ -131,7 +131,6 @@ func TestRegisteredModelUpdate(t *testing.T) { ExpectedRequest: catalog.UpdateRegisteredModelRequest{ FullName: "catalog.schema.model", Comment: "new comment", - Name: "model", }, Response: catalog.RegisteredModelInfo{ Name: "model", diff --git a/catalog/resource_schema.go b/catalog/resource_schema.go index 0423c5ac4c..bf0a492576 100644 --- a/catalog/resource_schema.go +++ 
b/catalog/resource_schema.go @@ -99,6 +99,10 @@ func ResourceSchema() common.Resource { } } + if !d.HasChangeExcept("owner") { + return nil + } + updateSchemaRequest.Owner = "" schema, err := w.Schemas.Update(ctx, updateSchemaRequest) if err != nil { @@ -152,7 +156,7 @@ func ResourceSchema() common.Resource { return err } for _, v := range volumes { - w.Volumes.DeleteByFullNameArg(ctx, v.FullName) + w.Volumes.DeleteByName(ctx, v.FullName) } // delete all functions functions, err := w.Functions.ListAll(ctx, catalog.ListFunctionsRequest{ diff --git a/catalog/resource_schema_test.go b/catalog/resource_schema_test.go index a3d4b2c4c1..370cc71a7e 100644 --- a/catalog/resource_schema_test.go +++ b/catalog/resource_schema_test.go @@ -71,7 +71,6 @@ func TestCreateSchemaWithOwner(t *testing.T) { Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ Owner: "administrators", - Name: "a", Comment: "c", }, Response: catalog.SchemaInfo{ @@ -127,7 +126,6 @@ func TestUpdateSchema(t *testing.T) { Method: "PATCH", Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ - Name: "a", Comment: "c", }, Response: catalog.SchemaInfo{ @@ -185,7 +183,6 @@ func TestUpdateSchemaOwnerWithOtherFields(t *testing.T) { Method: "PATCH", Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ - Name: "a", Comment: "d", }, Response: catalog.SchemaInfo{ @@ -243,7 +240,6 @@ func TestUpdateSchemaRollback(t *testing.T) { Method: "PATCH", Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ - Name: "a", Comment: "d", }, Response: apierr.APIErrorBody{ @@ -316,7 +312,6 @@ func TestUpdateSchemaRollback_Error(t *testing.T) { Method: "PATCH", Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ - Name: "a", Comment: "d", }, Response: apierr.APIErrorBody{ @@ -377,7 +372,6 @@ func TestUpdateSchemaForceNew(t *testing.T) { Method: "PATCH", Resource: "/api/2.1/unity-catalog/schemas/b.a", ExpectedRequest: catalog.UpdateSchema{ - Name: "a", Comment: "c", }, Response: catalog.SchemaInfo{ diff --git a/catalog/resource_share.go b/catalog/resource_share.go index 0f7e4fe35d..0066b085bf 100644 --- a/catalog/resource_share.go +++ b/catalog/resource_share.go @@ -237,6 +237,10 @@ func ResourceShare() common.Resource { } } + if !d.HasChangeExcept("owner") { + return nil + } + err = NewSharesAPI(ctx, c).update(d.Id(), ShareUpdates{ Updates: changes, }) diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go index ab7ef90fa1..5052f6d951 100644 --- a/catalog/resource_sql_table.go +++ b/catalog/resource_sql_table.go @@ -394,6 +394,7 @@ func ResourceSqlTable() common.Resource { return strings.EqualFold(strings.ToLower(old), strings.ToLower(new)) } s["storage_location"].DiffSuppressFunc = ucDirectoryPathSlashAndEmptySuppressDiff + s["view_definition"].DiffSuppressFunc = common.SuppressDiffWhitespaceChange s["cluster_id"].ConflictsWith = []string{"warehouse_id"} s["warehouse_id"].ConflictsWith = []string{"cluster_id"} diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index f68a55ed3d..1d1c3696e0 100644 --- a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -75,7 +75,7 @@ func ResourceStorageCredential() common.Resource { _, err = acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ CredentialInfo: &update, MetastoreId: metastoreId, - StorageCredentialName: 
storageCredential.CredentialInfo.Id, + StorageCredentialName: storageCredential.CredentialInfo.Name, }) if err != nil { return err @@ -152,6 +152,11 @@ func ResourceStorageCredential() common.Resource { return err } } + + if !d.HasChangeExcept("owner") { + return nil + } + update.Owner = "" _, err := acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ CredentialInfo: &update, @@ -191,6 +196,11 @@ func ResourceStorageCredential() common.Resource { return err } } + + if !d.HasChangeExcept("owner") { + return nil + } + update.Owner = "" _, err = w.StorageCredentials.Update(ctx, update) if err != nil { diff --git a/catalog/resource_storage_credential_test.go b/catalog/resource_storage_credential_test.go index 38c2840771..eb0897a27d 100644 --- a/catalog/resource_storage_credential_test.go +++ b/catalog/resource_storage_credential_test.go @@ -115,6 +115,72 @@ func TestCreateStorageCredentialWithOwner(t *testing.T) { }.ApplyNoError(t) } +func TestCreateAccountStorageCredentialWithOwner(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/accounts/account_id/metastores/metastore_id/storage-credentials", + ExpectedRequest: &catalog.AccountsCreateStorageCredential{ + MetastoreId: "metastore_id", + CredentialInfo: &catalog.CreateStorageCredential{ + Name: "storage_credential_name", + AwsIamRole: &catalog.AwsIamRole{ + RoleArn: "arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF", + }, + }, + }, + Response: catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "storage_credential_name", + }, + }, + }, + { + Method: "PUT", + Resource: "/api/2.0/accounts/account_id/metastores/metastore_id/storage-credentials/storage_credential_name", + ExpectedRequest: &catalog.AccountsUpdateStorageCredential{ + CredentialInfo: &catalog.UpdateStorageCredential{ + Name: "storage_credential_name", + Owner: "administrators", + AwsIamRole: &catalog.AwsIamRole{ + RoleArn: "arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF", + }, + }, + }, + Response: &catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "storage_credential_name", + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/accounts/account_id/metastores/metastore_id/storage-credentials/storage_credential_name?", + Response: &catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "storage_credential_name", + AwsIamRole: &catalog.AwsIamRole{ + RoleArn: "arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF", + }, + }, + }, + }, + }, + Resource: ResourceStorageCredential(), + AccountID: "account_id", + Create: true, + HCL: ` + name = "storage_credential_name" + metastore_id = "metastore_id" + aws_iam_role { + role_arn = "arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF" + } + owner = "administrators" + `, + }.ApplyNoError(t) +} + func TestCreateStorageCredentialsReadOnly(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/catalog/resource_volume.go b/catalog/resource_volume.go index 64bfdbe540..147907a920 100644 --- a/catalog/resource_volume.go +++ b/catalog/resource_volume.go @@ -2,6 +2,8 @@ package catalog import ( "context" + "fmt" + "strings" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" @@ -28,10 +30,22 @@ type VolumeInfo struct { VolumeType catalog.VolumeType `json:"volume_type" tf:"force_new"` } +func getNameFromId(id string) (string, error) { + split := 
strings.Split(id, ".") + if len(split) != 3 { + return "", fmt.Errorf("invalid id <%s>: id should be in the format catalog.schema.volume", id) + } + return split[2], nil +} + func ResourceVolume() common.Resource { s := common.StructToSchema(VolumeInfo{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { m["storage_location"].DiffSuppressFunc = ucDirectoryPathSlashAndEmptySuppressDiff + m["volume_path"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } return m }) return common.Resource{ @@ -56,7 +70,7 @@ func ResourceVolume() common.Resource { var updateVolumeRequestContent catalog.UpdateVolumeRequestContent common.DataToStructPointer(d, s, &updateVolumeRequestContent) - updateVolumeRequestContent.FullNameArg = d.Id() + updateVolumeRequestContent.Name = d.Id() _, err = w.Volumes.Update(ctx, updateVolumeRequestContent) if err != nil { return err @@ -68,11 +82,15 @@ func ResourceVolume() common.Resource { if err != nil { return err } - v, err := w.Volumes.ReadByFullNameArg(ctx, d.Id()) + v, err := w.Volumes.ReadByName(ctx, d.Id()) + if err != nil { + return err + } + err = common.StructToData(v, s, d) if err != nil { return err } - return common.StructToData(v, s, d) + return d.Set("volume_path", "/Volumes/"+strings.ReplaceAll(v.FullName, ".", "/")) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() @@ -81,18 +99,30 @@ func ResourceVolume() common.Resource { } var updateVolumeRequestContent catalog.UpdateVolumeRequestContent common.DataToStructPointer(d, s, &updateVolumeRequestContent) - updateVolumeRequestContent.FullNameArg = d.Id() + updateVolumeRequestContent.Name = d.Id() + userProvidedName := d.Get("name").(string) + storedName, err := getNameFromId(d.Id()) + if err != nil { + return err + } + if storedName != userProvidedName { + updateVolumeRequestContent.NewName = userProvidedName + } if d.HasChange("owner") { _, err := w.Volumes.Update(ctx, catalog.UpdateVolumeRequestContent{ - FullNameArg: updateVolumeRequestContent.FullNameArg, - Owner: updateVolumeRequestContent.Owner, + Name: updateVolumeRequestContent.Name, + Owner: updateVolumeRequestContent.Owner, }) if err != nil { return err } } + if !d.HasChangeExcept("owner") { + return nil + } + updateVolumeRequestContent.Owner = "" v, err := w.Volumes.Update(ctx, updateVolumeRequestContent) if err != nil { @@ -100,8 +130,8 @@ func ResourceVolume() common.Resource { // Rollback old, new := d.GetChange("owner") _, rollbackErr := w.Volumes.Update(ctx, catalog.UpdateVolumeRequestContent{ - FullNameArg: updateVolumeRequestContent.FullNameArg, - Owner: old.(string), + Name: updateVolumeRequestContent.Name, + Owner: old.(string), }) if rollbackErr != nil { return common.OwnerRollbackError(err, rollbackErr, old.(string), new.(string)) @@ -113,6 +143,7 @@ func ResourceVolume() common.Resource { // We need to update the resource Id because Name is updatable and FullName consists of Name, // So if we don't update the field then the requests would be made to old FullName which doesn't exists. 
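+	// The computed volume_path is also derived from the full name, so it is refreshed here together with the resource ID after a potential rename.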
d.SetId(v.FullName) + d.Set("volume_path", "/Volumes/"+strings.ReplaceAll(v.FullName, ".", "/")) return nil }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { @@ -120,7 +151,7 @@ func ResourceVolume() common.Resource { if err != nil { return err } - return w.Volumes.DeleteByFullNameArg(ctx, d.Id()) + return w.Volumes.DeleteByName(ctx, d.Id()) }, } } diff --git a/catalog/resource_volume_test.go b/catalog/resource_volume_test.go index b48a8c1746..39e19205d2 100644 --- a/catalog/resource_volume_test.go +++ b/catalog/resource_volume_test.go @@ -270,6 +270,7 @@ func TestVolumesRead(t *testing.T) { assert.Equal(t, "testCatalogName", d.Get("catalog_name")) assert.Equal(t, "testSchemaName", d.Get("schema_name")) assert.Equal(t, "This is a test comment.", d.Get("comment")) + assert.Equal(t, "/Volumes/testCatalogName/testSchemaName/testName", d.Get("volume_path")) } func TestResourceVolumeRead_Error(t *testing.T) { @@ -307,7 +308,7 @@ func TestVolumesUpdate(t *testing.T) { Method: http.MethodPatch, Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName", ExpectedRequest: catalog.UpdateVolumeRequestContent{ - Name: "testNameNew", + NewName: "testNameNew", Comment: "This is a new test comment.", }, Response: catalog.VolumeInfo{ @@ -357,6 +358,7 @@ func TestVolumesUpdate(t *testing.T) { assert.Equal(t, "testCatalogName", d.Get("catalog_name")) assert.Equal(t, "testSchemaName", d.Get("schema_name")) assert.Equal(t, "This is a new test comment.", d.Get("comment")) + assert.Equal(t, "/Volumes/testCatalogName/testSchemaName/testNameNew", d.Get("volume_path")) } func TestVolumesUpdateForceNewOnCatalog(t *testing.T) { @@ -386,7 +388,7 @@ func TestVolumesUpdateForceNewOnCatalog(t *testing.T) { Method: http.MethodPatch, Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName", ExpectedRequest: catalog.UpdateVolumeRequestContent{ - Name: "testNameNew", + NewName: "testNameNew", Comment: "This is a new test comment.", }, Response: catalog.VolumeInfo{ diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index bb330a2663..4d3c62f6f9 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -194,22 +194,22 @@ type S3StorageInfo struct { // GcsStorageInfo contains the struct for when storing files in GCS type GcsStorageInfo struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } // AbfssStorageInfo contains the struct for when storing files in ADLS type AbfssStorageInfo struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } // LocalFileInfo represents a local file on disk, e.g. in a customer's container. type LocalFileInfo struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } // WorkspaceFileInfo represents a file in the Databricks workspace. type WorkspaceFileInfo struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } // StorageInfo contains the struct for either DBFS or S3 storage depending on which one is relevant. 
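The `databricks_volume` changes above add a computed `volume_path` attribute derived from the volume's three-part full name. A minimal standalone sketch of that derivation follows, assuming a hypothetical `volumePathFromFullName` helper rather than the provider's own code:

```go
package main

import (
	"fmt"
	"strings"
)

// volumePathFromFullName mirrors the expression used by the Read and Update
// handlers above: "/Volumes/" + strings.ReplaceAll(fullName, ".", "/").
// The three-part check follows the same validation idea as getNameFromId.
func volumePathFromFullName(fullName string) (string, error) {
	if len(strings.Split(fullName, ".")) != 3 {
		return "", fmt.Errorf("invalid id <%s>: id should be in the format catalog.schema.volume", fullName)
	}
	return "/Volumes/" + strings.ReplaceAll(fullName, ".", "/"), nil
}

func main() {
	path, err := volumePathFromFullName("testCatalogName.testSchemaName.testName")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /Volumes/testCatalogName/testSchemaName/testName
}
```

For `testCatalogName.testSchemaName.testName` this yields `/Volumes/testCatalogName/testSchemaName/testName`, the value asserted on `volume_path` in `TestVolumesRead`.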
diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index 3450970415..b4c2cb8f37 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -58,17 +58,15 @@ func ZoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return false } -type ClusterResourceProvider struct{} - -func (ClusterResourceProvider) UnderlyingType() compute.ClusterSpec { - return compute.ClusterSpec{} +type ClusterSpec struct { + compute.ClusterSpec } -func (ClusterResourceProvider) Aliases() map[string]string { +func (ClusterSpec) Aliases() map[string]string { return map[string]string{"cluster_mount_infos": "cluster_mount_info"} } -func (ClusterResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[string]*schema.Schema { +func (ClusterSpec) CustomizeSchema(s map[string]*schema.Schema) map[string]*schema.Schema { common.CustomizeSchemaPath(s, "cluster_source").SetReadOnly() common.CustomizeSchemaPath(s, "enable_elastic_disk").SetComputed() common.CustomizeSchemaPath(s, "enable_local_disk_encryption").SetComputed() @@ -76,7 +74,7 @@ func (ClusterResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[ common.CustomizeSchemaPath(s, "driver_node_type_id").SetComputed().SetConflictsWith([]string{"driver_instance_pool_id", "instance_pool_id"}) common.CustomizeSchemaPath(s, "driver_instance_pool_id").SetComputed().SetConflictsWith([]string{"driver_node_type_id", "node_type_id"}) common.CustomizeSchemaPath(s, "ssh_public_keys").SetMaxItems(10) - common.CustomizeSchemaPath(s, "init_scripts").SetMaxItems(10).AddNewField("abfss", common.StructToSchema(InitScriptStorageInfo{}, nil)["abfss"]).AddNewField("gcs", common.StructToSchema(InitScriptStorageInfo{}, nil)["gcs"]) + common.CustomizeSchemaPath(s, "init_scripts").SetMaxItems(10) common.CustomizeSchemaPath(s, "init_scripts", "dbfs").SetDeprecated(DbfsDeprecationWarning) common.CustomizeSchemaPath(s, "init_scripts", "dbfs", "destination").SetRequired() common.CustomizeSchemaPath(s, "init_scripts", "s3", "destination").SetRequired() @@ -98,20 +96,7 @@ func (ClusterResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[ common.CustomizeSchemaPath(s, "aws_attributes").SetSuppressDiff().SetConflictsWith([]string{"azure_attributes", "gcp_attributes"}) common.CustomizeSchemaPath(s, "aws_attributes", "zone_id").SetCustomSuppressDiff(ZoneDiffSuppress) common.CustomizeSchemaPath(s, "azure_attributes").SetSuppressDiff().SetConflictsWith([]string{"aws_attributes", "gcp_attributes"}) - common.CustomizeSchemaPath(s, "gcp_attributes").SetSuppressDiff().SetConflictsWith([]string{"aws_attributes", "azure_attributes"}).AddNewField( - "use_preemptible_executors", - &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Deprecated: "Please use 'availability' instead.", - }, - ).AddNewField( - "zone_id", - &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - ) + common.CustomizeSchemaPath(s, "gcp_attributes").SetSuppressDiff().SetConflictsWith([]string{"aws_attributes", "azure_attributes"}) common.CustomizeSchemaPath(s).AddNewField("library", common.StructToSchema(libraries.ClusterLibraryList{}, func(ss map[string]*schema.Schema) map[string]*schema.Schema { @@ -171,7 +156,7 @@ func (ClusterResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[ } func resourceClusterSchema() map[string]*schema.Schema { - return common.ResourceProviderStructToSchema[compute.ClusterSpec](ClusterResourceProvider{}) + return common.StructToSchema(ClusterSpec{}, nil) } func 
resourceClusterCreate(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/common/client.go b/common/client.go index bab0728658..e7fa4dc2e4 100644 --- a/common/client.go +++ b/common/client.go @@ -267,6 +267,7 @@ func (c *DatabricksClient) ClientForHost(ctx context.Context, url string) (*Data Host: url, Username: c.Config.Username, Password: c.Config.Password, + AuthType: c.Config.AuthType, Token: c.Config.Token, ClientID: c.Config.ClientID, ClientSecret: c.Config.ClientSecret, diff --git a/common/customizable_schema.go b/common/customizable_schema.go index 75f1308d8e..14396c2f16 100644 --- a/common/customizable_schema.go +++ b/common/customizable_schema.go @@ -108,12 +108,36 @@ func (s *CustomizableSchema) SetMinItems(value int) *CustomizableSchema { func (s *CustomizableSchema) SetConflictsWith(value []string) *CustomizableSchema { if len(value) == 0 { - panic("SetConflictsWith cannot take in empty list") + panic("SetConflictsWith cannot take in an empty list") } s.Schema.ConflictsWith = value return s } +func (s *CustomizableSchema) SetExactlyOneOf(value []string) *CustomizableSchema { + if len(value) == 0 { + panic("SetExactlyOneOf cannot take in an empty list") + } + s.Schema.ExactlyOneOf = value + return s +} + +func (s *CustomizableSchema) SetAtLeastOneOf(value []string) *CustomizableSchema { + if len(value) == 0 { + panic("SetAtLeastOneOf cannot take in an empty list") + } + s.Schema.AtLeastOneOf = value + return s +} + +func (s *CustomizableSchema) SetRequiredWith(value []string) *CustomizableSchema { + if len(value) == 0 { + panic("SetRequiredWith cannot take in an empty list") + } + s.Schema.RequiredWith = value + return s +} + func (s *CustomizableSchema) SetDeprecated(reason string) *CustomizableSchema { s.Schema.Deprecated = reason return s diff --git a/common/customizable_schema_test.go b/common/customizable_schema_test.go index eda3b0e2da..7db449bb5a 100644 --- a/common/customizable_schema_test.go +++ b/common/customizable_schema_test.go @@ -73,6 +73,20 @@ func TestCustomizableSchemaSetConflictsWith(t *testing.T) { assert.Truef(t, len(testCustomizableSchemaScm["non_optional"].ConflictsWith) == 1, "conflictsWith should be set in field: non_optional") } +func TestCustomizableSchemaSetExactlyOneOf(t *testing.T) { + CustomizeSchemaPath(testCustomizableSchemaScm, "non_optional").SetExactlyOneOf([]string{"abc"}) + assert.Truef(t, len(testCustomizableSchemaScm["non_optional"].ExactlyOneOf) == 1, "ExactlyOneOf should be set in field: non_optional") +} + +func TestCustomizableSchemaAtLeastOneOf(t *testing.T) { + CustomizeSchemaPath(testCustomizableSchemaScm, "non_optional").SetAtLeastOneOf([]string{"abc"}) + assert.Truef(t, len(testCustomizableSchemaScm["non_optional"].AtLeastOneOf) == 1, "AtLeastOneOf should be set in field: non_optional") +} + +func TestCustomizableSchemaSetRequiredWith(t *testing.T) { + CustomizeSchemaPath(testCustomizableSchemaScm, "non_optional").SetRequiredWith([]string{"abc"}) + assert.Truef(t, len(testCustomizableSchemaScm["non_optional"].RequiredWith) == 1, "RequiredWith should be set in field: non_optional") +} func TestCustomizableSchemaSetDeprecated(t *testing.T) { CustomizeSchemaPath(testCustomizableSchemaScm, "non_optional").SetDeprecated("test reason") assert.Truef(t, testCustomizableSchemaScm["non_optional"].Deprecated == "test reason", "deprecated should be overriden in field: non_optional") diff --git a/common/reflect_resource.go b/common/reflect_resource.go index d7d603d12c..71a79362cd 100644 --- 
a/common/reflect_resource.go +++ b/common/reflect_resource.go @@ -38,19 +38,17 @@ var kindMap = map[reflect.Kind]string{ reflect.UnsafePointer: "UnsafePointer", } -// Generic interface for resource provider struct. Using CustomizeSchema and Aliases functions to keep track of additional information +// Generic interface for ResourceProvider. Using CustomizeSchema and Aliases functions to keep track of additional information // on top of the generated go-sdk struct. This is used to replace manually maintained structs with `tf` tags. -type ResourceProviderStruct[T any] interface { - UnderlyingType() T +type ResourceProvider interface { Aliases() map[string]string CustomizeSchema(map[string]*schema.Schema) map[string]*schema.Schema } -// Takes in a ResourceProviderStruct and converts that into a map from string to schema. -func ResourceProviderStructToSchema[T any](v ResourceProviderStruct[T]) map[string]*schema.Schema { - underlyingType := v.UnderlyingType() - rv := reflect.ValueOf(underlyingType) - scm := typeToSchema(rv, []string{}, v.Aliases()) +// Takes in a ResourceProvider and converts that into a map from string to schema. +func resourceProviderStructToSchema(v ResourceProvider) map[string]*schema.Schema { + rv := reflect.ValueOf(v) + scm := typeToSchema(rv, v.Aliases()) scm = v.CustomizeSchema(scm) return scm } @@ -63,15 +61,18 @@ func reflectKind(k reflect.Kind) string { return n } -func chooseFieldNameWithAliases(typeField reflect.StructField, fieldNamePath []string, aliases map[string]string) string { +func chooseFieldNameWithAliases(typeField reflect.StructField, aliases map[string]string) string { + // If nothing in the aliases map, return the field name from plain chooseFieldName method. + if len(aliases) == 0 { + return chooseFieldName(typeField) + } + jsonFieldName := getJsonFieldName(typeField) if jsonFieldName == "-" { return "-" } - aliasKey := strings.Join(append(fieldNamePath, jsonFieldName), ".") - - if value, ok := aliases[aliasKey]; ok { + if value, ok := aliases[jsonFieldName]; ok { return value } return jsonFieldName @@ -117,8 +118,15 @@ func MustSchemaPath(s map[string]*schema.Schema, path ...string) *schema.Schema // StructToSchema makes schema from a struct type & applies customizations from callback given func StructToSchema(v any, customize func(map[string]*schema.Schema) map[string]*schema.Schema) map[string]*schema.Schema { + // If the input 'v' is an instance of ResourceProvider, call resourceProviderStructToSchema instead. 
+ if rp, ok := v.(ResourceProvider); ok { + if customize != nil { + panic("customize should be nil if the input implements the ResourceProvider interface; use CustomizeSchema of ResourceProvider instead") + } + return resourceProviderStructToSchema(rp) + } rv := reflect.ValueOf(v) - scm := typeToSchema(rv, []string{}, map[string]string{}) + scm := typeToSchema(rv, map[string]string{}) if customize != nil { scm = customize(scm) } @@ -274,7 +282,7 @@ func listAllFields(v reflect.Value) []field { return fields } -func typeToSchema(v reflect.Value, path []string, aliases map[string]string) map[string]*schema.Schema { +func typeToSchema(v reflect.Value, aliases map[string]string) map[string]*schema.Schema { scm := map[string]*schema.Schema{} rk := v.Kind() if rk == reflect.Ptr { @@ -289,12 +297,8 @@ func typeToSchema(v reflect.Value, path []string, aliases map[string]string) map typeField := field.sf tfTag := typeField.Tag.Get("tf") - var fieldName string - if len(aliases) == 0 { - fieldName = chooseFieldName(typeField) - } else { - fieldName = chooseFieldNameWithAliases(typeField, path, aliases) - } + fieldName := chooseFieldNameWithAliases(typeField, aliases) + unwrappedAliases := unwrapAliasesMap(fieldName, aliases) if fieldName == "-" { continue } @@ -357,7 +361,7 @@ func typeToSchema(v reflect.Value, path []string, aliases map[string]string) map scm[fieldName].Type = schema.TypeList elem := typeField.Type.Elem() sv := reflect.New(elem).Elem() - nestedSchema := typeToSchema(sv, append(path, fieldName), aliases) + nestedSchema := typeToSchema(sv, unwrappedAliases) if strings.Contains(tfTag, "suppress_diff") { scm[fieldName].DiffSuppressFunc = diffSuppressor(scm[fieldName]) for _, v := range nestedSchema { @@ -375,7 +379,7 @@ func typeToSchema(v reflect.Value, path []string, aliases map[string]string) map elem := typeField.Type // changed from ptr sv := reflect.New(elem) // changed from ptr - nestedSchema := typeToSchema(sv, append(path, fieldName), aliases) + nestedSchema := typeToSchema(sv, unwrappedAliases) if strings.Contains(tfTag, "suppress_diff") { scm[fieldName].DiffSuppressFunc = diffSuppressor(scm[fieldName]) for _, v := range nestedSchema { @@ -405,7 +409,7 @@ func typeToSchema(v reflect.Value, path []string, aliases map[string]string) map case reflect.Struct: sv := reflect.New(elem).Elem() scm[fieldName].Elem = &schema.Resource{ - Schema: typeToSchema(sv, append(path, fieldName), aliases), + Schema: typeToSchema(sv, unwrappedAliases), } } default: @@ -424,7 +428,7 @@ func IsRequestEmpty(v any) (bool, error) { return false, fmt.Errorf("value of Struct is expected, but got %s: %#v", reflectKind(rv.Kind()), rv) } var isNotEmpty bool - err := iterFields(rv, []string{}, StructToSchema(v, nil), func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { + err := iterFields(rv, []string{}, StructToSchema(v, nil), map[string]string{}, func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { if isNotEmpty { return nil } @@ -450,7 +454,29 @@ func isGoSdk(v reflect.Value) bool { return false } -func iterFields(rv reflect.Value, path []string, s map[string]*schema.Schema, +// Unwraps aliases map given a fieldname. Should be called everytime we recursively call iterFields. +// +// NOTE: If the target field has an alias, we expect `fieldname` argument to be the alias. 
+// For example +// +// fieldName = "cluster" +// aliases = {"cluster.clusterName": "name", "libraries": "library"} +// would return: {"clusterName": "name"} +func unwrapAliasesMap(fieldName string, aliases map[string]string) map[string]string { + result := make(map[string]string) + prefix := fieldName + "." + for key, value := range aliases { + // Only keep the keys that have the prefix. + if strings.HasPrefix(key, prefix) && key != prefix { + result[key] = value + } + } + return result +} + +// Iterate through each field of the given reflect.Value object and execute a callback function with the corresponding +// terraform schema object as the input. +func iterFields(rv reflect.Value, path []string, s map[string]*schema.Schema, aliases map[string]string, cb func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error) error { rk := rv.Kind() if rk != reflect.Struct { @@ -463,7 +489,7 @@ func iterFields(rv reflect.Value, path []string, s map[string]*schema.Schema, fields := listAllFields(rv) for _, field := range fields { typeField := field.sf - fieldName := chooseFieldName(typeField) + fieldName := chooseFieldNameWithAliases(typeField, aliases) if fieldName == "-" { continue } @@ -489,7 +515,7 @@ func iterFields(rv reflect.Value, path []string, s map[string]*schema.Schema, return nil } -func collectionToMaps(v any, s *schema.Schema) ([]any, error) { +func collectionToMaps(v any, s *schema.Schema, aliases map[string]string) ([]any, error) { resultList := []any{} if sl, ok := v.([]string); ok { // most likely list of parameters to job task @@ -521,14 +547,15 @@ func collectionToMaps(v any, s *schema.Schema) ([]any, error) { } v = v.Elem() } - err := iterFields(v, []string{}, r.Schema, func(fieldSchema *schema.Schema, + err := iterFields(v, []string{}, r.Schema, aliases, func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { fieldName := path[len(path)-1] + newAliases := unwrapAliasesMap(fieldName, aliases) fieldValue := valueField.Interface() fieldPath := strings.Join(path, ".") switch fieldSchema.Type { case schema.TypeList, schema.TypeSet: - nv, err := collectionToMaps(fieldValue, fieldSchema) + nv, err := collectionToMaps(fieldValue, fieldSchema, newAliases) if err != nil { return fmt.Errorf("%s: %v", path, err) } @@ -570,11 +597,12 @@ func isValueNilOrEmpty(valueField *reflect.Value, fieldPath string) bool { // StructToData reads result using schema onto resource data func StructToData(result any, s map[string]*schema.Schema, d *schema.ResourceData) error { + aliases := getAliasesMapFromStruct(result) v := reflect.ValueOf(result) if v.Kind() == reflect.Ptr { v = v.Elem() } - return iterFields(v, []string{}, s, func( + return iterFields(v, []string{}, s, aliases, func( fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { fieldValue := valueField.Interface() if fieldValue == nil { @@ -599,7 +627,7 @@ func StructToData(result any, s map[string]*schema.Schema, d *schema.ResourceDat // validation, so we don't to it twice return d.Set(fieldPath, fieldValue) } - nv, err := collectionToMaps(fieldValue, fieldSchema) + nv, err := collectionToMaps(fieldValue, fieldSchema, aliases) if err != nil { return fmt.Errorf("%s: %v", fieldPath, err) } @@ -633,7 +661,8 @@ func DiffToStructPointer(d attributeGetter, scm map[string]*schema.Schema, resul panic(fmt.Errorf("pointer is expected, but got %s: %#v", reflectKind(rk), result)) } rv = rv.Elem() - err := readReflectValueFromData([]string{}, d, rv, scm) + aliases := 
getAliasesMapFromStruct(result) + err := readReflectValueFromData([]string{}, d, rv, scm, aliases) if err != nil { panic(err) } @@ -641,13 +670,14 @@ func DiffToStructPointer(d attributeGetter, scm map[string]*schema.Schema, resul // DataToStructPointer reads resource data with given schema onto result pointer. Panics. func DataToStructPointer(d *schema.ResourceData, scm map[string]*schema.Schema, result any) { + aliases := getAliasesMapFromStruct(result) rv := reflect.ValueOf(result) rk := rv.Kind() if rk != reflect.Ptr { panic(fmt.Errorf("pointer is expected, but got %s: %#v", reflectKind(rk), result)) } rv = rv.Elem() - err := readReflectValueFromData([]string{}, d, rv, scm) + err := readReflectValueFromData([]string{}, d, rv, scm, aliases) if err != nil { panic(err) } @@ -655,14 +685,26 @@ func DataToStructPointer(d *schema.ResourceData, scm map[string]*schema.Schema, // DataToReflectValue reads reflect value from data func DataToReflectValue(d *schema.ResourceData, s map[string]*schema.Schema, rv reflect.Value) error { - return readReflectValueFromData([]string{}, d, rv, s) + // TODO: Pass in the right aliases map. + return readReflectValueFromData([]string{}, d, rv, s, map[string]string{}) +} + +// Get the aliases map from the given struct if it is an instance of ResourceProvider. +// NOTE: This does not return aliases defined on `tf` tags. +func getAliasesMapFromStruct(s any) map[string]string { + if v, ok := s.(ResourceProvider); ok { + return v.Aliases() + } + return map[string]string{} } func readReflectValueFromData(path []string, d attributeGetter, - rv reflect.Value, s map[string]*schema.Schema) error { - return iterFields(rv, path, s, func(fieldSchema *schema.Schema, + rv reflect.Value, s map[string]*schema.Schema, aliases map[string]string) error { + return iterFields(rv, path, s, aliases, func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { fieldPath := strings.Join(path, ".") + fieldName := path[len(path)-1] + newAliases := unwrapAliasesMap(fieldName, aliases) raw, ok := d.GetOk(fieldPath) if !ok { return nil @@ -699,13 +741,13 @@ func readReflectValueFromData(path []string, d attributeGetter, rawSet := raw.(*schema.Set) rawList := rawSet.List() return readListFromData(path, d, rawList, valueField, - fieldSchema, func(i int) string { + fieldSchema, newAliases, func(i int) string { return strconv.Itoa(rawSet.F(rawList[i])) }) case schema.TypeList: // here we rely on Terraform SDK to perform validation, so we don't to it twice rawList := raw.([]any) - return readListFromData(path, d, rawList, valueField, fieldSchema, strconv.Itoa) + return readListFromData(path, d, rawList, valueField, fieldSchema, newAliases, strconv.Itoa) default: return fmt.Errorf("%s[%v] unsupported field type", fieldPath, raw) } @@ -758,7 +800,7 @@ func primitiveReflectValueFromInterface(rk reflect.Kind, } func readListFromData(path []string, d attributeGetter, - rawList []any, valueField *reflect.Value, fieldSchema *schema.Schema, + rawList []any, valueField *reflect.Value, fieldSchema *schema.Schema, aliases map[string]string, offsetConverter func(i int) string) error { if len(rawList) == 0 { return nil @@ -772,7 +814,7 @@ func readListFromData(path []string, d attributeGetter, // here we rely on Terraform SDK to perform validation, so we don't to it twice nestedResource := fieldSchema.Elem.(*schema.Resource) nestedPath := append(path, offsetConverter(0)) - return readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema) + return 
readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema, aliases) case reflect.Struct: // code path for setting the struct value is different from pointer value // in a single way: we set the field only after readReflectValueFromData @@ -781,7 +823,7 @@ func readListFromData(path []string, d attributeGetter, ve := vstruct.Elem() nestedResource := fieldSchema.Elem.(*schema.Resource) nestedPath := append(path, offsetConverter(0)) - err := readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema) + err := readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema, aliases) if err != nil { return err } @@ -800,7 +842,7 @@ func readListFromData(path []string, d attributeGetter, nestedPath := append(path, offsetConverter(i)) vpointer := reflect.New(valueField.Type().Elem()) ve := vpointer.Elem() - err := readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema) + err := readReflectValueFromData(nestedPath, d, ve, nestedResource.Schema, aliases) if err != nil { return err } diff --git a/common/reflect_resource_test.go b/common/reflect_resource_test.go index 55d1a0712e..7fce027689 100644 --- a/common/reflect_resource_test.go +++ b/common/reflect_resource_test.go @@ -45,7 +45,7 @@ func TestChooseFieldName(t *testing.T) { func TestChooseFieldNameWithAliasesMap(t *testing.T) { assert.Equal(t, "foo", chooseFieldNameWithAliases(reflect.StructField{ Tag: `json:"bar"`, - }, []string{"a"}, map[string]string{"a.bar": "foo"})) + }, map[string]string{"bar": "foo"})) } type testSliceItem struct { @@ -217,6 +217,69 @@ type Dummy struct { Other *Address `json:"other,omitempty"` } +type AddressNoTfTag struct { + Line string `json:"line"` + Lijn string `json:"lijn"` + IsPrimary bool `json:"primary"` + + OptionalString string `json:"optional_string,omitempty"` + RequiredString string `json:"required_string"` +} + +type DummyNoTfTag struct { + Enabled bool `json:"enabled"` + Workers int `json:"workers,omitempty"` + Description string `json:"description,omitempty"` + Addresses []AddressNoTfTag `json:"addresses,omitempty"` + Things []string `json:"things,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Home *AddressNoTfTag `json:"home,omitempty"` + House *AddressNoTfTag `json:"house,omitempty"` + Other *AddressNoTfTag `json:"other,omitempty"` +} + +type DummyResourceProvider struct { + DummyNoTfTag +} + +func (DummyResourceProvider) Aliases() map[string]string { + return map[string]string{"enabled": "enabled_alias", + "addresses.primary": "primary_alias"} +} + +func (DummyResourceProvider) CustomizeSchema(s map[string]*schema.Schema) map[string]*schema.Schema { + CustomizeSchemaPath(s, "addresses").SetMinItems(1) + CustomizeSchemaPath(s, "addresses").SetMaxItems(10) + CustomizeSchemaPath(s, "tags").SetMaxItems(5) + CustomizeSchemaPath(s, "home").SetSuppressDiff() + CustomizeSchemaPath(s, "things").Schema.Type = schema.TypeSet + return s +} + +var dummy = DummyNoTfTag{ + Enabled: true, + Workers: 1004, + Description: "something", + Addresses: []AddressNoTfTag{ + { + Line: "abc", + IsPrimary: false, + }, + { + Line: "def", + IsPrimary: true, + }, + }, + Things: []string{"one", "two", "two"}, + Tags: map[string]string{ + "Foo": "Bar", + }, + Home: &AddressNoTfTag{ + Line: "bcd", + IsPrimary: true, + }, +} + func TestStructToDataAndBack(t *testing.T) { d := schema.TestResourceDataRaw(t, scm, map[string]any{}) d.MarkNewResource() @@ -301,7 +364,7 @@ func TestPrimitiveReflectValueFromInterface(t *testing.T) { func TestIterFields(t *testing.T) { v := reflect.ValueOf("x") - 
err := iterFields(v, []string{"x"}, scm, nil) + err := iterFields(v, []string{"x"}, scm, nil, nil) assert.EqualError(t, err, "value of Struct is expected, but got String: \"x\"") v = reflect.ValueOf(testStruct{}) @@ -309,7 +372,7 @@ func TestIterFields(t *testing.T) { "integer": { Type: schema.TypeInt, }, - }, nil) + }, nil, nil) assert.EqualError(t, err, "inconsistency: integer has omitempty, but is not optional") err = iterFields(v, []string{}, map[string]*schema.Schema{ @@ -318,7 +381,7 @@ func TestIterFields(t *testing.T) { Default: nil, Optional: true, }, - }, nil) + }, nil, nil) assert.EqualError(t, err, "inconsistency: non_optional is optional, default is empty, but has no omitempty") err = iterFields(v, []string{}, map[string]*schema.Schema{ @@ -327,23 +390,92 @@ func TestIterFields(t *testing.T) { Default: "_", Optional: true, }, - }, func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { + }, nil, func(fieldSchema *schema.Schema, path []string, valueField *reflect.Value) error { return fmt.Errorf("test error") }) assert.EqualError(t, err, "non_optional: test error") } func TestCollectionToMaps(t *testing.T) { - v, err := collectionToMaps([]string{"a", "b"}, nil) + v, err := collectionToMaps([]string{"a", "b"}, nil, nil) assert.NoError(t, err) assert.Equal(t, []any{"a", "b"}, v) _, err = collectionToMaps([]int{1, 2}, &schema.Schema{ Elem: schema.TypeBool, - }) + }, nil) assert.EqualError(t, err, "not resource") } +func TestStructToSchemaWithResourceProviderCustomization(t *testing.T) { + s := StructToSchema(DummyResourceProvider{}, nil) + assert.NotNil(t, s) + assert.Equal(t, 5, s["tags"].MaxItems) + assert.Equal(t, 10, s["addresses"].MaxItems) +} + +func TestStructToSchemaWithResourceProviderAliases(t *testing.T) { + s := StructToSchema(DummyResourceProvider{}, nil) + sp, err := SchemaPath(s, "enabled_alias") + assert.NoError(t, err) + assert.Equal(t, schema.TypeBool, sp.Type) +} + +func TestStructToDataWithResourceProviderStruct(t *testing.T) { + s := StructToSchema(DummyResourceProvider{}, nil) + + dummyResourceProvider := DummyResourceProvider{DummyNoTfTag: dummy} + d := schema.TestResourceDataRaw(t, s, map[string]any{}) + d.MarkNewResource() + err := StructToData(dummyResourceProvider, s, d) + assert.NoError(t, err) + + assert.Equal(t, "something", d.Get("description")) + assert.Equal(t, true, d.Get("enabled_alias")) // Testing aliases. + assert.Equal(t, 2, d.Get("addresses.#")) + + assert.NotNil(t, s["home"].DiffSuppressFunc) + assert.True(t, s["home"].DiffSuppressFunc("home.#", "1", "0", d)) + assert.False(t, s["home"].DiffSuppressFunc("home.#", "1", "1", d)) + + { + //lint:ignore SA1019 Empty optional string should not be set. + _, ok := d.GetOkExists("addresses.0.optional_string") + assert.Falsef(t, ok, "Empty optional string should not be set in ResourceData") + } + + { + //lint:ignore SA1019 Empty required string should be set. 
+ _, ok := d.GetOkExists("addresses.0.required_string") + assert.Truef(t, ok, "Empty required string should be set in ResourceData") + } +} + +func TestDataToStructPointerWithResourceProviderStruct(t *testing.T) { + s := StructToSchema(DummyResourceProvider{}, nil) + d := schema.TestResourceDataRaw(t, s, map[string]any{}) + d.MarkNewResource() + dummyResourceProvider := DummyResourceProvider{DummyNoTfTag: dummy} + err := StructToData(dummyResourceProvider, s, d) + assert.NoError(t, err) + var dummyCopy DummyResourceProvider + DataToStructPointer(d, s, &dummyCopy) + + assert.Equal(t, len(dummyCopy.Addresses), len(dummy.Addresses)) + assert.Equal(t, dummyCopy.Enabled, dummy.Enabled) + assert.Len(t, dummyCopy.Things, 2) + + err = d.Set("addresses", []any{ + map[string]string{ + "line": "ABC", + "lijn": "CBA", + }, + }) + assert.NoError(t, err) + + DataToStructPointer(d, s, &dummyCopy) +} + func TestStructToData(t *testing.T) { s := StructToSchema(Dummy{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { return s @@ -454,7 +586,7 @@ func TestTypeToSchemaNoStruct(t *testing.T) { fmt.Sprintf("%s", p)) }() v := reflect.ValueOf(1) - typeToSchema(v, []string{}, map[string]string{}) + typeToSchema(v, nil) } func TestTypeToSchemaUnsupported(t *testing.T) { @@ -467,7 +599,7 @@ func TestTypeToSchemaUnsupported(t *testing.T) { New chan int `json:"new"` } v := reflect.ValueOf(nonsense{}) - typeToSchema(v, []string{}, map[string]string{}) + typeToSchema(v, nil) } type data map[string]any @@ -515,11 +647,11 @@ func TestDiffToStructPointer(t *testing.T) { } func TestReadListFromData(t *testing.T) { - err := readListFromData([]string{}, data{}, []any{}, nil, nil, nil) + err := readListFromData([]string{}, data{}, []any{}, nil, nil, nil, nil) assert.NoError(t, err) x := reflect.ValueOf(0) - err = readListFromData([]string{}, data{}, []any{1}, &x, nil, nil) + err = readListFromData([]string{}, data{}, []any{1}, &x, nil, nil, nil) assert.EqualError(t, err, "[[1]] unknown collection field") } @@ -541,7 +673,7 @@ func TestReadReflectValueFromDataCornerCases(t *testing.T) { var n Nonsense v := reflect.ValueOf(&n) rv := v.Elem() - err := readReflectValueFromData([]string{}, data{"new": 0.123, "invalid": 1}, rv, s) + err := readReflectValueFromData([]string{}, data{"new": 0.123, "invalid": 1}, rv, s, nil) assert.EqualError(t, err, "invalid: invalid[1] unsupported field type") } diff --git a/common/util.go b/common/util.go index 91eeb3b0e6..5c4013c70d 100644 --- a/common/util.go +++ b/common/util.go @@ -2,7 +2,9 @@ package common import ( "context" + "log" "regexp" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -27,3 +29,8 @@ func GetTerraformVersionFromContext(ctx context.Context) string { func IsExporter(ctx context.Context) bool { return GetTerraformVersionFromContext(ctx) == "exporter" } + +func SuppressDiffWhitespaceChange(k, old, new string, d *schema.ResourceData) bool { + log.Printf("[DEBUG] Suppressing diff for %v: old=%#v new=%#v", k, old, new) + return strings.TrimSpace(old) == strings.TrimSpace(new) +} diff --git a/common/util_test.go b/common/util_test.go index d6fb6f3e75..662bc84909 100644 --- a/common/util_test.go +++ b/common/util_test.go @@ -25,3 +25,8 @@ func TestGetTerraformVersionFromContext(t *testing.T) { // assert.True(t, IsExporter(ctx)) } + +func TestSuppressDiffWhitespaceChange(t *testing.T) { + assert.True(t, SuppressDiffWhitespaceChange("k", "value", " value ", nil)) + assert.False(t, SuppressDiffWhitespaceChange("k", "value", "new_value", nil)) 
+} diff --git a/common/version.go b/common/version.go index 11c01f4ac1..596f1eedc0 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.35.0" + version = "1.37.1" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index ed5d4cd1ee..6cf4f73618 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -91,6 +91,6 @@ In addition to all arguments above, the following attributes are exported: The following resources are used in the same context: * [Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection](../guides/aws-e2-firewall-hub-and-spoke.md) guide. -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_instance_profile](../resources/instance_profile.md) to manage AWS EC2 instance profiles that users can launch [databricks_cluster](../resources/cluster.md) and access data, like [databricks_mount](../resources/mount.md). * [databricks_mount](../resources/mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. diff --git a/docs/data-sources/aws_unity_catalog_policy.md b/docs/data-sources/aws_unity_catalog_policy.md new file mode 100644 index 0000000000..9077e50b52 --- /dev/null +++ b/docs/data-sources/aws_unity_catalog_policy.md @@ -0,0 +1,75 @@ +--- +subcategory: "Deployment" +--- +# databricks_aws_unity_catalog_policy Data Source + +-> **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default) in case of any questions. + +This data source constructs necessary AWS Unity Catalog policy for you, which is based on [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). 
+ +## Example Usage + +```hcl +data "databricks_aws_unity_catalog_policy" "this" { + aws_account_id = var.aws_account_id + bucket_name = "databricks-bucket" + role_name = "databricks-role" + kms_name = "databricks-kms" +} + +data "aws_iam_policy_document" "passrole_for_uc" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + identifiers = [ + "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" # Databricks Account ID + ] + type = "AWS" + } + condition { + test = "StringEquals" + variable = "sts:ExternalId" + values = [var.databricks_account_id] + } + } + statement { + sid = "ExplicitSelfRoleAssumption" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${var.aws_account_id}:root"] + } + condition { + test = "ArnLike" + variable = "aws:PrincipalArn" + values = ["arn:aws:iam::${var.aws_account_id}:role/${var.prefix}-uc-access"] + } + } +} + +resource "aws_iam_policy" "unity_metastore" { + name = "${var.prefix}-unity-catalog-metastore-access-iam-policy" + policy = data.databricks_aws_unity_catalog_policy.this.json +} + +resource "aws_iam_role" "metastore_data_access" { + name = "${var.prefix}-uc-access" + assume_role_policy = data.aws_iam_policy_document.passrole_for_uc.json + managed_policy_arns = [aws_iam_policy.unity_metastore.arn] +} +``` + +## Argument Reference + +* `aws_account_id` (Required) The Account ID of the current AWS account (not your Databricks account). +* `bucket_name` (Required) The name of the S3 bucket used as root storage location for [managed tables](https://docs.databricks.com/data-governance/unity-catalog/index.html#managed-table) in Unity Catalog. +* `role_name` (Required) The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). +* `kms_name` (Optional) If encryption is enabled, provide the name of the KMS key that encrypts the S3 bucket contents. If encryption is disabled, do not provide this argument. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `json` - AWS IAM Policy JSON document diff --git a/docs/data-sources/catalogs.md b/docs/data-sources/catalogs.md index 5afd76d194..5c20123c89 100644 --- a/docs/data-sources/catalogs.md +++ b/docs/data-sources/catalogs.md @@ -3,6 +3,8 @@ subcategory: "Unity Catalog" --- # databricks_catalogs Data Source +-> **Note** This data source could be only used with workspace-level provider! + -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. Retrieves a list of [databricks_catalog](../resources/catalog.md) ids, that were created by Terraform or manually, so that special handling could be applied. 
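Referring back to the `unwrapAliasesMap` helper added in `common/reflect_resource.go` above: when schema generation recurses into a nested field, only alias entries scoped under `<fieldName>.` are carried along. A standalone sketch of that filtering, copied here purely for illustration and reusing the alias map from `DummyResourceProvider` in the tests:

```go
package main

import (
	"fmt"
	"strings"
)

// unwrapAliasesMap is a standalone copy, for illustration, of the helper in
// common/reflect_resource.go: it keeps only the alias entries whose keys are
// scoped under "<fieldName>.", i.e. the ones relevant to the nested field
// being recursed into.
func unwrapAliasesMap(fieldName string, aliases map[string]string) map[string]string {
	result := make(map[string]string)
	prefix := fieldName + "."
	for key, value := range aliases {
		if strings.HasPrefix(key, prefix) && key != prefix {
			result[key] = value
		}
	}
	return result
}

func main() {
	// Alias map from DummyResourceProvider in the tests above.
	aliases := map[string]string{
		"enabled":           "enabled_alias",
		"addresses.primary": "primary_alias",
	}
	// Recursing into the "addresses" block keeps only the entry scoped to it.
	fmt.Println(unwrapAliasesMap("addresses", aliases)) // map[addresses.primary:primary_alias]
}
```

Top-level entries such as `enabled` are dropped once recursion moves below the field they apply to.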
diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index bb442e7cf6..7e5cdbba2b 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -56,7 +56,7 @@ This data source exports the following attributes: The following resources are often used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index 70995952f7..60436d1ad7 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -40,10 +40,10 @@ This data source exports the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. * [databricks_job](../resources/job.md) to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a [databricks_cluster](../resources/cluster.md). * [databricks_library](../resources/library.md) to install a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](../resources/cluster.md). -* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). diff --git a/docs/data-sources/current_config.md b/docs/data-sources/current_config.md index 7631e78acf..1d24ff3ecf 100644 --- a/docs/data-sources/current_config.md +++ b/docs/data-sources/current_config.md @@ -51,7 +51,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). 
* [databricks_notebook](../resources/notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_repo](../resources/repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/data-sources/current_metastore.md b/docs/data-sources/current_metastore.md index e3bd858fa1..d2add1a2eb 100644 --- a/docs/data-sources/current_metastore.md +++ b/docs/data-sources/current_metastore.md @@ -38,7 +38,7 @@ This data source exports the following attributes: * `storage_root_credential_id` - ID of a storage credential used for the `storage_root`. * `storage_root_credential_name` - Name of a storage credential used for the `storage_root`. * `default_data_access_config_id` - the ID of the default data access configuration. - * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - the expiration duration in seconds on recipient data access tokens. * `delta_sharing_organization_name` - The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. * `created_at` - Timestamp (in milliseconds) when the current metastore was created. diff --git a/docs/data-sources/current_user.md b/docs/data-sources/current_user.md index 80d8a920f6..47a5fdc9c3 100644 --- a/docs/data-sources/current_user.md +++ b/docs/data-sources/current_user.md @@ -72,7 +72,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). * [databricks_notebook](../resources/notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_repo](../resources/repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/data-sources/dbfs_file.md b/docs/data-sources/dbfs_file.md index 7e71213951..16a14c5479 100644 --- a/docs/data-sources/dbfs_file.md +++ b/docs/data-sources/dbfs_file.md @@ -15,10 +15,11 @@ data "databricks_dbfs_file" "report" { limit_file_size = "true" } ``` + ## Argument Reference * `path` - (Required) Path on DBFS for the file from which to get content. -* `limit_file_size` - (Required - boolean) Do not load content for files larger than 4MB. +* `limit_file_size` - (Required - boolean) Do not load content for files larger than 4MB. ## Attribute Reference @@ -31,7 +32,7 @@ This data source exports the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_dbfs_file_paths](dbfs_file_paths.md) data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). 
* [databricks_dbfs_file](../resources/dbfs_file.md) to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_mount](../resources/mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. diff --git a/docs/data-sources/dbfs_file_paths.md b/docs/data-sources/dbfs_file_paths.md index 7fe88130df..caee7471dd 100644 --- a/docs/data-sources/dbfs_file_paths.md +++ b/docs/data-sources/dbfs_file_paths.md @@ -15,6 +15,7 @@ data "databricks_dbfs_file_paths" "partitions" { recursive = false } ``` + ## Argument Reference * `path` - (Required) Path on DBFS for the file to perform listing @@ -26,12 +27,11 @@ This data source exports the following attributes: * `path_list` - returns list of objects with `path` and `file_size` attributes in each - ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_dbfs_file](dbfs_file.md) data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_dbfs_file_paths](dbfs_file_paths.md) data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_dbfs_file](../resources/dbfs_file.md) to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). diff --git a/docs/data-sources/group.md b/docs/data-sources/group.md index 859c864788..aa6481cd36 100644 --- a/docs/data-sources/group.md +++ b/docs/data-sources/group.md @@ -48,12 +48,11 @@ Data source exposes the following attributes: * `allow_instance_pool_create` - True if group members can create [instance pools](../resources/instance_pool.md) * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `groups/Some Group`. - ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). * [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. diff --git a/docs/data-sources/instance_pool.md b/docs/data-sources/instance_pool.md index b1bc904264..052e4dea28 100644 --- a/docs/data-sources/instance_pool.md +++ b/docs/data-sources/instance_pool.md @@ -35,4 +35,3 @@ Data source exposes the following attributes: - `id` - The id of the instance pool. - `pool_info` - block describing instance pool and its state. Check documentation for [databricks_instance_pool](../resources/instance_pool.md) for a list of exposed attributes. 
- diff --git a/docs/data-sources/instance_profiles.md b/docs/data-sources/instance_profiles.md index 1d4172446e..3339305715 100644 --- a/docs/data-sources/instance_profiles.md +++ b/docs/data-sources/instance_profiles.md @@ -26,6 +26,7 @@ There are no arguments available for this data source. ## Attribute Reference This data source exports the following attributes: + * `instance_profiles` - Set of objects for a [databricks_instance_profile](../resources/instance_profile.md). This contains the following attributes: * `name` - Name of the instance profile. * `arn` - ARN of the instance profile. diff --git a/docs/data-sources/job.md b/docs/data-sources/job.md index fd195799e8..ec4ba1f286 100755 --- a/docs/data-sources/job.md +++ b/docs/data-sources/job.md @@ -26,7 +26,6 @@ output "job_num_workers" { This data source exports the following attributes: - * `id` - the id of [databricks_job](../resources/job.md) if the resource was matched by name. * `name` - the job name of [databricks_job](../resources/job.md) if the resource was matched by id. * `job_settings` - the same fields as in [databricks_job](../resources/job.md). diff --git a/docs/data-sources/metastore.md b/docs/data-sources/metastore.md index 1ac6654711..f8fbf44ae6 100644 --- a/docs/data-sources/metastore.md +++ b/docs/data-sources/metastore.md @@ -3,6 +3,8 @@ subcategory: "Unity Catalog" --- # databricks_metastore Data Source +-> **Note** This data source could be only used with account-level provider! + Retrieves information about metastore for a given id of [databricks_metastore](../resources/metastore.md) object, that was created by Terraform or manually, so that special handling could be applied. -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _authentication is not configured for provider_ errors. @@ -42,7 +44,7 @@ This data source exports the following attributes: * `name` - Name of metastore. * `storage_root` - Path on cloud storage account, where managed `databricks_table` are stored. * `owner` - Username/groupname/sp application_id of the metastore owner. - * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - Used to set expiration duration in seconds on recipient data access tokens. * `delta_sharing_organization_name` - The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. diff --git a/docs/data-sources/metastores.md b/docs/data-sources/metastores.md index 4ee64096ff..a6dbe45c84 100644 --- a/docs/data-sources/metastores.md +++ b/docs/data-sources/metastores.md @@ -3,6 +3,8 @@ subcategory: "Unity Catalog" --- # databricks_metastores Data Source +-> **Note** This data source could be only used with account-level provider! 
+ Retrieves a mapping of name to id of [databricks_metastore](../resources/metastore.md) objects, that were created by Terraform or manually, so that special handling could be applied. -> **Note** [`account_id`](../index.md#account_id) provider configuration property is required for this resource to work. Data resource will error in case of metastores with duplicate names. This data source is only available for users & service principals with account admin status diff --git a/docs/data-sources/mlflow_model.md b/docs/data-sources/mlflow_model.md index c05af7fcbc..dcd69a3a86 100644 --- a/docs/data-sources/mlflow_model.md +++ b/docs/data-sources/mlflow_model.md @@ -63,10 +63,10 @@ resource "databricks_model_serving" "this" { This data source exports the following attributes: * `model` - Model object - * `description` - User-specified description for the object. - * `id` - Unique identifier for the object. - * `latest_versions` - Array of model versions, each the latest version for its stage. - * `name` - Name of the model. - * `permission_level` - Permission level of the requesting user on the object. For what is allowed at each level, see MLflow Model permissions. - * `tags` - Array of tags associated with the model. - * `user_id` - The username of the user that created the object. + * `description` - User-specified description for the object. + * `id` - Unique identifier for the object. + * `latest_versions` - Array of model versions, each the latest version for its stage. + * `name` - Name of the model. + * `permission_level` - Permission level of the requesting user on the object. For what is allowed at each level, see MLflow Model permissions. + * `tags` - Array of tags associated with the model. + * `user_id` - The username of the user that created the object. diff --git a/docs/data-sources/mws_workspaces.md b/docs/data-sources/mws_workspaces.md index 9474a4e46d..87c0590e09 100755 --- a/docs/data-sources/mws_workspaces.md +++ b/docs/data-sources/mws_workspaces.md @@ -11,7 +11,7 @@ Lists all [databricks_mws_workspaces](../resources/mws_workspaces.md) in Databri ## Example Usage -Listing all workspaces in +Listing all workspaces in ```hcl provider "databricks" { diff --git a/docs/data-sources/node_type.md b/docs/data-sources/node_type.md index 2152addb78..43bb36029b 100644 --- a/docs/data-sources/node_type.md +++ b/docs/data-sources/node_type.md @@ -40,12 +40,12 @@ resource "databricks_cluster" "research" { Data source allows you to pick groups by the following attributes -* `min_memory_gb` - (Optional) Minimum amount of memory per node in gigabytes. Defaults to *0*. -* `gb_per_core` - (Optional) Number of gigabytes per core available on instance. Conflicts with `min_memory_gb`. Defaults to *0*. -* `min_cores` - (Optional) Minimum number of CPU cores available on instance. Defaults to *0*. -* `min_gpus` - (Optional) Minimum number of GPU's attached to instance. Defaults to *0*. -* `local_disk` - (Optional) Pick only nodes with local storage. Defaults to *false*. -* `local_disk_min_size` - (Optional) Pick only nodes that have size local storage greater or equal to given value. Defaults to *0*. +* `min_memory_gb` - (Optional) Minimum amount of memory per node in gigabytes. Defaults to _0_. +* `gb_per_core` - (Optional) Number of gigabytes per core available on instance. Conflicts with `min_memory_gb`. Defaults to _0_. +* `min_cores` - (Optional) Minimum number of CPU cores available on instance. Defaults to _0_. +* `min_gpus` - (Optional) Minimum number of GPU's attached to instance. 
Defaults to _0_. +* `local_disk` - (Optional) Pick only nodes with local storage. Defaults to _false_. +* `local_disk_min_size` - (Optional) Pick only nodes that have size local storage greater or equal to given value. Defaults to _0_. * `category` - (Optional, case insensitive string) Node category, which can be one of (depending on the cloud environment, could be checked with `databricks clusters list-node-types -o json|jq '.node_types[]|.category'|sort |uniq`): * `General Purpose` (all clouds) * `General Purpose (HDD)` (Azure) @@ -54,12 +54,12 @@ Data source allows you to pick groups by the following attributes * `Memory Optimized (Remote HDD)` (Azure) * `Storage Optimized` (AWS, Azure) * `GPU Accelerated` (AWS, Azure) -* `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to *false*. -* `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to *false*. -* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton CPUs. Default to *false*. -* `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to *false*. -* `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to *false*. -* `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to *false*. +* `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to _false_. +* `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to _false_. +* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton CPUs. Default to _false_. +* `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to _false_. +* `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to _false_. +* `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to _false_. ## Attribute Reference @@ -71,7 +71,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. 
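Referring back to the `SuppressDiffWhitespaceChange` helper added in `common/util.go` above: it treats two values as equal once surrounding whitespace is trimmed, so plans do not show diffs for whitespace-only edits. A standalone sketch of that comparison (the lowercase helper name is illustrative, not the provider's exported function, and the unused `k` and `d` arguments are dropped for brevity):

```go
package main

import (
	"fmt"
	"strings"
)

// suppressDiffWhitespaceChange sketches the comparison performed by the
// provider's SuppressDiffWhitespaceChange: values are considered unchanged
// when they differ only in leading or trailing whitespace.
func suppressDiffWhitespaceChange(old, new string) bool {
	return strings.TrimSpace(old) == strings.TrimSpace(new)
}

func main() {
	fmt.Println(suppressDiffWhitespaceChange("value", " value "))   // true: whitespace-only change
	fmt.Println(suppressDiffWhitespaceChange("value", "new_value")) // false: real change
}
```

The two calls correspond to the assertions in `TestSuppressDiffWhitespaceChange`.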
diff --git a/docs/data-sources/pipelines.md b/docs/data-sources/pipelines.md index d0223725ee..a7d02a47d1 100755 --- a/docs/data-sources/pipelines.md +++ b/docs/data-sources/pipelines.md @@ -5,7 +5,7 @@ subcategory: "Compute" -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _authentication is not configured for provider_ errors. -Retrieves a list of all [databricks_pipeline](../resources/pipeline.md) ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. +Retrieves a list of all [databricks_pipeline](../resources/pipeline.md) ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. ## Example Usage @@ -49,7 +49,6 @@ This data source exports the following attributes: * `pipeline_name` - (Optional) Filter Delta Live Tables pipelines by name for a given search term. `%` is the supported wildcard operator. - ## Attribute Reference This data source exports the following attributes: diff --git a/docs/data-sources/schemas.md b/docs/data-sources/schemas.md index 50e675581e..1128195989 100644 --- a/docs/data-sources/schemas.md +++ b/docs/data-sources/schemas.md @@ -3,6 +3,8 @@ subcategory: "Unity Catalog" --- # databricks_schemas Data Source +-> **Note** This data source could be only used with workspace-level provider! + -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. Retrieves a list of [databricks_schema](../resources/schema.md) ids, that were created by Terraform or manually, so that special handling could be applied. diff --git a/docs/data-sources/service_principal.md b/docs/data-sources/service_principal.md index 983e351bf8..f9c7b593aa 100644 --- a/docs/data-sources/service_principal.md +++ b/docs/data-sources/service_principal.md @@ -44,13 +44,14 @@ Data source exposes the following attributes: - `home` - Home folder of the [service principal](../resources/service_principal.md), e.g. `/Users/11111111-2222-3333-4444-555666777888`. - `repos` - Repos location of the [service principal](../resources/service_principal.md), e.g. `/Repos/11111111-2222-3333-4444-555666777888`. - `active` - Whether service principal is active or not. + * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. ## Related Resources The following resources are used in the same context: -- [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. 
+- [End to end workspace management](../guides/workspace-management.md) guide. - [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. - [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). - [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. diff --git a/docs/data-sources/service_principals.md b/docs/data-sources/service_principals.md index ca5522014f..457e0f98bc 100644 --- a/docs/data-sources/service_principals.md +++ b/docs/data-sources/service_principals.md @@ -49,11 +49,11 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. -* [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. -* [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). -* [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. -* [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). -* [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. -* [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. -* [databricks_service principal](../resources/service_principal.md) to manage [service principals](../resources/service_principal.md) +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. +- [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). +- [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. +- [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). +- [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. 
+- [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. +- [databricks_service principal](../resources/service_principal.md) to manage [service principals](../resources/service_principal.md) diff --git a/docs/data-sources/spark_version.md b/docs/data-sources/spark_version.md index be45e869b9..3c91c7f0d0 100644 --- a/docs/data-sources/spark_version.md +++ b/docs/data-sources/spark_version.md @@ -49,7 +49,7 @@ Data source allows you to pick groups by the following attributes: * `scala` - (string, optional) if we should limit the search only to runtimes that are based on specific Scala version. Default to `2.12`. * `spark_version` - (string, optional) if we should limit the search only to runtimes that are based on specific Spark version. Default to empty string. It could be specified as `3`, or `3.0`, or full version, like, `3.0.1`. * `photon` - (boolean, optional) if we should limit the search only to Photon runtimes. Default to `false`. *Deprecated with DBR 14.0 release. Specify `runtime_engine=\"PHOTON\"` in the cluster configuration instead!* -* `graviton` - (boolean, optional) if we should limit the search only to runtimes supporting AWS Graviton CPUs. Default to `false`. *Deprecated with DBR 14.0 release. DBR version compiled for Graviton will be automatically installed when nodes with Graviton CPUs are specified in the cluster configuration.* +* `graviton` - (boolean, optional) if we should limit the search only to runtimes supporting AWS Graviton CPUs. Default to `false`. _Deprecated with DBR 14.0 release. DBR version compiled for Graviton will be automatically installed when nodes with Graviton CPUs are specified in the cluster configuration._ ## Attribute Reference @@ -61,7 +61,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. diff --git a/docs/data-sources/sql_warehouse.md b/docs/data-sources/sql_warehouse.md index 897d228e25..e601a183cd 100644 --- a/docs/data-sources/sql_warehouse.md +++ b/docs/data-sources/sql_warehouse.md @@ -49,9 +49,9 @@ This data source exports the following attributes: * `enable_photon` - Whether [Photon](https://databricks.com/product/delta-engine) is enabled. * `enable_serverless_compute` - Whether this SQL warehouse is a serverless SQL warehouse. - - **For AWS**: If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. 
A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). + * **For AWS**: If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - - **For Azure**, you must [enable your workspace for serverless SQL warehouse](https://learn.microsoft.com/azure/databricks/sql/admin/serverless). + * **For Azure**, you must [enable your workspace for serverless SQL warehouse](https://learn.microsoft.com/azure/databricks/sql/admin/serverless). * `warehouse_type` - SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). * `channel` block, consisting of following fields: diff --git a/docs/data-sources/storage_credential.md b/docs/data-sources/storage_credential.md new file mode 100644 index 0000000000..4f480b2080 --- /dev/null +++ b/docs/data-sources/storage_credential.md @@ -0,0 +1,63 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_storage_credential Data Source + +-> **Note** This data source could be only used with workspace-level provider! + +Retrieves details about a [databricks_storage_credential](../resources/storage_credential.md) that was created by Terraform or manually. + +## Example Usage + +Getting details of an existing storage credential in the metastore + +```hcl +data "databricks_storage_credential" "this" { + name = "this" +} + +output "created_by" { + value = data.databricks_storage_credential.this.created_by + sensitive = false +} +``` + +## Argument Reference + +* `name` - (Required) The name of the storage credential + +## Attribute Reference + +This data source exports the following attributes: + +* `metastore_id` - Unique identifier of the parent Metastore. +* `owner` - Username/groupname/sp application_id of the storage credential owner. +* `read_only` - Indicates whether the storage credential is only usable for read operations. + +`aws_iam_role` credential details for AWS: + +* `role_arn` - The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` +* `external_id` (output only) - The external ID used in role assumption to prevent the confused deputy problem. +* `unity_catalog_iam_arn` (output only) - The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks. This is the identity that is going to assume the AWS IAM role. + +`azure_managed_identity` managed identity credential details for Azure: + +* `access_connector_id` - The Resource ID of the Azure Databricks Access Connector resource, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.Databricks/accessConnectors/connector-name`.
+ +* `managed_identity_id` - The Resource ID of the Azure User Assigned Managed Identity associated with Azure Databricks Access Connector, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.ManagedIdentity/userAssignedIdentities/user-managed-identity-name`. + +`databricks_gcp_service_account` credential details for GCP: + +* `email` - The email of the GCP service account created, to be granted access to relevant buckets. + +`azure_service_principal` service principal credential details for Azure: + +* `directory_id` - The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application +* `application_id` - The application ID of the application registration within the referenced AAD tenant + +## Related Resources + +The following resources are used in the same context: + +* [databricks_storage_credentials](./storage_credentials.md) to get names of all credentials +* [databricks_storage_credential](../resources/storage_credential.md) to manage Storage Credentials within Unity Catalog. diff --git a/docs/data-sources/storage_credentials.md b/docs/data-sources/storage_credentials.md new file mode 100644 index 0000000000..ac59a97673 --- /dev/null +++ b/docs/data-sources/storage_credentials.md @@ -0,0 +1,33 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_storage_credentials Data Source + +-> **Note** This data source could be only used with workspace-level provider! + +Retrieves a list of [databricks_storage_credential](./storage_credential.md) objects, that were created by Terraform or manually, so that special handling could be applied. + +## Example Usage + +List all storage credentials in the metastore + +```hcl +data "databricks_storage_credentials" "all" {} + +output "all_storage_credentials" { + value = data.databricks_storage_credentials.all.names +} +``` + +## Attribute Reference + +This data source exports the following attributes: + +* `names` - List of names of [databricks_storage_credential](./storage_credential.md) in the metastore + +## Related Resources + +The following resources are used in the same context: + +* [databricks_storage_credential](./storage_credential.md) to get information about a single credential +* [databricks_storage_credential](../resources/storage_credential.md) to manage Storage Credentials within Unity Catalog. diff --git a/docs/data-sources/tables.md b/docs/data-sources/tables.md index 6f4fb162d0..fcb9516fc3 100644 --- a/docs/data-sources/tables.md +++ b/docs/data-sources/tables.md @@ -3,6 +3,8 @@ subcategory: "Unity Catalog" --- # databricks_tables Data Source +-> **Note** This data source could be only used with workspace-level provider! + -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. Retrieves a list of managed or external table full names in Unity Catalog, that were created by Terraform or manually. Use [databricks_views](views.md) for retrieving a list of views.
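As a quick illustration of the `databricks_tables` data source described above, a minimal sketch is shown below; the `sandbox` catalog and `things` schema are hypothetical placeholders:

```hcl
data "databricks_tables" "things" {
  catalog_name = "sandbox"
  schema_name  = "things"
}

# Full table names in the form catalog.schema.table
output "all_tables" {
  value = data.databricks_tables.things.ids
}
```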
diff --git a/docs/data-sources/user.md b/docs/data-sources/user.md index cea0e38709..7a93272ede 100644 --- a/docs/data-sources/user.md +++ b/docs/data-sources/user.md @@ -32,7 +32,7 @@ resource "databricks_group_member" "my_member_a" { Data source allows you to pick groups by the following attributes - `user_name` - (Optional) User name of the user. The user must exist before this resource can be planned. -- `user_id` - (Optional) ID of the user. +- `user_id` - (Optional) ID of the user. ## Attribute Reference @@ -45,18 +45,19 @@ Data source exposes the following attributes: - `home` - Home folder of the [user](../resources/user.md), e.g. `/Users/mr.foo@example.com`. - `repos` - Personal Repos location of the [user](../resources/user.md), e.g. `/Repos/mr.foo@example.com`. - `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. + * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `users/mr.foo@example.com`. ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. -* [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. -* [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). -* [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. -* [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). -* [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. -* [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. -* [databricks_user](../resources/user.md) to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to [databricks_group](../resources/group.md) within the workspace. -* [databricks_user_instance_profile](../resources/user_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_user](../resources/user.md). +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. +- [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). +- [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. 
+- [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). +- [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. +- [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. +- [databricks_user](../resources/user.md) to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to [databricks_group](../resources/group.md) within the workspace. +- [databricks_user_instance_profile](../resources/user_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_user](../resources/user.md). diff --git a/docs/data-sources/volumes.md b/docs/data-sources/volumes.md new file mode 100644 index 0000000000..619e05c513 --- /dev/null +++ b/docs/data-sources/volumes.md @@ -0,0 +1,42 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_volumes Data Source + +-> **Note** This data source could be only used with workspace-level provider! + +Retrieves a list of [databricks_volume](../resources/volume.md) ids (full names), that were created by Terraform or manually. + +## Example Usage + +Listing all volumes in a _things_ [databricks_schema](../resources/schema.md) of a _sandbox_ [databricks_catalog](../resources/catalog.md): + +```hcl +data "databricks_volumes" "this" { + catalog_name = "sandbox" + schema_name = "things" +} + +output "all_volumes" { + value = data.databricks_volumes.this +} +``` + +## Argument Reference + +* `catalog_name` - (Required) Name of [databricks_catalog](../resources/catalog.md) +* `schema_name` - (Required) Name of [databricks_schema](../resources/schema.md) + +## Attribute Reference + +This data source exports the following attributes: + +* `ids` - a list of [databricks_volume](../resources/volume.md) full names: *`catalog`.`schema`.`volume`* + +## Related Resources + +The following resources are used in the same context: + +* [databricks_volume](../resources/volume.md) to manage volumes within Unity Catalog. +* [databricks_schema](../resources/schema.md) to manage schemas within Unity Catalog. +* [databricks_catalog](../resources/catalog.md) to manage catalogs within Unity Catalog. diff --git a/docs/data-sources/zones.md b/docs/data-sources/zones.md index 4813ecd406..d38379d902 100644 --- a/docs/data-sources/zones.md +++ b/docs/data-sources/zones.md @@ -12,6 +12,7 @@ This data source allows you to fetch all available AWS availability zones on you ```hcl data "databricks_zones" "zones" {} ``` + ## Argument Reference There are no arguments to this data source and only attributes that are computed. 
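Because `databricks_zones` takes no arguments, its value comes from consuming the computed attributes. A minimal sketch, assuming the exported `default_zone` attribute and a hypothetical instance pool:

```hcl
data "databricks_zones" "zones" {}

# Place a small on-demand instance pool in the workspace's default availability zone.
resource "databricks_instance_pool" "this" {
  instance_pool_name                    = "default-zone-pool"
  min_idle_instances                    = 0
  max_capacity                          = 10
  node_type_id                          = "i3.xlarge"
  idle_instance_autotermination_minutes = 10

  aws_attributes {
    availability = "ON_DEMAND"
    zone_id      = data.databricks_zones.zones.default_zone
  }
}
```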
diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index aadb41b7a4..429dcd8834 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -331,7 +331,7 @@ resource "time_sleep" "wait" { } ``` -#### IAM policy error +### IAM policy error If you notice the below error: @@ -341,8 +341,12 @@ Error: MALFORMED_REQUEST: Failed credentials validation checks: Spot Cancellatio - Try creating workspace from UI: -![create_workspace_error](https://github.com/databricks/terraform-provider-databricks/raw/master/docs/images/create_workspace_error.png) +![create_workspace_error](https://raw.githubusercontent.com/databricks/terraform-provider-databricks/main/docs/images/create_workspace_error.png) - Verify if the role and policy exist (assume role should allow external ID) -![iam_role_trust_error](https://github.com/databricks/terraform-provider-databricks/raw/master/docs/images/iam_role_trust_error.png) +![iam_role_trust_error](https://raw.githubusercontent.com/databricks/terraform-provider-databricks/main/docs/images/iam_role_trust_error.png) + +### More than one authorization method configured error + +See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) diff --git a/docs/guides/azure-private-link-workspace-simplified.md b/docs/guides/azure-private-link-workspace-simplified.md index b27131248a..483ed910f9 100644 --- a/docs/guides/azure-private-link-workspace-simplified.md +++ b/docs/guides/azure-private-link-workspace-simplified.md @@ -13,6 +13,7 @@ page_title: "Provisioning Azure Databricks with Private Link - Simple deployment You can use Terraform to deploy the underlying cloud resources and the private access settings resources automatically using a programmatic approach. This guide covers a [simple deployment](https://learn.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/private-link-simplified) to configure Azure Databricks with Private Link: + * No separate VNet separates user access from the VNet that you use for your compute resources in the Classic data plane * A transit subnet in the data plane VNet is used for user access * Only a single private endpoint is used for both front-end and back-end connectivity. @@ -23,25 +24,25 @@ This guide covers a [simple deployment](https://learn.microsoft.com/en-us/azure/ This guide uses the following variables: -- `cidr`: The CIDR for the Azure Vnet -- `rg_name`: The name of the existing resource group -- `location`: The location for Azure resources +* `cidr`: The CIDR for the Azure Vnet +* `rg_name`: The name of the existing resource group +* `location`: The location for Azure resources This guide is provided as-is, and you can use it as the basis for your custom Terraform module. 
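For reference, the three variables listed above can be declared as in the following sketch; the types and descriptions are assumptions, and no defaults are set:

```hcl
variable "cidr" {
  description = "CIDR block for the Azure VNet"
  type        = string
}

variable "rg_name" {
  description = "Name of the existing resource group"
  type        = string
}

variable "location" {
  description = "Location for Azure resources"
  type        = string
}
```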
This guide takes you through the following high-level steps to set up a workspace with Azure Private Link: -- Initialize the required providers -- Configure Azure objects: - - Deploy an Azure Vnet with the following subnets: - - Public and private subnets for Azure Databricks workspace - - Private Link subnet that will contain the following private endpoints: - - Frontend / Backend private endpoint - - Web_auth private endpoint - - Configure the private DNS zone to add: - - DNS A record to map connection for workspace access - - DNS A record(s) for web_auth -- Workspace Creation +* Initialize the required providers +* Configure Azure objects: + * Deploy an Azure Vnet with the following subnets: + * Public and private subnets for Azure Databricks workspace + * Private Link subnet that will contain the following private endpoints: + * Frontend / Backend private endpoint + * Web_auth private endpoint + * Configure the private DNS zone to add: + * DNS A record to map connection for workspace access + * DNS A record(s) for web_auth +* Workspace Creation ## Provider initialization @@ -218,7 +219,6 @@ resource "azurerm_subnet" "plsubnet" { Create a private endpoint with sub-resource **databricks_ui_api**: - ```hcl resource "azurerm_private_endpoint" "uiapi" { name = "uiapipvtendpoint" diff --git a/docs/guides/azure-private-link-workspace-standard.md b/docs/guides/azure-private-link-workspace-standard.md index 982b36f49b..6b9e3431ea 100644 --- a/docs/guides/azure-private-link-workspace-standard.md +++ b/docs/guides/azure-private-link-workspace-standard.md @@ -2,30 +2,30 @@ page_title: "Provisioning Azure Databricks with Private Link - Standard deployment." --- -# Deploying pre-requisite resources and enabling Private Link connections - Standard deployment. +# Deploying pre-requisite resources and enabling Private Link connections - Standard deployment -> **Note** - - Refer to [adb-with-private-link-standard](https://github.com/databricks/terraform-databricks-examples/tree/main/modules/adb-with-private-link-standard), a Terraform module that contains code used to deploy an Azure Databricks workspace with Azure Private Link using the Standard deployment approach. - - Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for more Terraform modules and examples to deploy Azure Databricks resources. - - This guide assumes that connectivity from the on-premises user environment is already configured using ExpressRoute or a VPN gateway connection. + +- Refer to [adb-with-private-link-standard](https://github.com/databricks/terraform-databricks-examples/tree/main/modules/adb-with-private-link-standard), a Terraform module that contains code used to deploy an Azure Databricks workspace with Azure Private Link using the Standard deployment approach. +- Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for more Terraform modules and examples to deploy Azure Databricks resources. +- This guide assumes that connectivity from the on-premises user environment is already configured using ExpressRoute or a VPN gateway connection. [Azure Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-overview) support enables private connectivity between users and their Databricks workspaces and between clusters on the data plane and core services on the control plane within the Databricks workspace infrastructure. 
You can use Terraform to deploy the underlying cloud resources and the private access settings resources automatically using a programmatic approach. - This guide covers a [standard deployment](https://learn.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/private-link-standard) to configure Azure Databricks with Private Link: -* Two separate VNets are used: - * A transit VNet - * A customer Data Plane VNet -* A private endpoint is used for back-end connectivity and deployed in the customer Data Plane VNet. -* A private endpoint is used for front-end connectivity and deployed in the transit VNet. -* A private endpoint is used for web authentication and deployed in the transit VNet. -* A dedicated Databricks workspace, called Web Auth workspace, is used for web authentication traffic. This workspace is configured with the sub-resource **browser_authentication** and deployed using subnets in the transit VNet. +- Two separate VNets are used: + - A transit VNet + - A customer Data Plane VNet +- A private endpoint is used for back-end connectivity and deployed in the customer Data Plane VNet. +- A private endpoint is used for front-end connectivity and deployed in the transit VNet. +- A private endpoint is used for web authentication and deployed in the transit VNet. +- A dedicated Databricks workspace, called Web Auth workspace, is used for web authentication traffic. This workspace is configured with the sub-resource **browser_authentication** and deployed using subnets in the transit VNet. -> **Note** -* A separate Web Auth workspace is not mandatory but recommended. -* DNS mapping for SSO login callbacks to the Azure Databricks web application can be managed by the Web Auth workspace or another workspace associated with the **browser_authentication** private endpoint. +- A separate Web Auth workspace is not mandatory but recommended. +- DNS mapping for SSO login callbacks to the Azure Databricks web application can be managed by the Web Auth workspace or another workspace associated with the **browser_authentication** private endpoint. ![Azure Databricks with Private Link - Standard deployment](https://github.com/databricks/terraform-provider-databricks/raw/master/docs/images/azure-private-link-standard.png) @@ -44,14 +44,14 @@ This guide takes you through the following high-level steps to set up a workspac - Initialize the required providers - Configure Azure objects: - Deploy two Azure VNets with the following subnets: - - Public and private subnets for each Azure Databricks workspace in the Data Plane VNet - - Private Link subnet in the Data Plane VNet that will contain the Backend private endpoint + - Public and private subnets for each Azure Databricks workspace in the Data Plane VNet + - Private Link subnet in the Data Plane VNet that will contain the Backend private endpoint - Private Link subnet in the Transit VNet that will contain the following private endpoints: - - Frontend private endpoint - - Web auth private endpoint + - Frontend private endpoint + - Web auth private endpoint - Configure the private DNS zone to add: - - DNS A record to map connection for workspace access - - DNS A record(s) for web_auth + - DNS A record to map connection for workspace access + - DNS A record(s) for web_auth - Workspace Creation ## Provider initialization @@ -124,12 +124,12 @@ locals { ## Summary -* In the Transit resource group: +- In the Transit resource group: 1. Create a Transit VNet 2. Create a private DNS zone 3. 
Create Web Auth Databricks workspace with the sub-resource **browser_authentication** 4. Create a Frontend private endpoint with the sub-resource **databricks_ui_api** -* In the Data Plane resource group: +- In the Data Plane resource group: 1. Create a Data Plane VNet 2. Create a private DNS zone 3. Create a new Azure Databricks workspace @@ -518,5 +518,6 @@ resource "azurerm_private_endpoint" "app_dpcp" { ``` -> **Note** + - The public network access to the workspace is disabled. You can access the workspace only through private connectivity to the on-premises user environment. For testing purposes, you can deploy an Azure VM in the Transit VNet to test the frontend connectivity. - If you wish to deploy a test VM in the Data Plane VNet, you should configure a peering connection between the two VNets diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 8687431424..6c58361da4 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -21,10 +21,10 @@ Exporter can also be used in a non-interactive mode: export DATABRICKS_HOST=... export DATABRICKS_TOKEN=... ./terraform-provider-databricks exporter -skip-interactive \ - -services=groups,secrets,access,compute,users,jobs,storage \ - -listing=jobs,compute \ - -last-active-days=90 \ - -debug + -services=groups,secrets,access,compute,users,jobs,storage \ + -listing=jobs,compute \ + -last-active-days=90 \ + -debug ``` ## Argument Reference @@ -46,7 +46,7 @@ All arguments are optional, and they tune what code is being generated. * `-includeUserDomains` - optionally include domain name into generated resource name for `databricks_user` resource. * `-importAllUsers` - optionally include all users and service principals even if they are only part of the `users` group. * `-exportDeletedUsersAssets` - optionally include assets of deleted users and service principals. -* `-incremental` - experimental option for incremental export of modified resources and merging with existing resources. *Please note that only a limited set of resources (notebooks, SQL queries/dashboards/alerts, ...) provides information about the last modified date - all other resources will be re-exported again! Also, it's impossible to detect the deletion of the resources, so you must do periodic full export if resources are deleted!* **Requires** `-updated-since` option if no `exporter-run-stats.json` file exists in the output directory. +* `-incremental` - experimental option for incremental export of modified resources and merging with existing resources. *Please note that only a limited set of resources (notebooks, SQL queries/dashboards/alerts, ...) provides information about the last modified date - all other resources will be re-exported again! Also, it's impossible to detect the deletion of many resource types (i.e. clusters, jobs, ...), so you must do periodic full export if resources are deleted! For Workspace objects (notebooks, workspace files and directories) exporter tries to detect deleted objects and remove them from generated code (requires presence of `ws_objects.json` file that is written on each export that pulls all workspace objects). For workspace objects renames are handled as deletion of existing/creation of new resource!* **Requires** `-updated-since` option if no `exporter-run-stats.json` file exists in the output directory. * `-updated-since` - timestamp (in ISO8601 format supported by Go language) for exporting of resources modified since a given timestamp. 
I.e., `2023-07-24T00:00:00Z`. If not specified, the exporter will try to load the last run timestamp from the `exporter-run-stats.json` file generated during the export and use it. * `-notebooksFormat` - optional format for exporting of notebooks. Supported values are `SOURCE` (default), `DBC`, `JUPYTER`. This option could be used to export notebooks with embedded dashboards. * `-noformat` - optionally turn off the execution of `terraform fmt` on the exported files (enabled by default). @@ -79,10 +79,20 @@ Services are just logical groups of resources used for filtering and organizatio * `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md). * `sql-queries` - **listing** [databricks_sql_query](../resources/sql_query.md). * `storage` - only [databricks_dbfs_file](../resources/dbfs_file.md) referenced in other resources (libraries, init scripts, ...) will be downloaded locally and properly arranged into terraform state. -* `uc-artifact-allowlist` - exports [databricks_artifact_allowlist](../resources/artifact_allowlist.md) resources for Unity Catalog Allow Lists attached to the current metastore. -* `uc-system-schemas` - exports [databricks_system_schema](../resources/system_schema.md) resources for the UC metastore of the current workspace. +* `uc-artifact-allowlist` - **listing** exports [databricks_artifact_allowlist](../resources/artifact_allowlist.md) resources for Unity Catalog Allow Lists attached to the current metastore. +* `uc-catalogs` - **listing** [databricks_catalog](../resources/catalog.md) and [databricks_catalog_workspace_binding](../resources/catalog_workspace_binding.md) +* `uc-connections` - **listing** [databricks_connection](../resources/connection.md). *Please note that because the API doesn't return sensitive fields, such as passwords, tokens, ..., the generated `options` block could be incomplete!* +* `uc-grants` - [databricks_grants](../resources/grants.md) +* `uc-metastores` - **listing** [databricks_metastore](../resources/metastore.md) and [databricks_metastore_assignment](../resources/metastore_assignment.md) (only on account-level). *Please note that when using workspace-level configuration, only metastores from the workspace's region are listed!* +* `uc-models` - [databricks_registered_model](../resources/registered_model.md) +* `uc-schemas` - [databricks_schema](../resources/schema.md) +* `uc-shares` - **listing** [databricks_share](../resources/share.md) and [databricks_recipient](../resources/recipient.md) +* `uc-system-schemas` - **listing** exports [databricks_system_schema](../resources/system_schema.md) resources for the UC metastore of the current workspace. +* `uc-storage-credentials` - **listing** exports [databricks_storage_credential](../resources/storage_credential.md) resources on workspace or account level. +* `uc-external-locations` - **listing** exports [databricks_external_location](../resources/external_location.md) resource. +* `uc-volumes` - [databricks_volume](../resources/volume.md) * `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, migrating workspaces is the only use case for importing `users` service.
-* `workspace` - [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md) +* `workspace` - **listing** [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md) ## Secrets @@ -98,7 +108,6 @@ To speed up export, Terraform Exporter performs many operations, such as listing * `EXPORTER_PARALLELISM_NNN` - number of Goroutines used to process resources of a specific type (replace `NNN` with the exact resource name, for example, `EXPORTER_PARALLELISM_databricks_notebook=10` sets the number of Goroutines for `databricks_notebook` resource to `10`). There is a shared channel (with name `default`) for handling of resources for which there are no dedicated channels - use `EXPORTER_PARALLELISM_default` to increase it's size (default size is `15`). Defaults for some resources are defined by the `goroutinesNumber` map in `exporter/context.go` or equal to `2` if there is no value. *Don't increase default values too much to avoid REST API throttling!* * `EXPORTER_DEFAULT_HANDLER_CHANNEL_SIZE` - the size of the shared channel (default: `200000`) - you may need to increase it if you have a huge workspace. - ## Support Matrix Exporter aims to generate HCL code for most of the resources within the Databricks workspace: @@ -107,10 +116,14 @@ Exporter aims to generate HCL code for most of the resources within the Databric | --- | --- | --- | | [databricks_access_control_rule_set](../resources/access_control_rule_set.md) | Yes | No | | [databricks_artifact_allowlist](../resources/artifact_allowlist.md) | Yes | No | +| [databricks_catalog](../resources/catalog.md) | Yes | Yes | | [databricks_cluster](../resources/cluster.md) | Yes | No | | [databricks_cluster_policy](../resources/cluster_policy.md) | Yes | No | +| [databricks_connection](../resources/connection.md) | Yes | Yes | | [databricks_dbfs_file](../resources/dbfs_file.md) | Yes | No | +| [databricks_external_location](../resources/external_location.md) | Yes | Yes | | [databricks_global_init_script](../resources/global_init_script.md) | Yes | Yes | +| [databricks_grants](../resources/grants.md) | Yes | No | | [databricks_group](../resources/group.md) | Yes | No | | [databricks_group_instance_profile](../resources/group_instance_profile.md) | Yes | No | | [databricks_group_member](../resources/group_member.md) | Yes | No | @@ -120,20 +133,26 @@ Exporter aims to generate HCL code for most of the resources within the Databric | [databricks_ip_access_list](../resources/ip_access_list.md) | Yes | Yes | | [databricks_job](../resources/job.md) | Yes | No | | [databricks_library](../resources/library.md) | Yes\* | No | -| [databricks_mlflow_model](../resources/mlflow_model.md) | No | No | +| [databricks_metastore](../resources/metastore.md) | Yes | Yes | +| [databricks_metastore_assignment](../resources/metastore_assignment.md) | Yes | No | | [databricks_mlflow_experiment](../resources/mlflow_experiment.md) | No | No | +| [databricks_mlflow_model](../resources/mlflow_model.md) | No | No | | [databricks_mlflow_webhook](../resources/mlflow_webhook.md) | Yes | Yes | | [databricks_model_serving](../resources/model_serving) | Yes | Yes | | [databricks_notebook](../resources/notebook.md) | Yes | Yes | | [databricks_obo_token](../resources/obo_token.md) | Not Applicable | No | | [databricks_permissions](../resources/permissions.md) | Yes | No | | [databricks_pipeline](../resources/pipeline.md) | Yes | Yes | +| 
[databricks_recipient](../resources/recipient.md) | Yes | Yes | +| [databricks_registered_model](../resources/registered.md) | Yes | Yes | | [databricks_repo](../resources/repo.md) | Yes | No | +| [databricks_schema](../resources/schema.md) | Yes | Yes | | [databricks_secret](../resources/secret.md) | Yes | No | | [databricks_secret_acl](../resources/secret_acl.md) | Yes | No | | [databricks_secret_scope](../resources/secret_scope.md) | Yes | No | | [databricks_service_principal](../resources/service_principal.md) | Yes | No | | [databricks_service_principal_role](../resources/service_principal_role.md) | Yes | No | +| [databricks_share](../resources/share.md) | Yes | Yes | | [databricks_sql_alert](../resources/sql_alert.md) | Yes | Yes | | [databricks_sql_dashboard](../resources/sql_dashboard.md) | Yes | Yes | | [databricks_sql_endpoint](../resources/sql_endpoint.md) | Yes | No | @@ -142,14 +161,16 @@ Exporter aims to generate HCL code for most of the resources within the Databric | [databricks_sql_query](../resources/sql_query.md) | Yes | Yes | | [databricks_sql_visualization](../resources/sql_visualization.md) | Yes | Yes | | [databricks_sql_widget](../resources/sql_widget.md) | Yes | Yes | +| [databricks_storage_credential](../resources/storage_credential.md) | Yes | Yes | | [databricks_system_schema](../resources/system_schema.md) | Yes | No | | [databricks_token](../resources/token.md) | Not Applicable | No | | [databricks_user](../resources/user.md) | Yes | No | | [databricks_user_instance_profile](../resources/user_instance_profile.md) | No (Deprecated) | No | | [databricks_user_role](../resources/user_role.md) | Yes | No | +| [databricks_volume](../resources/volume.md) | Yes | Yes | | [databricks_workspace_conf](../resources/workspace_conf.md) | Yes (partial) | No | | [databricks_workspace_file](../resources/workspace_file.md) | Yes | Yes | Notes: -- \* - libraries are exported as blocks inside the cluster definition instead of generating `databricks_library` resources. This is done to decrease the number of generated resources. +* \* - libraries are exported as blocks inside the cluster definition instead of generating `databricks_library` resources. This is done to decrease the number of generated resources. diff --git a/docs/guides/gcp-workspace.md b/docs/guides/gcp-workspace.md index 1c302efd50..87aa056687 100644 --- a/docs/guides/gcp-workspace.md +++ b/docs/guides/gcp-workspace.md @@ -268,3 +268,7 @@ provider "databricks" { ``` We assume that you have a terraform module in your project that creates a workspace (using [Databricks Workspace](#creating-a-databricks-workspace) section), and you named it as `dbx_gcp` while calling it in the **main.tf** file of your terraform project. And `workspace_url` and `token_value` are the output attributes of that module. This provider configuration will allow you to use the generated token to authenticate to the created workspace during workspace creation. 
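The provider wiring that the paragraph above describes likely looks similar to the following sketch; the `dbx_gcp` module name and its `workspace_url`/`token_value` outputs come from the text, while the exact block shape is an assumption:

```hcl
# Authenticate to the newly created workspace using outputs of the workspace module.
provider "databricks" {
  host  = module.dbx_gcp.workspace_url
  token = module.dbx_gcp.token_value
}
```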
+ +### More than one authorization method configured error + +See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) diff --git a/docs/guides/migration-0.3.x.md b/docs/guides/migration-0.3.x.md deleted file mode 100644 index 25d1e1636e..0000000000 --- a/docs/guides/migration-0.3.x.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -page_title: "Migration from 0.2.x to 0.3.x" ---- -# Migration from 0.2.x to 0.3.x - -Certain resources underwent changes in order to ensure consistency with REST API and standard expected Terraform behavior. You can upgrade the provider with `terraform init -upgrade`. - -## provider - -* Rewrite `basic_auth` block with `username` and `password` fields, as specified in [main document](https://registry.terraform.io/providers/databricks/databricks/latest/docs#authenticating-with-hostname-username-and-password). -* Rewrite `azure_auth` block with appropriate [Azure configuration](https://registry.terraform.io/providers/databricks/databricks/latest/docs#special-configurations-for-azure). - -## databricks_job - -* Rewrite `spark_submit_parameters` with [spark_submit_task](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#spark_submit_task-configuration-block) configuration block. -* Rewrite `python_file` and `python_parameters` with [spark_python_task](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#spark_python_task-configuration-block) configuration block. -* Rewrite `jar_uri`, `jar_main_class_name`, and `jar_parameters` with [spark_jar_task](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#spark_jar_task-configuration-block) configuration block. -* Rewrite `notebook_path` and `notebook_base_parameters` with [notebook_task](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job#notebook_task-configuration-block) configuration block. -* Rewrite `library_jar`, `library_egg`, `library_whl`, `library_pypi`, `library_cran`, and `library_maven` with [library](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster#library-configuration-block) configuration block. - -## databricks_dbfs_file - -* Rename the `content` to `content_base64`, as this better represents actual data within the field and simplifies internal code reusability. -* Remove the `overwrite` attribute. Starting from v0.3.0 it behaves as if it is set to `true`. -* Remove the `mkdirs` attribute. Starting from v0.3.0 it behaves as if it is set to `true`. -* Remove the `validate_remote_file` attribute. Due to performance reasons, starting from v0.3.0 it doesn't fetch the contents of the remote file to verify the checksum. -* If you've relied on the internal `content_b64_md5` attribute, please remove it. Starting from v0.3.0 its behavior is internalized. - -DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. - -## databricks_notebook - -* Rename the `content` to `content_base64`, as this better represents actual data within the field and simplifies internal code reusability. -* Remove the `format` attribute. Starting from v0.3.0 it behaves as if it is set to `SOURCE`. -* Remove the `overwrite` attribute. Starting from v0.3.0 it behaves as if it is set to `true`. -* Remove the `mkdirs` attributes. 
Starting from v0.3.0 it behaves as if it is set to `true`. - -After changing the code, `terraform apply` would replace managed notebooks. - -Notebook on Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed notebook won't be overwritten by Terraform, if there's no local change to notebook sources. Notebooks are identified by their path, so changing notebook's name manually on the workspace and then applying Terraform state would result in creation of notebook from Terraform state. - -## databricks_cluster - -* Rewrite `library_jar`, `library_egg`, `library_whl`, `library_pypi`, `library_cran`, and `library_maven` with [library](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster#library-configuration-block) configuration block. - -## databricks_instance_profile - -* Remove `skip_validation` from all `databricks_instance_profile` resources. In order to ensure consistency, all AWS EC2 profiles are now checked to work before returning the state to the main Terraform process. - -## databricks_mws_workspaces - -* Remove `verify_workspace_runnning` attribute from all `databricks_mws_workspaces` resources. All workspaces are verified to be running automatically as of [this change](https://github.com/databricks/terraform-provider-databricks/commit/ef64b5d26daa23ff2532f1076a0db01864e4f73c). - -## databricks_instance_pool - -* Remove `default_tags`. - -## databricks_scim_user - -* This resource was removed as deprecated. Please rewrite using [databricks_user](../resources/user.md). - -## databricks_scim_group - -* This resource was removed as deprecated. Please rewrite using [databricks_group](../resources/group.md). - -## databricks_default_user_roles - -* This data source was removed as deprecated. Please use [databricks_group](../data-sources/group.md) data source for performing equivalent tasks. diff --git a/docs/guides/migration-0.4.x.md b/docs/guides/migration-0.4.x.md deleted file mode 100644 index 4f99b6b435..0000000000 --- a/docs/guides/migration-0.4.x.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -page_title: "Migration from 0.3.x to 0.4.x" ---- -# Migration from 0.3.x to 0.4.x - -Certain resources underwent changes in order to improve long-term maintainability. You can upgrade the provider with `terraform init -upgrade`. If you're currently using v0.2.x of provider, please first complete the rewrites specified in [0.2.x to 0.3.x](migration-0.3.x.md) guide. - -## provider - -* Remove `azure_use_pat_for_spn`, `azure_use_pat_for_cli`, `azure_pat_token_duration_seconds` attributes. -* Remove deprecated `azure_workspace_name`, `azure_resource_group`, `azure_subscription_id` in favor of just using `azure_workspace_resource_id`. -* `DATABRICKS_AZURE_CLIENT_SECRET` environment variable is no longer having any effect in favor of just using `ARM_CLIENT_SECRET`. -* `DATABRICKS_AZURE_CLIENT_ID` environment variable is no longer having any effect in favor of just using `ARM_CLIENT_ID`. -* `DATABRICKS_AZURE_TENANT_ID` environment variable is no longer having any effect in favor of just using `ARM_TENANT_ID`. -* Rename `DATABRICKS_AZURE_WORKSPACE_RESOURCE_ID` environment variable to `DATABRICKS_AZURE_RESOURCE_ID`. - -## databricks_mount - -* Rewrite deprecated `databricks_aws_s3_mount`, `databricks_azure_adls_gen1_mount`, `databricks_azure_adls_gen2_mount`, and `databricks_azure_blob_mount` resources into `databricks_mount`. 
- -## databricks_user and databricks_group - -* Globally rename `allow_sql_analytics_access` to `databricks_sql_access` field to allow users and groups access to Databricks SQL diff --git a/docs/guides/passthrough-cluster-per-user.md b/docs/guides/passthrough-cluster-per-user.md deleted file mode 100644 index eafa13fdfd..0000000000 --- a/docs/guides/passthrough-cluster-per-user.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -page_title: "Dynamic Passthrough Clusters for a Group" ---- - -# Dynamic Passthrough Clusters - -This example addresses a pretty common use-case: data science team, which is managed as a group through SCIM provisioning, needs a collection of individual passthrough [databricks_cluster](../resources/cluster.md), which they should be able to restart. It could be achieved by using [databricks_group](../data-sources/group.md) and [databricks_user](../data-sources/user.md) data sources to get the list of user names Terraform's `for_each` meta-attribute helps to do this easily. - -```hcl -data "databricks_group" "dev" { - display_name = "dev-clusters" -} - -data "databricks_user" "dev" { - for_each = data.databricks_group.dev.members - user_id = each.key -} -``` - -Once we have a specific list of user resources, we could proceed with creating clusters and permissions with `for_each = data.databricks_user.dev` to ensure it's done for each user: - -```hcl -data "databricks_spark_version" "latest" {} -data "databricks_node_type" "smallest" { - local_disk = true -} - -resource "databricks_cluster" "dev" { - for_each = data.databricks_user.dev - - cluster_name = "${each.value.display_name} dev cluster" - single_user_name = each.value.user_name - - spark_version = data.databricks_spark_version.latest.id - node_type_id = data.databricks_node_type.smallest.id - autotermination_minutes = 10 - - spark_conf = { - # Single-node - "spark.databricks.cluster.profile" : "singleNode" - "spark.master" : "local[*]", - - # Passthrough - "spark.databricks.passthrough.enabled" : "true" - } - - custom_tags = { - "ResourceClass" = "SingleNode" - } -} - -resource "databricks_permissions" "dev_restart" { - for_each = data.databricks_user.dev - cluster_id = databricks_cluster.dev[each.key].cluster_id - access_control { - user_name = each.value.user_name - permission_level = "CAN_RESTART" - } -} -``` diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index 504698cd5f..e6113f98a9 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -58,16 +58,16 @@ terraform { │ │ ├── main.tf │ │ └── versions.tf │ └── production -│ ├── README.md -│ ├── main.tf -│ └── versions.tf +│ ├── README.md +│ ├── main.tf +│ └── versions.tf └── modules - ├── first-module - │ ├── ... - │ └── versions.tf - └── second-module - ├── ... - └── versions.tf + ├── first-module + │ ├── ... + │ └── versions.tf + └── second-module + ├── ... + └── versions.tf ``` ### Error: Failed to install provider @@ -135,7 +135,6 @@ then it means that you're trying to access a workspace that uses private link wi ### Error: ....: Unauthorized access to Org: NNNNNNNNNN - There are a few possible reasons for this error: * You’re trying to access a Databricks workspace with a private link enabled and public network access set to disabled. Typically this happens when a computer from which you’re running terraform apply or terraform plan doesn’t have domain name resolution configured correctly, and Terraform is reaching the workspace via a public IP address. 
Also, this may happen when you’re accessing the internet via a proxy, so all traffic from Terraform is forwarded to the proxy, and routed via the public internet. @@ -183,3 +182,24 @@ If the metastore assigned to the workspace has changed, the new metastore id mus ``` To solve this error, the new Metastore ID must be set in the field `metastore_id` of the failing resources. + +### More than one authorization method configured error + +If you notice the below error: + +```sh +Error: validate: more than one authorization method configured +``` + +Ensure that you only have one authorization method set. All available authorization methods are documented [here](https://registry.terraform.io/providers/databricks/databricks/latest/docs#auth_type). + +If you want to enforce a specific authorization method, you can set the `auth_type` attribute in the provider block: + +```hcl +provider "databricks" { + ... + auth_type = "pat" +} +``` + +The above would enforce the use of PAT authorization. diff --git a/docs/guides/unity-catalog.md b/docs/guides/unity-catalog.md index bf79f47913..8d5b632b09 100644 --- a/docs/guides/unity-catalog.md +++ b/docs/guides/unity-catalog.md @@ -250,13 +250,13 @@ data "aws_iam_policy_document" "passrole_for_uc" { effect = "Allow" actions = ["sts:AssumeRole"] principals { - identifiers = [databricks_storage_credential.external.aws_iam_role.unity_catalog_iam_arn] + identifiers = [databricks_storage_credential.external.aws_iam_role[0].unity_catalog_iam_arn] type = "AWS" } condition { test = "StringEquals" variable = "sts:ExternalId" - values = [databricks_storage_credential.external.aws_iam_role.external_id] + values = [databricks_storage_credential.external.aws_iam_role[0].external_id] } } statement { @@ -297,7 +297,7 @@ resource "aws_iam_policy" "external_data_access" { "${aws_s3_bucket.external.arn}/*" ], "Effect" : "Allow" - }, + }, { "Action" : [ "sts:AssumeRole" diff --git a/docs/index.md b/docs/index.md index 85038d3980..20ddfd1137 100644 --- a/docs/index.md +++ b/docs/index.md @@ -355,6 +355,10 @@ Except for metastore, metastore assignment and storage credential objects, Unity If you are configuring a new Databricks account for the first time, please create at least one workspace with an identity (user or service principal) that you intend to use for Unity Catalog rollout. You can then configure the provider using that identity and workspace to provision the required Unity Catalog resources. +## Special considerations for Unity Catalog Resources + +When performing a single Terraform apply to update both the owner and other fields for Unity Catalog resources, the process first updates the owner, followed by the other fields using the new owner's permissions. If your principal is not the owner (specifically, the newly updated owner), you will not have the authority to modify those fields. In cases where you wish to change the owner to another individual and also update other fields, we recommend initially updating the fields using your principal, which should have owner permissions, and then updating the owner in a separate step. + ## Miscellaneous configuration parameters !> **Warning** Combination of `debug_headers` and `debug_truncate_bytes` results in dumping of sensitive information to logs. Use it for troubleshooting purposes only. 
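To make the owner-change guidance above concrete, here is a minimal sketch of the recommended two-step apply. It assumes a Terraform-managed `databricks_catalog` whose current owner is the principal running `terraform apply`; the catalog name, comment, and group names are illustrative placeholders, not values taken from these docs.

```hcl
# Apply 1: change the non-owner fields while the applying principal still owns the catalog.
resource "databricks_catalog" "sandbox" {
  name    = "sandbox"
  comment = "updated comment" # changed in this apply
  owner   = "uc-admins"       # left unchanged in this apply
}

# Apply 2 (separate change): once the other fields are settled, update only the owner.
# resource "databricks_catalog" "sandbox" {
#   name    = "sandbox"
#   comment = "updated comment"
#   owner   = "data-engineers" # changed in the follow-up apply
# }
```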
diff --git a/docs/resources/access_control_rule_set.md b/docs/resources/access_control_rule_set.md index fd95f3aa0f..44a6479651 100644 --- a/docs/resources/access_control_rule_set.md +++ b/docs/resources/access_control_rule_set.md @@ -238,16 +238,22 @@ grant_rules { Arguments of the `grant_rules` block are: -- `role` - (Required) Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page) or [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role). +* `role` - (Required) Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page) or [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. * `roles/group.manager` - Manager of a group. * `roles/marketplace.admin` - Admin of marketplace. -- `principals` - (Required) a list of principals who are granted a role. The following format is supported: +* `principals` - (Required) a list of principals who are granted a role. The following format is supported: * `users/{username}` (also exposed as `acl_principal_id` attribute of `databricks_user` resource). * `groups/{groupname}` (also exposed as `acl_principal_id` attribute of `databricks_group` resource). * `servicePrincipals/{applicationId}` (also exposed as `acl_principal_id` attribute of `databricks_service_principal` resource). +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the access control rule set - the same as `name`. + ## Related Resources The following resources are often used in the same context: diff --git a/docs/resources/artifact_allowlist.md b/docs/resources/artifact_allowlist.md index 01373e19d3..4973872a3c 100644 --- a/docs/resources/artifact_allowlist.md +++ b/docs/resources/artifact_allowlist.md @@ -41,12 +41,18 @@ In addition to all arguments above, the following attributes are exported: * `created_at` - Time at which this artifact allowlist was set. * `created_by` - Identity that set the artifact allowlist. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the artifact allow list in form of `metastore_id|artifact_type`. 
+ ## Import This resource can be imported by name: ```bash -terraform import databricks_artifact_allowlist.this | +terraform import databricks_artifact_allowlist.this '|' ``` ## Related Resources diff --git a/docs/resources/catalog.md b/docs/resources/catalog.md index b9e35be92e..c09d05a8d4 100644 --- a/docs/resources/catalog.md +++ b/docs/resources/catalog.md @@ -41,8 +41,8 @@ The following arguments are required: In addition to all arguments above, the following attributes are exported: -* `metastore_id` - ID of the parent metastore. * `id` - ID of this catalog - same as the `name`. +* `metastore_id` - ID of the parent metastore. ## Import diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index d101978f3a..5a74322065 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -436,7 +436,7 @@ The following options are available: * `google_service_account` - (Optional, string) Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources. * `availability` - (Optional) Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. * `boot_disk_size` (optional, int) Boot disk size in GB -* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. +* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. * `zone_id` (optional) Identifier for the availability zone in which the cluster resides. This can be one of the following: * `HA` (default): High availability, spread nodes across availability zones for a Databricks deployment region. * `AUTO`: Databricks picks an availability zone to schedule the cluster on. @@ -553,7 +553,7 @@ terraform import databricks_cluster.this The following resources are often used in the same context: -* [Dynamic Passthrough Clusters for a Group](../guides/passthrough-cluster-per-user.md) guide. +* [Dynamic Passthrough Clusters for a Group](../guides/workspace-management.md) guide. * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_clusters](../data-sources/clusters.md) data to retrieve a list of [databricks_cluster](cluster.md) ids. * [databricks_cluster_policy](cluster_policy.md) to create a [databricks_cluster](cluster.md) policy, which limits the ability to create clusters based on a set of rules. diff --git a/docs/resources/cluster_policy.md b/docs/resources/cluster_policy.md index ae6ba28c97..39b7b1dd31 100644 --- a/docs/resources/cluster_policy.md +++ b/docs/resources/cluster_policy.md @@ -148,7 +148,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - Canonical unique identifier for the cluster policy. This is equal to policy_id. +* `id` - Canonical unique identifier for the cluster policy. This is equal to `policy_id`. * `policy_id` - Canonical unique identifier for the cluster policy. ## Import @@ -163,7 +163,7 @@ terraform import databricks_cluster_policy.this The following resources are often used in the same context: -* [Dynamic Passthrough Clusters for a Group](../guides/passthrough-cluster-per-user.md) guide. +* [Dynamic Passthrough Clusters for a Group](../guides/workspace-management.md) guide. 
* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_clusters](../data-sources/clusters.md) data to retrieve a list of [databricks_cluster](cluster.md) ids. * [databricks_cluster](cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). diff --git a/docs/resources/dbfs_file.md b/docs/resources/dbfs_file.md index 0823da3d46..e3b9c73eea 100644 --- a/docs/resources/dbfs_file.md +++ b/docs/resources/dbfs_file.md @@ -49,7 +49,7 @@ resource "databricks_library" "app" { ## Argument Reference --> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. +-> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. The following arguments are supported: @@ -65,13 +65,12 @@ In addition to all arguments above, the following attributes are exported: * `file_size` - The file size of the file that is being tracked by this resource in bytes. * `dbfs_path` - Path, but with `dbfs:` prefix. - ## Import The resource dbfs file can be imported using the path of the file: ```bash -$ terraform import databricks_dbfs_file.this +terraform import databricks_dbfs_file.this ``` ## Related Resources diff --git a/docs/resources/default_namespace_settings.md b/docs/resources/default_namespace_settings.md index ff4a5f992e..78b766d63e 100644 --- a/docs/resources/default_namespace_settings.md +++ b/docs/resources/default_namespace_settings.md @@ -12,6 +12,7 @@ a fully qualified 3 level name. For example, if the default catalog is set to 'r 'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable' (the schema 'default' is always assumed). This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. + ## Example Usage ```hcl @@ -28,5 +29,3 @@ The resource supports the following arguments: * `namespace` - (Required) The configuration details. * `value` - (Required) The value for the setting. - - diff --git a/docs/resources/directory.md b/docs/resources/directory.md index 390334265e..ed53546ce0 100644 --- a/docs/resources/directory.md +++ b/docs/resources/directory.md @@ -40,17 +40,17 @@ In addition to all arguments above, the following attributes are exported: The resource directory can be imported using directory path: ```bash -$ terraform import databricks_directory.this /path/to/directory +terraform import databricks_directory.this /path/to/directory ``` ## Related Resources The following resources are often used in the same context: -* [End to end workspace management](../guides/workspace-management.md) guide. -* [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). -* [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. -* [databricks_notebook_paths](../data-sources/notebook_paths.md) data to list notebooks in Databricks Workspace. -* [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). 
-* [databricks_spark_version](../data-sources/spark_version.md) data to get [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in [databricks_cluster](cluster.md) and other resources. -* [databricks_workspace_conf](workspace_conf.md) to manage workspace configuration for expert usage. +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). +- [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. +- [databricks_notebook_paths](../data-sources/notebook_paths.md) data to list notebooks in Databricks Workspace. +- [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). +- [databricks_spark_version](../data-sources/spark_version.md) data to get [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in [databricks_cluster](cluster.md) and other resources. +- [databricks_workspace_conf](workspace_conf.md) to manage workspace configuration for expert usage. diff --git a/docs/resources/file.md b/docs/resources/file.md new file mode 100644 index 0000000000..9738e15c02 --- /dev/null +++ b/docs/resources/file.md @@ -0,0 +1,93 @@ +--- +subcategory: "Storage" +--- +# databricks_file Resource + +This resource allows uploading and downloading files in [databricks_volume](volume.md). + +Notes: + +* Currently the limit is 5GiB in octet-stream. +* Currently, only UC volumes are supported. The list of destinations may change. + +## Example Usage + +In order to manage a file on Unity Catalog Volumes with Terraform, you must specify the `source` attribute containing the full path to the file on the local filesystem. + +```hcl +resource "databricks_catalog" "sandbox" { + metastore_id = databricks_metastore.this.id + name = "sandbox" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } +} + +resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.name + name = "things" + comment = "this schema is managed by terraform" + properties = { + kind = "various" + } +} + +resource "databricks_volume" "this" { + name = "quickstart_volume" + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + volume_type = "MANAGED" + comment = "this volume is managed by terraform" +} + +resource "databricks_file" "this" { + source = "/full/path/on/local/system" + path = "${databricks_volume.this.volume_path}/fileName" +} +``` + +You can also inline sources through `content_base64` attribute. + +```hcl +resource "databricks_file" "init_script" { + content_base64 = base64encode(<<-EOT + #!/bin/bash + echo "Hello World" + EOT + ) + path = "${databricks_volume.this.volume_path}/fileName" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `source` - The full absolute path to the file. Conflicts with `content_base64`. +* `content_base64` - Contents in base 64 format. Conflicts with `source`. +* `path` - The path of the file in which you wish to save. For example, `/Volumes/main/default/volume1/file.txt`. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Same as `path`. +* `file_size` - The file size of the file that is being tracked by this resource in bytes. 
+* `modification_time` - The last time stamp when the file was modified + +## Import + +The resource `databricks_file` can be imported using the path of the file: + +```bash +terraform import databricks_file.this +``` + +## Related Resources + +The following resources are often used in the same context: + +* [databricks_workspace_file](./workspace_file.md) +* [End to end workspace management](../guides/workspace-management.md) guide. +* [databricks_volume](../resources/volume.md) to manage [volumes within Unity Catalog](https://docs.databricks.com/en/connect/unity-catalog/volumes.html). diff --git a/docs/resources/git_credential.md b/docs/resources/git_credential.md index f46b90450e..93c728585b 100644 --- a/docs/resources/git_credential.md +++ b/docs/resources/git_credential.md @@ -5,7 +5,6 @@ subcategory: "Workspace" This resource allows you to manage credentials for [Databricks Repos](https://docs.databricks.com/repos.html) using [Git Credentials API](https://docs.databricks.com/dev-tools/api/latest/gitcredentials.html). - ## Example Usage You can declare Terraform-managed Git credential using following code: @@ -20,7 +19,6 @@ resource "databricks_git_credential" "ado" { ## Argument Reference - The following arguments are supported: * `personal_access_token` - (Required) The personal access token used to authenticate to the corresponding Git provider. If value is not provided, it's sourced from the first environment variable of [`GITHUB_TOKEN`](https://registry.terraform.io/providers/integrations/github/latest/docs#oauth--personal-access-token), [`GITLAB_TOKEN`](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs#required), or [`AZDO_PERSONAL_ACCESS_TOKEN`](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs#argument-reference), that has a non-empty value. @@ -39,10 +37,9 @@ In addition to all arguments above, the following attributes are exported: The resource cluster can be imported using ID of Git credential that could be obtained via REST API: ```bash -$ terraform import databricks_git_credential.this +terraform import databricks_git_credential.this ``` - ## Related Resources The following resources are often used in the same context: diff --git a/docs/resources/global_init_script.md b/docs/resources/global_init_script.md index 4fdd921a2d..bb8e50d98b 100644 --- a/docs/resources/global_init_script.md +++ b/docs/resources/global_init_script.md @@ -28,7 +28,7 @@ resource "databricks_global_init_script" "init2" { name = "hello script" } ``` - + ## Argument Reference -> **Note** Global init script in the Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed global init script won't be overwritten by Terraform, if there's no local change to source. @@ -56,7 +56,7 @@ Global init scripts are available only for administrators, so you can't change p The resource global init script can be imported using script ID: ```bash -$ terraform import databricks_global_init_script.this script_id +terraform import databricks_global_init_script.this script_id ``` ## Related Resources diff --git a/docs/resources/grant.md b/docs/resources/grant.md index b0393df4fe..2adfdb7d67 100644 --- a/docs/resources/grant.md +++ b/docs/resources/grant.md @@ -345,3 +345,11 @@ resource "databricks_grant" "some" { ## Other access control You can control Databricks General Permissions through [databricks_permissions](permissions.md) resource. 
+ +## Import + +The resource can be imported using combination of securable type (`table`, `catalog`, `foreign_connection`, ...), it's name and `principal`: + +```bash +terraform import databricks_grant.this catalog/abc/user_name +``` diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 1127f7293e..597ca1c8fa 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -35,6 +35,7 @@ You can grant `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, ```hcl resource "databricks_grants" "sandbox" { + metastore = "metastore_id" grant { principal = "Data Engineers" privileges = ["CREATE_CATALOG", "CREATE_EXTERNAL_LOCATION"] @@ -340,3 +341,11 @@ resource "databricks_grants" "some" { ## Other access control You can control Databricks General Permissions through [databricks_permissions](permissions.md) resource. + +## Import + +The resource can be imported using combination of securable type (`table`, `catalog`, `foreign_connection`, ...) and it's name: + +```bash +terraform import databricks_grants.this catalog/abc +``` diff --git a/docs/resources/group_member.md b/docs/resources/group_member.md index 5f8df90a3d..ce87a9c7e0 100644 --- a/docs/resources/group_member.md +++ b/docs/resources/group_member.md @@ -53,7 +53,7 @@ In addition to all arguments above, the following attributes are exported: You can import a `databricks_group_member` resource with name `my_group_member` like the following: ```bash -$ terraform import databricks_group_member.my_group_member "|" +terraform import databricks_group_member.my_group_member "|" ``` ## Related Resources diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md index c3d574bdb4..2912fe9658 100644 --- a/docs/resources/instance_pool.md +++ b/docs/resources/instance_pool.md @@ -71,26 +71,26 @@ The following options are [available](https://docs.microsoft.com/en-us/azure/dat The following options are [available](https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#gcpavailability): * `gcp_availability` - (Optional) Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. -* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. - +* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. ### disk_spec Configuration Block For disk_spec make sure to use **ebs_volume_type** only on AWS deployment of Databricks and **azure_disk_volume_type** only on a Azure deployment of Databricks. * `disk_count` - (Optional) (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified. -* `disk_size` - (Optional) (Integer) The size of each disk (in GiB) to attach. +* `disk_size` - (Optional) (Integer) The size of each disk (in GiB) to attach. #### disk_type sub-block + `ebs_volume_type` - (Optional) (String) The EBS volume type to use. 
Options are: `GENERAL_PURPOSE_SSD` (Provision extra storage using AWS gp2 EBS volumes) or `THROUGHPUT_OPTIMIZED_HDD` (Provision extra storage using AWS st1 volumes) - * General Purpose SSD: `100 - 4096` GiB - * Throughput Optimized HDD: `500 - 4096` GiB +* General Purpose SSD: `100 - 4096` GiB +* Throughput Optimized HDD: `500 - 4096` GiB `azure_disk_volume_type` - (Optional) (String) The type of Azure disk to use. Options are: `PREMIUM_LRS` (Premium storage tier, backed by SSDs) or `"STANDARD_LRS"` (Standard storage tier, backed by HDDs) - * Premium LRS (SSD): `1 - 1023` GiB - * Standard LRS (HDD): `1- 1023` GiB +* Premium LRS (SSD): `1 - 1023` GiB +* Standard LRS (HDD): `1- 1023` GiB ### preloaded_docker_image sub_block @@ -139,5 +139,5 @@ In addition to all arguments above, the following attributes are exported: The resource instance pool can be imported using it's id: ```bash -$ terraform import databricks_instance_pool.this +terraform import databricks_instance_pool.this ``` diff --git a/docs/resources/ip_access_list.md b/docs/resources/ip_access_list.md index 4dd620bc62..fa0aca22c6 100644 --- a/docs/resources/ip_access_list.md +++ b/docs/resources/ip_access_list.md @@ -27,6 +27,7 @@ resource "databricks_ip_access_list" "allowed-list" { depends_on = [databricks_workspace_conf.this] } ``` + ## Argument Reference The following arguments are supported: @@ -40,6 +41,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `id` - Canonical unique identifier for the IP Access List, same as `list_id`. * `list_id` - Canonical unique identifier for the IP Access List. ## Import @@ -47,7 +49,7 @@ In addition to all arguments above, the following attributes are exported: The databricks_ip_access_list can be imported using id: ```bash -$ terraform import databricks_ip_access_list.this +terraform import databricks_ip_access_list.this ``` ## Related Resources diff --git a/docs/resources/job.md b/docs/resources/job.md index 3516f3197f..fd8e8282b7 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -116,6 +116,7 @@ This block describes individual tasks: * `spark_python_task` * `spark_submit_task` * `sql_task` + * `for_each_task` * `library` - (Optional) (Set) An optional list of libraries to be installed on the cluster that will execute the job. Please consult [libraries section](cluster.md#libraries) for [databricks_cluster](cluster.md) resource. * `depends_on` - (Optional) block specifying dependency(-ies) for a given task. * `run_if` - (Optional) An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. @@ -323,7 +324,10 @@ You can invoke Spark submit tasks only on new clusters. **In the `new_cluster` s ### dbt_task Configuration Block * `commands` - (Required) (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt". -* `project_directory` - (Optional) The relative path to the directory in the repository specified in `git_source` where dbt should look in for the `dbt_project.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--project-dir` to a dbt command. +* `source` - (Optional) The source of the project. Possible values are `WORKSPACE` and `GIT`. Defaults to `GIT` if a `git_source` block is present in the job definition. 
+* `project_directory` - (Required when `source` is `WORKSPACE`) The path where dbt should look for `dbt_project.yml`. Equivalent to passing `--project-dir` to the dbt CLI. + * If `source` is `GIT`: Relative path to the directory in the repository specified in the `git_source` block. Defaults to the repository's root directory when not specified. + * If `source` is `WORKSPACE`: Absolute path to the folder in the workspace. * `profiles_directory` - (Optional) The relative path to the directory in the repository specified by `git_source` where dbt should look in for the `profiles.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--profile-dir` to a dbt command. * `catalog` - (Optional) The name of the catalog to use inside Unity Catalog. * `schema` - (Optional) The name of the schema dbt should run in. Defaults to `default`. @@ -346,6 +350,12 @@ The `condition_task` specifies a condition with an outcome that can be used to c This task does not require a cluster to execute and does not support retries or notifications. +### for_each_task Configuration Block + +* `concurrency` - (Optional) Controls the number of active iteration task runs. Default is 20, maximum allowed is 100. +* `inputs` - (Required) (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter. +* `task` - (Required) Task to run against the `inputs` list. + ### sql_task Configuration Block One of the `query`, `dashboard` or `alert` needs to be provided. @@ -362,7 +372,9 @@ One of the `query`, `dashboard` or `alert` needs to be provided. * `alert_id` - (Required) (String) identifier of the Databricks SQL Alert. * `subscriptions` - (Required) a list of subscription blocks consisting out of one of the required fields: `user_name` for user emails or `destination_id` - for Alert destination's identifier. * `pause_subscriptions` - (Optional) flag that specifies if subscriptions are paused or not. -* `file` - (Optional) block consisting of single string field: `path` - a relative path to the file (inside the Git repository) with SQL commands to execute. *Requires `git_source` configuration block*. +* `file` - (Optional) block consisting of single string fields: + * `source` - (Optional) The source of the project. Possible values are `WORKSPACE` and `GIT`. + * `path` - If `source` is `GIT`: Relative path to the file in the repository specified in the `git_source` block with SQL commands to execute. If `source` is `WORKSPACE`: Absolute path to the file in the workspace with SQL commands to execute. Example @@ -421,7 +433,7 @@ By default, all users can create and modify jobs unless an administrator [enable ## Single-task syntax (deprecated) --> **Deprecated** Please define tasks in a `task` block rather than using single-task syntax. +-> **Deprecated** Please define tasks in a `task` block rather than using single-task syntax. This syntax uses Jobs API 2.0 to create a job with a single task. Only a subset of arguments above is supported (`name`, `libraries`, `email_notifications`, `webhook_notifications`, `timeout_seconds`, `max_retries`, `min_retry_interval_millis`, `retry_on_timeout`, `schedule`, `max_concurrent_runs`), and only a single block of `notebook_task`, `spark_jar_task`, `spark_python_task`, `spark_submit_task` and `pipeline_task` can be specified. 
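Because `for_each_task` is newly documented here, a short sketch of how the pieces fit together may help. This is a hedged example rather than one taken from these docs: the notebook path and cluster variable are placeholders, and the `{{input}}` parameter reference is assumed to resolve to the current iteration value.

```hcl
variable "shared_cluster_id" {
  description = "ID of an existing cluster to run the iterations on (assumed to be managed elsewhere)."
  type        = string
}

resource "databricks_job" "for_each_demo" {
  name = "for-each-demo"

  task {
    task_key = "process_all_countries"

    for_each_task {
      inputs      = jsonencode(["US", "DE", "JP"]) # JSON array the task iterates over
      concurrency = 2                              # at most two iterations run at once

      task {
        task_key            = "process_one_country"
        existing_cluster_id = var.shared_cluster_id

        notebook_task {
          notebook_path = "/Shared/process_country" # placeholder notebook
          base_parameters = {
            country = "{{input}}" # assumed reference to the current iteration value
          }
        }
      }
    }
  }
}
```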
diff --git a/docs/resources/library.md b/docs/resources/library.md index 329edd8d99..c3f8e9822b 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -3,7 +3,7 @@ subcategory: "Compute" --- # databricks_library resource -Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error. +Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error. -> **Note** `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart. @@ -128,5 +128,5 @@ The following resources are often used in the same context: * [databricks_global_init_script](global_init_script.md) to manage [global init scripts](https://docs.databricks.com/clusters/init-scripts.html#global-init-scripts), which are run on all [databricks_cluster](cluster.md#init_scripts) and [databricks_job](job.md#new_cluster). * [databricks_job](job.md) to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a [databricks_cluster](cluster.md). * [databricks_mount](mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. -* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/resources/metastore.md b/docs/resources/metastore.md index efed83c3c0..d159f53037 100644 --- a/docs/resources/metastore.md +++ b/docs/resources/metastore.md @@ -74,7 +74,7 @@ The following arguments are required: * `storage_root` - (Optional) Path on cloud storage account, where managed `databricks_table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined. * `region` - (Mandatory for account-level) The region of the metastore * `owner` - (Optional) Username/groupname/sp application_id of the metastore owner. -* `delta_sharing_scope` - (Optional) Required along with `delta_sharing_recipient_token_lifetime_in_seconds`. Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. +* `delta_sharing_scope` - (Optional) Required along with `delta_sharing_recipient_token_lifetime_in_seconds`. Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - (Optional) Required along with `delta_sharing_scope`. 
Used to set expiration duration in seconds on recipient data access tokens. Set to 0 for unlimited duration. * `delta_sharing_organization_name` - (Optional) The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. Once this is set it cannot be removed and can only be modified to another valid value. To delete this value please taint and recreate the resource. * `force_destroy` - (Optional) Destroy metastore regardless of its contents. diff --git a/docs/resources/mlflow_experiment.md b/docs/resources/mlflow_experiment.md index f4d71a170d..8da86061ab 100644 --- a/docs/resources/mlflow_experiment.md +++ b/docs/resources/mlflow_experiment.md @@ -25,6 +25,12 @@ The following arguments are supported: * `artifact_location` - Path to dbfs:/ or s3:// artifact location of the MLflow experiment. * `description` - The description of the MLflow experiment. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the MLflow experiment. + ## Access Control * [databricks_permissions](permissions.md#MLflow-Experiment-usage) can control which groups or individual users can *Read*, *Edit*, or *Manage* individual experiments. @@ -34,7 +40,7 @@ The following arguments are supported: The experiment resource can be imported using the id of the experiment ```bash -$ terraform import databricks_mlflow_experiment.this +terraform import databricks_mlflow_experiment.this ``` ## Related Resources diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md index 55c505f68d..60c310d295 100644 --- a/docs/resources/mlflow_model.md +++ b/docs/resources/mlflow_model.md @@ -34,12 +34,18 @@ The following arguments are supported: * `description` - The description of the MLflow model. * `tags` - Tags for the MLflow model. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the MLflow model, the same as `name`. + ## Import The model resource can be imported using the name ```bash -$ terraform import databricks_mlflow_model.this +terraform import databricks_mlflow_model.this ``` ## Access Control diff --git a/docs/resources/mlflow_webhook.md b/docs/resources/mlflow_webhook.md index 71e89605a1..eacecb97bc 100644 --- a/docs/resources/mlflow_webhook.md +++ b/docs/resources/mlflow_webhook.md @@ -100,6 +100,12 @@ Configuration must include one of `http_url_spec` or `job_spec` blocks, but not * `enable_ssl_verification` - (Optional) Enable/disable SSL certificate validation. Default is `true`. For self-signed certificates, this field must be `false` AND the destination server must disable certificate validation as well. For security purposes, it is encouraged to perform secret validation with the HMAC-encoded portion of the payload and acknowledge the risk associated with disabling hostname validation whereby it becomes more likely that requests can be maliciously routed to an unintended host. * `secret` - (Optional) Shared secret required for HMAC encoding payload. The HMAC-encoded payload will be sent in the header as `X-Databricks-Signature: encoded_payload`. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Unique ID of the MLflow Webhook. + ## Access Control * MLflow webhooks could be configured only by workspace admins. 
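Tying together the `http_url_spec` fields described above, here is a minimal sketch of a registry webhook; the endpoint URL and the secret variable are placeholders rather than values from these docs.

```hcl
variable "webhook_shared_secret" {
  type      = string
  sensitive = true
}

resource "databricks_mlflow_webhook" "registry_events" {
  events      = ["MODEL_VERSION_CREATED"]
  description = "Notify an external system when a new model version is registered"
  status      = "ACTIVE"

  http_url_spec {
    url                     = "https://hooks.example.com/mlflow" # placeholder endpoint
    secret                  = var.webhook_shared_secret          # sent back in the X-Databricks-Signature header
    enable_ssl_verification = true
  }
}
```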
diff --git a/docs/resources/mws_log_delivery.md b/docs/resources/mws_log_delivery.md index 948efb0a69..f124985bab 100644 --- a/docs/resources/mws_log_delivery.md +++ b/docs/resources/mws_log_delivery.md @@ -145,6 +145,7 @@ resource "databricks_mws_log_delivery" "audit_logs" { Resource exports the following attributes: +* `id` - the ID of log delivery configuration in form of `account_id|config_id`. * `config_id` - Databricks log delivery configuration ID. ## Import diff --git a/docs/resources/mws_permission_assignment.md b/docs/resources/mws_permission_assignment.md index acdb38bb7c..7346117ecd 100644 --- a/docs/resources/mws_permission_assignment.md +++ b/docs/resources/mws_permission_assignment.md @@ -74,6 +74,12 @@ The following arguments are required: * `"USER"` - Can access the workspace with basic privileges. * `"ADMIN"` - Can access the workspace and has workspace admin privileges to manage users and groups, workspace configurations, and more. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the permission assignment in form of `workspace_id|principal_id`. + ## Import The resource `databricks_mws_permission_assignment` can be imported using the workspace id and principal id diff --git a/docs/resources/mws_private_access_settings.md b/docs/resources/mws_private_access_settings.md index cc613e64d9..e5bde84479 100644 --- a/docs/resources/mws_private_access_settings.md +++ b/docs/resources/mws_private_access_settings.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_private_access_settings Resource -Allows you to create a [Private Access Setting]that can be used as part of a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) +Allows you to create a Private Access Setting resource that can be used as part of a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) It is strongly recommended that customers read the [Enable AWS Private Link](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) [Enable GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) documentation before trying to leverage this resource. @@ -82,6 +82,7 @@ The following arguments are available: In addition to all arguments above, the following attributes are exported: +* `id` - the ID of the Private Access Settings in form of `account_id/private_access_settings_id`. 
* `private_access_settings_id` - Canonical unique identifier of Private Access Settings in Databricks Account * `status` - (AWS only) Status of Private Access Settings diff --git a/docs/resources/mws_vpc_endpoint.md b/docs/resources/mws_vpc_endpoint.md index 0c10bd8cae..58c02400d0 100644 --- a/docs/resources/mws_vpc_endpoint.md +++ b/docs/resources/mws_vpc_endpoint.md @@ -190,6 +190,7 @@ The following arguments are required: In addition to all arguments above, the following attributes are exported: +* `id` - the ID of VPC Endpoint in form of `account_id/vpc_endpoint_id` * `vpc_endpoint_id` - Canonical unique identifier of VPC Endpoint in Databricks Account * `aws_endpoint_service_id` - (AWS Only) The ID of the Databricks endpoint service that this VPC endpoint is connected to. Please find the list of endpoint service IDs for each supported region in the [Databricks PrivateLink documentation](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) * `state` - (AWS Only) State of VPC Endpoint diff --git a/docs/resources/notebook.md b/docs/resources/notebook.md index 64c4123469..a88b9db484 100644 --- a/docs/resources/notebook.md +++ b/docs/resources/notebook.md @@ -70,7 +70,7 @@ In addition to all arguments above, the following attributes are exported: The resource notebook can be imported using notebook path ```bash -$ terraform import databricks_notebook.this /path/to/notebook +terraform import databricks_notebook.this /path/to/notebook ``` ## Related Resources diff --git a/docs/resources/permission_assignment.md b/docs/resources/permission_assignment.md index 198a1274b3..b57249c543 100644 --- a/docs/resources/permission_assignment.md +++ b/docs/resources/permission_assignment.md @@ -75,6 +75,12 @@ The following arguments are required: * `"USER"` - Can access the workspace with basic privileges. * `"ADMIN"` - Can access the workspace and has workspace admin privileges to manage users and groups, workspace configurations, and more. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the permission assignment - same as `principal_id`. + ## Import The resource `databricks_permission_assignment` can be imported using the principal id diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index 5f3039db8b..ce2ab812ca 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -819,7 +819,7 @@ Exactly one of the below arguments is required: In addition to all arguments above, the following attributes are exported: -- `id` - Canonical unique identifier for the permissions. +- `id` - Canonical unique identifier for the permissions in form of `/object_type/object_id`. - `object_type` - type of permissions. ## Import diff --git a/docs/resources/pipeline.md b/docs/resources/pipeline.md index 882b4ef813..7fa7a90e76 100644 --- a/docs/resources/pipeline.md +++ b/docs/resources/pipeline.md @@ -3,7 +3,7 @@ subcategory: "Compute" --- # databricks_pipeline Resource -Use `databricks_pipeline` to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +Use `databricks_pipeline` to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). ## Example Usage @@ -94,13 +94,19 @@ DLT allows to specify one or more notification blocks to get notifications about * `on-update-fatal-failure` - a pipeline update fails with a non-retryable (fatal) error. 
* `on-flow-failure` - a single data flow fails. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Canonical unique identifier of the DLT pipeline. +* `url` - URL of the DLT pipeline on the given workspace. ## Import The resource job can be imported using the id of the pipeline ```bash -$ terraform import databricks_pipeline.this +terraform import databricks_pipeline.this ``` ## Related Resources diff --git a/docs/resources/recipient.md b/docs/resources/recipient.md index dae2797af0..d710f1759b 100644 --- a/docs/resources/recipient.md +++ b/docs/resources/recipient.md @@ -94,6 +94,7 @@ Exactly one of the below arguments is required: In addition to all arguments above, the following attributes are exported: +* `id` - the ID of the recipient - the same as the `name`. * `tokens` - List of Recipient Tokens. This field is only present when the authentication_type is TOKEN. Each list element is an object with following attributes: * `id` - Unique ID of the recipient token. * `created_at` - Time at which this recipient Token was created, in epoch milliseconds. diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md index e0b584cfcc..0aaf7404c1 100644 --- a/docs/resources/registered_model.md +++ b/docs/resources/registered_model.md @@ -41,7 +41,7 @@ In addition to all arguments above, the following attributes are exported: The registered model resource can be imported using the full (3-level) name of the model. ```bash -$ terraform import databricks_registered_model.this +terraform import databricks_registered_model.this ``` ## Related Resources diff --git a/docs/resources/restrict_workspace_admins_setting.md b/docs/resources/restrict_workspace_admins_setting.md new file mode 100644 index 0000000000..5e54487d6f --- /dev/null +++ b/docs/resources/restrict_workspace_admins_setting.md @@ -0,0 +1,40 @@ +--- +subcategory: "Settings" +--- + +# databricks_restrict_workspace_admins_setting Resource + +-> **Note** This resource can only be used with a workspace-level provider! + +The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins. + +With the status set to `ALLOW_ALL`, workspace admins can: + +1. Create service principal personal access tokens on behalf of any service principal in their workspace. +2. Change a job owner to any user in the workspace. +3. Change the job run_as setting to any user in their workspace or a service principal on which they have the Service Principal User role. + +With the status set to `RESTRICT_TOKENS_AND_JOB_RUN_AS`, workspace admins can: + +1. Only create personal access tokens on behalf of service principals on which they have the Service Principal User role. +2. Only change a job owner to themselves. +3. Only change the job run_as setting to themselves or a service principal on which they have the Service Principal User role. + +-> **Note** Only account admins can update this setting, and the account admin must be part of the workspace to change the setting status. + +## Example Usage + +```hcl +resource "databricks_restrict_workspace_admins_setting" "this" { + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } +} +``` + +## Argument Reference + +The resource supports the following arguments: + +* `restrict_workspace_admins` - (Required) The configuration details. +* `status` - (Required) The restrict workspace admins status for the workspace.
diff --git a/docs/resources/secret.md b/docs/resources/secret.md index 3f155ebfef..a79aef3e83 100644 --- a/docs/resources/secret.md +++ b/docs/resources/secret.md @@ -35,7 +35,6 @@ The following arguments are required: * `scope` - (Required) (String) name of databricks secret scope. Must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. * `key` - (Required) (String) key within secret scope. Must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. - ## Attribute Reference In addition to all arguments above, the following attributes are exported: @@ -44,13 +43,12 @@ In addition to all arguments above, the following attributes are exported: * `last_updated_timestamp` - (Integer) time secret was updated * `config_reference` - (String) value to use as a secret reference in [Spark configuration and environment variables](https://docs.databricks.com/security/secrets/secrets.html#use-a-secret-in-a-spark-configuration-property-or-environment-variable): `{{secrets/scope/key}}`. - ## Import The resource secret can be imported using `scopeName|||secretKey` combination. **This may change in future versions.** ```bash -$ terraform import databricks_secret.app `scopeName|||secretKey` +terraform import databricks_secret.app `scopeName|||secretKey` ``` ## Related Resources @@ -59,7 +57,7 @@ The following resources are often used in the same context: * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). -* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). * [databricks_secret_acl](secret_acl.md) to manage access to [secrets](https://docs.databricks.com/security/secrets/index.html#secrets-user-guide) in Databricks workspace. * [databricks_secret_scope](secret_scope.md) to create [secret scopes](https://docs.databricks.com/security/secrets/index.html#secrets-user-guide) in Databricks workspace. diff --git a/docs/resources/secret_acl.md b/docs/resources/secret_acl.md index ddc3df797f..ef63f1181c 100644 --- a/docs/resources/secret_acl.md +++ b/docs/resources/secret_acl.md @@ -48,7 +48,7 @@ The following arguments are required: The resource secret acl can be imported using `scopeName|||principalName` combination. ```bash -$ terraform import databricks_secret_acl.object `scopeName|||principalName` +terraform import databricks_secret_acl.object `scopeName|||principalName` ``` ## Related Resources diff --git a/docs/resources/secret_scope.md b/docs/resources/secret_scope.md index d73d588e5b..25218d5c7c 100644 --- a/docs/resources/secret_scope.md +++ b/docs/resources/secret_scope.md @@ -22,13 +22,12 @@ The following arguments are supported: ### keyvault_metadata -On Azure, it is possible to create Azure Databricks secret scopes backed by Azure Key Vault. Secrets are stored in Azure Key Vault and can be accessed through the Azure Databricks secrets utilities, making use of Azure Databricks access control and secret redaction. A secret scope may be configured with at most one Key Vault. 
+On Azure, it is possible to create Azure Databricks secret scopes backed by Azure Key Vault. Secrets are stored in Azure Key Vault and can be accessed through the Azure Databricks secrets utilities, making use of Azure Databricks access control and secret redaction. A secret scope may be configured with at most one Key Vault. -> **Warning** To create a secret scope from Azure Key Vault, you must use one of the [Azure-specific authentication methods](../index.md#special-configurations-for-azure). Secret scopes backed by Azure Key Vault cannot be created using personal access tokens (PAT). To define AKV access policies, you must use [azurerm_key_vault_access_policy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/key_vault_access_policy) instead of [access_policy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/key_vault#access_policy) blocks on `azurerm_key_vault`, otherwise Terraform will remove access policies needed to access the Key Vault and the secret scope won't be in a usable state anymore. - ```hcl data "azurerm_client_config" "current" { } @@ -73,7 +72,7 @@ In addition to all arguments above, the following attributes are exported: The secret resource scope can be imported using the scope name. `initial_manage_principal` state won't be imported, because the underlying API doesn't include it in the response. ```bash -$ terraform import databricks_secret_scope.object +terraform import databricks_secret_scope.object ``` ## Related Resources diff --git a/docs/resources/service_principal_role.md b/docs/resources/service_principal_role.md index 369f70c6e4..511089d7b0 100644 --- a/docs/resources/service_principal_role.md +++ b/docs/resources/service_principal_role.md @@ -23,6 +23,7 @@ resource "databricks_service_principal_role" "my_service_principal_instance_prof role = databricks_instance_profile.instance_profile.id } ``` + ## Argument Reference The following arguments are supported: @@ -34,7 +35,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The id in the format `|`. +* `id` - The id in the format `|`. ## Import diff --git a/docs/resources/service_principal_secret.md b/docs/resources/service_principal_secret.md index 17c5a6cf5e..f98abd9b3e 100644 --- a/docs/resources/service_principal_secret.md +++ b/docs/resources/service_principal_secret.md @@ -3,15 +3,14 @@ subcategory: "Security" --- # databricks_service_principal_secret Resource --> **Note** This resource is only available in Databricks AWS. +-> **Note** This resource can only be used with an account-level provider. -With this resource you can create a secret under the given [Service Principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html) +With this resource you can create a secret for a given [Service Principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html). This secret can be used to configure the Databricks Terraform Provider to authenticate with the service principal. See [Authenticating with service principal](../index.md#authenticating-with-service-principal). Additionally, the secret can be used to request OAuth tokens for the service principal, which can be used to authenticate to Databricks REST APIs. See [Authentication using OAuth tokens for service principals](https://docs.databricks.com/dev-tools/authentication-oauth.html). 
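As a sketch of the provider-authentication use mentioned above (the workspace host variable and provider alias are assumptions; the service principal and secret resources follow the naming of the example below):

```hcl
variable "workspace_url" {
  type = string
}

# Use the generated secret for OAuth machine-to-machine authentication
# against the workspace as the service principal.
provider "databricks" {
  alias         = "sp"
  host          = var.workspace_url
  client_id     = databricks_service_principal.this.application_id
  client_secret = databricks_service_principal_secret.terraform_sp.secret
}
```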
- ## Example Usage Create service principal secret @@ -26,16 +25,14 @@ resource "databricks_service_principal_secret" "terraform_sp" { The following arguments are available: -* `service_principal_id` - ID of the [databricks_service_principal](service_principal.md) - +* `service_principal_id` - ID of the [databricks_service_principal](service_principal.md) (not application ID). ## Attribute Reference In addition to all arguments above, the following attributes are exported: -- `id` - ID of the secret -- `secret` - Generated secret for the service principal - +* `id` - ID of the secret +* `secret` - Generated secret for the service principal ## Related Resources diff --git a/docs/resources/share.md b/docs/resources/share.md index 3b4aee6432..5c0756be82 100644 --- a/docs/resources/share.md +++ b/docs/resources/share.md @@ -33,6 +33,19 @@ resource "databricks_share" "some" { } ``` +Creating a Delta Sharing share and add a schema to it(including all current and future tables). + +```hcl +resource "databricks_share" "schema_share" { + name = "schema_share" + object { + name = "catalog_name.schema_name" + data_object_type = "SCHEMA" + history_data_sharing_status = "ENABLED" + } +} +``` + Creating a Delta Sharing share and share a table with partitions spec and history ```hcl @@ -75,7 +88,7 @@ The following arguments are required: ### object Configuration Block * `name` (Required) - Full name of the object, e.g. `catalog.schema.name` for a table. -* `data_object_type` (Required) - Type of the object, currently only `TABLE` is allowed. +* `data_object_type` (Required) - Type of the data object, currently `TABLE`, `SCHEMA`, `VOLUME`, `NOTEBOOK_FILE` are supported. * `comment` (Optional) - Description about the object. * `shared_as` (Optional) - A user-provided new name for the data object within the share. If this new name is not provided, the object's original name will be used as the `shared_as` name. The `shared_as` name must be unique within a Share. Change forces creation of a new resource. * `cdf_enabled` (Optional) - Whether to enable Change Data Feed (cdf) on the shared object. When this field is set, field `history_data_sharing_status` can not be set. @@ -95,6 +108,7 @@ To share only part of a table when you add the table to a share, you can provide In addition to all arguments above, the following attributes are exported: +* `id` - the ID of the share, the same as `name`. * `created_at` - Time when the share was created. * `created_by` - The principal that created the share. * `status` - Status of the object, one of: `ACTIVE`, `PERMISSION_DENIED`. diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md index 67c8a0db5a..f523c2acc7 100644 --- a/docs/resources/sql_alert.md +++ b/docs/resources/sql_alert.md @@ -50,7 +50,13 @@ The following arguments are available: * `custom_body` - (Optional, String) Custom body of alert notification, if it exists. See [Alerts API reference](https://docs.databricks.com/sql/user/alerts/index.html) for custom templating instructions. * `empty_result_state` - (Optional, String) State that alert evaluates to when query result is empty. Currently supported values are `unknown`, `triggered`, `ok` - check [API documentation](https://docs.databricks.com/api/workspace/alerts/create) for full list of supported values. * `parent` - (Optional, String) The identifier of the workspace folder containing the alert. The default is ther user's home folder. The folder identifier is formatted as `folder/`. 
-* `rearm` - (Optional, Integer) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. +* `rearm` - (Optional, Integer) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - unique ID of the SQL Alert. ## Related Resources diff --git a/docs/resources/sql_dashboard.md b/docs/resources/sql_dashboard.md index 1b96357314..3fc87cf4e1 100644 --- a/docs/resources/sql_dashboard.md +++ b/docs/resources/sql_dashboard.md @@ -40,15 +40,20 @@ resource "databricks_permissions" "d1" { } ``` +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - the unique ID of the SQL Dashboard. + ## Import You can import a `databricks_sql_dashboard` resource with ID like the following: ```bash -$ terraform import databricks_sql_dashboard.this +terraform import databricks_sql_dashboard.this ``` - ## Related Resources The following resources are often used in the same context: diff --git a/docs/resources/sql_endpoint.md b/docs/resources/sql_endpoint.md index e61284a7ef..c77cd3cf54 100644 --- a/docs/resources/sql_endpoint.md +++ b/docs/resources/sql_endpoint.md @@ -39,9 +39,9 @@ The following arguments are supported: * `enable_photon` - Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. * `enable_serverless_compute` - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). + * **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). 
- - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). + * **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). * `channel` block, consisting of following fields: * `name` - Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. @@ -52,6 +52,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `id` - the unique ID of the SQL warehouse. * `jdbc_url` - JDBC connection string. * `odbc_params` - ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. * `data_source_id` - ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. @@ -81,7 +82,7 @@ timeouts { You can import a `databricks_sql_endpoint` resource with ID like the following: ```bash -$ terraform import databricks_sql_endpoint.this +terraform import databricks_sql_endpoint.this ``` ## Related resources diff --git a/docs/resources/sql_global_config.md b/docs/resources/sql_global_config.md index 3fbd7738b6..013dd55500 100644 --- a/docs/resources/sql_global_config.md +++ b/docs/resources/sql_global_config.md @@ -39,15 +39,14 @@ resource "databricks_sql_global_config" "this" { } ``` - ## Argument Reference The following arguments are supported (see [documentation](https://docs.databricks.com/sql/api/sql-endpoints.html#global-edit) for more details): * `security_policy` (Optional, String) - The policy for controlling access to datasets. Default value: `DATA_ACCESS_CONTROL`, consult documentation for list of possible values * `data_access_config` (Optional, Map) - Data access configuration for [databricks_sql_endpoint](sql_endpoint.md), such as configuration for an external Hive metastore, Hadoop Filesystem configuration, etc. Please note that the list of supported configuration properties is limited, so refer to the [documentation](https://docs.databricks.com/sql/admin/data-access-configuration.html#supported-properties) for a full list. Apply will fail if you're specifying not permitted configuration. -* `instance_profile_arn` (Optional, String) - [databricks_instance_profile](instance_profile.md) used to access storage from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for AWS, and will generate an error if used on other clouds. 
-* `google_service_account` (Optional, String) - used to access GCP services, such as Cloud Storage, from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for GCP, and will generate an error if used on other clouds. +* `instance_profile_arn` (Optional, String) - [databricks_instance_profile](instance_profile.md) used to access storage from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for AWS, and will generate an error if used on other clouds. +* `google_service_account` (Optional, String) - used to access GCP services, such as Cloud Storage, from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for GCP, and will generate an error if used on other clouds. * `sql_config_params` (Optional, Map) - SQL Configuration Parameters let you override the default behavior for all sessions with all endpoints. ## Import @@ -55,7 +54,7 @@ The following arguments are supported (see [documentation](https://docs.databric You can import a `databricks_sql_global_config` resource with command like the following (you need to use `global` as ID): ```bash -$ terraform import databricks_sql_global_config.this global +terraform import databricks_sql_global_config.this global ``` ## Related Resources diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md index 95e74c102a..27e12e03ea 100644 --- a/docs/resources/sql_query.md +++ b/docs/resources/sql_query.md @@ -111,12 +111,18 @@ For `text`, `number`, `date`, `datetime`, `datetimesec` block * `value` - The default value for this parameter. +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - the unique ID of the SQL Query. + ## Import You can import a `databricks_sql_query` resource with ID like the following: ```bash -$ terraform import databricks_sql_query.this +terraform import databricks_sql_query.this ``` ## Troubleshooting diff --git a/docs/resources/sql_visualization.md b/docs/resources/sql_visualization.md index 86e1c296a3..b9ea7d6c99 100644 --- a/docs/resources/sql_visualization.md +++ b/docs/resources/sql_visualization.md @@ -42,16 +42,15 @@ resource "databricks_sql_visualization" "q1v1" { } ``` - ## Separating `visualization definition` from IAC configuration Since `options` field contains the full JSON encoded string definition of how to render a visualization for the backend API - `sql/api/visualizations`, they can get quite verbose. If you have lots of visualizations to declare, it might be cleaner to separate the `options` field and store them as separate `.json` files to be referenced. -### Example Usage +### Example -- directory tree +- directory tree ```bash . 
@@ -72,7 +71,6 @@ If you have lots of visualizations to declare, it might be cleaner to separate t name = "My Table" description = "Some Description" options = file("${path.module}/visualizations/q1v1.json") - ) } resource "databricks_sql_visualization" "q1v2" { @@ -81,7 +79,6 @@ If you have lots of visualizations to declare, it might be cleaner to separate t name = "My Chart" description = "Some Description" options = file("${path.module}/visualizations/q1v2.json") - ) } ``` @@ -97,15 +94,15 @@ In preparation for this operational scenario; you should be familiar with, and h You can import a `databricks_sql_visualization` resource with ID like the following: ```bash -$ terraform import databricks_sql_visualization.this / +terraform import databricks_sql_visualization.this / ``` ## Related Resources The following resources are often used in the same context: -* [End to end workspace management](../guides/workspace-management.md) guide. -* [databricks_sql_dashboard](sql_dashboard.md) to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). -* [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). -* [databricks_sql_global_config](sql_global_config.md) to configure the security policy, [databricks_instance_profile](instance_profile.md), and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all [databricks_sql_endpoint](sql_endpoint.md) of workspace. -* [databricks_sql_permissions](sql_permissions.md) to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_sql_dashboard](sql_dashboard.md) to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). +- [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). +- [databricks_sql_global_config](sql_global_config.md) to configure the security policy, [databricks_instance_profile](instance_profile.md), and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all [databricks_sql_endpoint](sql_endpoint.md) of workspace. +- [databricks_sql_permissions](sql_permissions.md) to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). diff --git a/docs/resources/sql_widget.md b/docs/resources/sql_widget.md index 1c38f09652..ec862e85c8 100644 --- a/docs/resources/sql_widget.md +++ b/docs/resources/sql_widget.md @@ -11,7 +11,7 @@ A widget is always tied to a [dashboard](sql_dashboard.md). Every dashboard may ## Example Usage -``` +```hcl resource "databricks_sql_widget" "d1w1" { dashboard_id = databricks_sql_dashboard.d1.id text = "Hello! I'm a **text widget**!" 
@@ -42,7 +42,7 @@ resource "databricks_sql_widget" "d1w2" { You can import a `databricks_sql_widget` resource with ID like the following: ```bash -$ terraform import databricks_sql_widget.this / +terraform import databricks_sql_widget.this / ``` ## Related Resources diff --git a/docs/resources/system_schema.md b/docs/resources/system_schema.md index fd6d5add0d..51ea5a9280 100644 --- a/docs/resources/system_schema.md +++ b/docs/resources/system_schema.md @@ -29,6 +29,7 @@ The following arguments are available: In addition to all arguments above, the following attributes are exported: +* `id` - the ID of system schema in form of `metastore_id|schema_name`. * `state` - The current state of enablement for the system schema. ## Import diff --git a/docs/resources/user_instance_profile.md b/docs/resources/user_instance_profile.md index d655c76cca..88e6016c8e 100644 --- a/docs/resources/user_instance_profile.md +++ b/docs/resources/user_instance_profile.md @@ -23,6 +23,7 @@ resource "databricks_user_instance_profile" "my_user_instance_profile" { instance_profile_id = databricks_instance_profile.instance_profile.id } ``` + ## Argument Reference The following arguments are supported: @@ -34,7 +35,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The id in the format `|`. +* `id` - The id in the format `|`. ## Import diff --git a/docs/resources/vector_search_endpoint.md b/docs/resources/vector_search_endpoint.md new file mode 100644 index 0000000000..51f2addd06 --- /dev/null +++ b/docs/resources/vector_search_endpoint.md @@ -0,0 +1,47 @@ +--- +subcategory: "Vector Search" +--- +# databricks_vector_search_endpoint Resource + +-> **Note** This resource can only be used in a Unity Catalog-enabled workspace! + +This resource allows you to create a [Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Vector Search Endpoint is used to create and access vector search indexes. + +## Example Usage + +```hcl +resource "databricks_vector_search_endpoint" "this" { + name = "vector-search-test" + endpoint_type = "STANDARD" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Name of the Vector Search Endpoint to create. If name is changed, Vector Search Endpoint is recreated. +* `endpoint_type` (Required) Type of Vector Search Endpoint. Currently only a single value is accepted: `STANDARD` (See [documentation](https://docs.databricks.com/api/workspace/vectorsearchendpoints/createendpoint) for the list of currently supported values). If it's changed, Vector Search Endpoint is recreated. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The same as the name of the endpoint. +* `creator` - Creator of the endpoint. +* `creation_timestamp` - Timestamp of endpoint creation (milliseconds). +* `last_updated_user` - User who last updated the endpoint. +* `last_updated_timestamp` - Timestamp of last update to the endpoint (milliseconds). +* `endpoint_id` - Unique internal identifier of the endpoint (UUID). +* `num_indexes` - Number of indexes on the endpoint. +* `endpoint_status` - Object describing the current status of the endpoint consisting of following fields: + * `state` - Current state of the endpoint.
Currently following values are supported: `PROVISIONING`, `ONLINE`, `OFFLINE`. + * `message` - Additional status message. + +## Import + +The resource can be imported using the name of the Vector Search Endpoint + +```bash +terraform import databricks_vector_search_endpoint.this +``` diff --git a/docs/resources/volume.md b/docs/resources/volume.md index dda2c975d0..b046a97329 100644 --- a/docs/resources/volume.md +++ b/docs/resources/volume.md @@ -93,6 +93,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: * `id` - ID of this Unity Catalog Volume in form of `..`. +* `volume_path` - base file path for this Unity Catalog Volume in form of `/Volumes///`. ## Import diff --git a/docs/resources/workspace_conf.md b/docs/resources/workspace_conf.md index 0c45405b23..0c433c9b79 100644 --- a/docs/resources/workspace_conf.md +++ b/docs/resources/workspace_conf.md @@ -11,11 +11,11 @@ Manages workspace configuration for expert usage. Currently, more than one insta Allows specification of custom configuration properties for expert usage: - * `enableIpAccessLists` - enables the use of [databricks_ip_access_list](ip_access_list.md) resources - * `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set. - * `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace. - * `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace. - * `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace. +* `enableIpAccessLists` - enables the use of [databricks_ip_access_list](ip_access_list.md) resources +* `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set. +* `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace. +* `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace. +* `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace. 
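To complement the property list above, here is a short sketch of how such settings are applied through `databricks_workspace_conf`; the resource name and the chosen values are illustrative assumptions, not part of this change.

```hcl
# Sketch only: tighten the personal access token policy for a workspace.
resource "databricks_workspace_conf" "token_policy" {
  custom_config = {
    "enableTokensConfig"   = true
    "maxTokenLifetimeDays" = "90"
  }
}
```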
```hcl resource "databricks_workspace_conf" "this" { diff --git a/docs/resources/workspace_file.md b/docs/resources/workspace_file.md index 50c46ef061..f7cbc8e1de 100644 --- a/docs/resources/workspace_file.md +++ b/docs/resources/workspace_file.md @@ -60,7 +60,7 @@ In addition to all arguments above, the following attributes are exported: The workspace file resource can be imported using workspace file path ```bash -$ terraform import databricks_workspace_file.this /path/to/file +terraform import databricks_workspace_file.this /path/to/file ``` ## Related Resources diff --git a/exporter/command.go b/exporter/command.go index b96e8f396a..99b3c10504 100644 --- a/exporter/command.go +++ b/exporter/command.go @@ -6,12 +6,14 @@ import ( "log" "net/http" "os" + "slices" "strings" "time" "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/terraform-provider-databricks/common" + "golang.org/x/exp/maps" ) type levelWriter []string @@ -31,23 +33,15 @@ func (lw *levelWriter) Write(p []byte) (n int, err error) { } func (ic *importContext) allServicesAndListing() (string, string) { - services := "" - listing := "" + services := map[string]struct{}{} + listing := map[string]struct{}{} for _, ir := range ic.Importables { - if !strings.Contains(services, ir.Service) { - if len(services) > 0 { - services += "," - } - services += ir.Service - } - if ir.List != nil && !strings.Contains(listing, ir.Service) { - if len(listing) > 0 { - listing += "," - } - listing += ir.Service + services[ir.Service] = struct{}{} + if ir.List != nil { + listing[ir.Service] = struct{}{} } } - return services, listing + return strings.Join(maps.Keys(services), ","), strings.Join(maps.Keys(listing), ",") } func (ic *importContext) interactivePrompts() { @@ -57,23 +51,35 @@ func (ic *importContext) interactivePrompts() { ic.Client.DatabricksClient.Config.Token = askFor("🔑 Databricks Workspace PAT:") } ic.match = askFor("🔍 Match entity names (optional):") - listing := "" + + services := map[string][]string{} for r, ir := range ic.Importables { if ir.List == nil { continue } - if !askFlag(fmt.Sprintf("✅ Generate `%s` and related resources?", r)) { - continue + service := ir.Service + v, exists := services[service] + if exists { + services[service] = append(v, r) + } else { + services[service] = []string{r} } - if len(listing) > 0 { - listing += "," + } + + ic.listing = map[string]struct{}{} + keys := maps.Keys(services) + slices.Sort(keys) + for _, service := range keys { + resources := services[service] + if !askFlag(fmt.Sprintf("✅ Generate for service `%s` (%s) and related resources?", + service, strings.Join(resources, ","))) { + continue } - listing += ir.Service - if ir.Service == "mounts" { + ic.listing[service] = struct{}{} + if service == "mounts" { ic.mounts = true } } - ic.listing = listing } // Run import according to flags @@ -124,7 +130,8 @@ func Run(args ...string) error { var configuredServices string flags.StringVar(&configuredServices, "services", services, "Comma-separated list of services to import. By default all services are imported.") - flags.StringVar(&ic.listing, "listing", listing, + var configuredListing string + flags.StringVar(&configuredListing, "listing", listing, "Comma-separated list of services to be listed and further passed on for importing. "+ "`-services` parameter controls which transitive dependencies will be processed. 
"+ "We recommend limiting services with `-listing` more often, than `-services`.") @@ -153,5 +160,6 @@ func Run(args ...string) error { logLevel = append(logLevel, "[DEBUG]") } ic.enableServices(configuredServices) + ic.enableListing(configuredListing) return ic.Run() } diff --git a/exporter/context.go b/exporter/context.go index b128e4f564..b56cf46960 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -33,6 +33,7 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/zclconf/go-cty/cty" ) @@ -91,7 +92,7 @@ type importContext struct { mounts bool noFormat bool services map[string]struct{} - listing string + listing map[string]struct{} match string lastActiveDays int64 lastActiveMs int64 @@ -109,37 +110,33 @@ type importContext struct { // TODO: protect by mutex? mountMap map[string]mount - // testEmits map[string]bool testEmitsMutex sync.Mutex - // allGroups []scim.Group groupsMutex sync.Mutex - // allUsers map[string]scim.User usersMutex sync.RWMutex allUsersMapping map[string]string // maps user_name -> internal ID allUsersMutex sync.RWMutex - // allSps map[string]scim.User allSpsMapping map[string]string // maps application_id -> internal ID spsMutex sync.RWMutex - // importing map[string]bool importingMutex sync.RWMutex - // sqlDatasources map[string]string sqlDatasourcesMutex sync.Mutex // workspace-related objects & corresponding mutex - allDirectories []workspace.ObjectStatus - allWorkspaceObjects []workspace.ObjectStatus - wsObjectsMutex sync.RWMutex + allDirectories []workspace.ObjectStatus + allWorkspaceObjects []workspace.ObjectStatus + wsObjectsMutex sync.RWMutex + oldWorkspaceObjects []workspace.ObjectStatus + oldWorkspaceObjectMapping map[int64]string builtInPolicies map[string]compute.PolicyFamily builtInPoliciesMutex sync.Mutex @@ -151,11 +148,12 @@ type importContext struct { ignoredResourcesMutex sync.Mutex ignoredResources map[string]struct{} + deletedResources map[string]struct{} + // emitting of users/SPs emittedUsers map[string]struct{} emittedUsersMutex sync.RWMutex - // userOrSpDirectories map[string]bool userOrSpDirectoriesMutex sync.RWMutex } @@ -239,30 +237,35 @@ func newImportContext(c *common.DatabricksClient) *importContext { supportedResources := maps.Keys(resourcesMap) return &importContext{ - Client: c, - Context: ctx, - State: newStateApproximation(supportedResources), - Importables: resourcesMap, - Resources: p.ResourcesMap, - Scope: importedResources{}, - importing: map[string]bool{}, - nameFixes: nameFixes, - hclFixes: []regexFix{}, // Be careful with that! 
it may break working code - variables: map[string]string{}, - allDirectories: []workspace.ObjectStatus{}, - allWorkspaceObjects: []workspace.ObjectStatus{}, - workspaceConfKeys: workspaceConfKeys, - shImports: map[string]bool{}, - notebooksFormat: "SOURCE", - allUsers: map[string]scim.User{}, - allSps: map[string]scim.User{}, - waitGroup: &sync.WaitGroup{}, - channels: makeResourcesChannels(), - defaultHanlerChannelSize: defaultHanlerChannelSize, - defaultChannel: make(resourceChannel, defaultHanlerChannelSize), - ignoredResources: map[string]struct{}{}, - emittedUsers: map[string]struct{}{}, - userOrSpDirectories: map[string]bool{}, + Client: c, + Context: ctx, + State: newStateApproximation(supportedResources), + Importables: resourcesMap, + Resources: p.ResourcesMap, + Scope: importedResources{}, + importing: map[string]bool{}, + nameFixes: nameFixes, + hclFixes: []regexFix{}, // Be careful with that! it may break working code + variables: map[string]string{}, + allDirectories: []workspace.ObjectStatus{}, + allWorkspaceObjects: []workspace.ObjectStatus{}, + oldWorkspaceObjects: []workspace.ObjectStatus{}, + oldWorkspaceObjectMapping: map[int64]string{}, + workspaceConfKeys: workspaceConfKeys, + shImports: map[string]bool{}, + notebooksFormat: "SOURCE", + allUsers: map[string]scim.User{}, + allSps: map[string]scim.User{}, + waitGroup: &sync.WaitGroup{}, + channels: makeResourcesChannels(), + defaultHanlerChannelSize: defaultHanlerChannelSize, + defaultChannel: make(resourceChannel, defaultHanlerChannelSize), + ignoredResources: map[string]struct{}{}, + deletedResources: map[string]struct{}{}, + emittedUsers: map[string]struct{}{}, + userOrSpDirectories: map[string]bool{}, + services: map[string]struct{}{}, + listing: map[string]struct{}{}, } } @@ -288,6 +291,7 @@ func getLastRunString(fileName string) string { func (ic *importContext) Run() error { startTime := time.Now() statsFileName := ic.Directory + "/exporter-run-stats.json" + wsObjectsFileName := ic.Directory + "/ws_objects.json" if len(ic.services) == 0 { return fmt.Errorf("no services to import") } @@ -308,10 +312,12 @@ func (ic *importContext) Run() error { ic.updatedSinceStr = tm.UTC().Format(time.RFC3339) tm, _ = time.Parse(time.RFC3339, ic.updatedSinceStr) ic.updatedSinceMs = tm.UnixMilli() + + ic.loadOldWorkspaceObjects(wsObjectsFileName) } - log.Printf("[INFO] Importing %s module into %s directory Databricks resources of %s services", - ic.Module, ic.Directory, maps.Keys(ic.services)) + log.Printf("[INFO] Importing %s module into %s directory Databricks resources of %s services. Listing %s", + ic.Module, ic.Directory, maps.Keys(ic.services), maps.Keys(ic.listing)) ic.notebooksFormat = strings.ToUpper(ic.notebooksFormat) _, supportedFormat := fileExtensionFormatMapping[ic.notebooksFormat] @@ -372,7 +378,8 @@ func (ic *importContext) Run() error { if ir.List == nil { continue } - if !strings.Contains(ic.listing, ir.Service) { + _, exists := ic.listing[ir.Service] + if !exists { log.Printf("[DEBUG] %s (%s service) is not part of listing", resourceName, ir.Service) continue } @@ -398,9 +405,10 @@ func (ic *importContext) Run() error { // close channels ic.closeImportChannels() - // This should be single threaded... 
- if ic.Scope.Len() == 0 { - return fmt.Errorf("no resources to import") + // Generating the code + ic.findDeletedResources() + if ic.Scope.Len() == 0 && len(ic.deletedResources) == 0 { + return fmt.Errorf("no resources to import or delete") } shFileName := fmt.Sprintf("%s/import.sh", ic.Directory) if ic.incremental { @@ -460,7 +468,7 @@ func (ic *importContext) Run() error { return err } - // + // Write stats file if stats, err := os.Create(statsFileName); err == nil { defer stats.Close() statsData := map[string]any{ @@ -470,12 +478,26 @@ func (ic *importContext) Run() error { } statsBytes, _ := json.Marshal(statsData) if _, err = stats.Write(statsBytes); err != nil { - return err + log.Printf("[ERROR] can't write stats into the %s: %s", statsFileName, err.Error()) + } + } + + // Write workspace objects file + if len(ic.allWorkspaceObjects) > 0 { + if wsObjects, err := os.Create(wsObjectsFileName); err == nil { + defer wsObjects.Close() + wsObjectsBytes, _ := json.Marshal(ic.allWorkspaceObjects) + if _, err = wsObjects.Write(wsObjectsBytes); err != nil { + log.Printf("[ERROR] can't write workspace objects into the %s: %s", wsObjectsFileName, err.Error()) + } + } else { + log.Printf("[ERROR] can't open %s: %s", wsObjectsFileName, err.Error()) } } // output ignored resources... - if ignored, err := os.Create(fmt.Sprintf("%s/ignored_resources.txt", ic.Directory)); err == nil { + ignoredResourcesFileName := fmt.Sprintf("%s/ignored_resources.txt", ic.Directory) + if ignored, err := os.Create(ignoredResourcesFileName); err == nil { defer ignored.Close() ic.ignoredResourcesMutex.Lock() keys := maps.Keys(ic.ignoredResources) @@ -483,6 +505,9 @@ func (ic *importContext) Run() error { for _, s := range keys { ignored.WriteString(s + "\n") } + ic.ignoredResourcesMutex.Unlock() + } else { + log.Printf("[ERROR] can't open %s: %s", ignoredResourcesFileName, err.Error()) } if !ic.noFormat { @@ -499,6 +524,114 @@ func (ic *importContext) Run() error { return nil } +func isSupportedWsObject(obj workspace.ObjectStatus) bool { + switch obj.ObjectType { + case workspace.Directory, workspace.Notebook, workspace.File: + return true + } + return false +} + +func (ic *importContext) generateResourceIdForWsObject(obj workspace.ObjectStatus) (string, string) { + var rtype string + switch obj.ObjectType { + case workspace.Directory: + rtype = "databricks_directory" + case workspace.File: + rtype = "databricks_workspace_file" + case workspace.Notebook: + rtype = "databricks_notebook" + default: + log.Printf("[WARN] Unsupported WS object type: %s in obj %v", obj.ObjectType, obj) + return "", "" + } + rData := ic.Resources[rtype].Data( + &terraform.InstanceState{ + ID: obj.Path, + Attributes: map[string]string{}, + }) + rData.Set("object_id", obj.ObjectID) + rData.Set("path", obj.Path) + name := ic.ResourceName(&resource{ + ID: obj.Path, + Resource: rtype, + Data: rData, + }) + return generateResourceName(rtype, name), rtype +} + +func (ic *importContext) loadOldWorkspaceObjects(fileName string) { + ic.oldWorkspaceObjects = []workspace.ObjectStatus{} + // Read a list of resources from previous run + oldDataFile, err := os.ReadFile(fileName) + if err != nil { + log.Printf("[WARN] Can't open the file (%s) with previous list of workspace objects: %s", fileName, err.Error()) + return + } + err = json.Unmarshal(oldDataFile, &ic.oldWorkspaceObjects) + if err != nil { + log.Printf("[WARN] Can't desereialize previous list of workspace objects: %s", err.Error()) + return + } + log.Printf("[DEBUG] Read previous list of 
workspace objects. got %d objects", len(ic.oldWorkspaceObjects)) + for _, obj := range ic.oldWorkspaceObjects { + ic.oldWorkspaceObjectMapping[obj.ObjectID] = obj.Path + } +} + +func (ic *importContext) findDeletedResources() { + log.Print("[INFO] Starting detection of deleted workspace objects") + if !ic.incremental || len(ic.allWorkspaceObjects) == 0 { + return + } + if len(ic.oldWorkspaceObjects) == 0 { + log.Print("[INFO] Previous list of workspace objects is empty") + return + } + // generate IDs of current objects + currentObjs := map[string]struct{}{} + for _, obj := range ic.allWorkspaceObjects { + obj := obj + if !isSupportedWsObject(obj) { + continue + } + rid, _ := ic.generateResourceIdForWsObject(obj) + currentObjs[rid] = struct{}{} + } + // Loop through previous objects, and if it's missing from the current list, add it to deleted, including permission + for _, obj := range ic.oldWorkspaceObjects { + obj := obj + if !isSupportedWsObject(obj) { + continue + } + rid, rtype := ic.generateResourceIdForWsObject(obj) + _, exists := currentObjs[rid] + if exists { + log.Printf("[DEBUG] object %s still exists", rid) // change to TRACE? + continue + } + log.Printf("[DEBUG] object %s is deleted!", rid) + ic.deletedResources[rid] = struct{}{} + // convert into permissions. This is quite fragile right now, need to think how to handle it better + var permId string + switch rtype { + case "databricks_notebook": + permId = "databricks_permissions.notebook_" + rid[len(rtype)+1:] + case "databricks_directory": + permId = "databricks_permissions.directory_" + rid[len(rtype)+1:] + case "databricks_workspace_file": + permId = "databricks_permissions.ws_file_" + rid[len(rtype)+1:] + } + log.Printf("[DEBUG] deleted permissions object %s", permId) + if permId != "" { + ic.deletedResources[permId] = struct{}{} + } + } + log.Printf("[INFO] Finished detection of deleted workspace objects. Detected %d deleted objects.", + len(ic.deletedResources)) + log.Printf("[DEBUG] Deleted objects. %v", ic.deletedResources) // change to TRACE? +} + func (ic *importContext) resourceHandler(num int, resourceType string, ch resourceChannel) { log.Printf("[DEBUG] Starting goroutine %d for resource %s", num, resourceType) for r := range ch { @@ -545,8 +678,13 @@ func (ic *importContext) closeImportChannels() { close(ic.defaultChannel) } +func generateResourceName(rtype, rname string) string { + return rtype + "." 
+ rname +} + func generateBlockFullName(block *hclwrite.Block) string { - return block.Type() + "_" + strings.Join(block.Labels(), "_") + labels := block.Labels() + return generateResourceName(labels[0], strings.Join(labels[1:], "_")) } type resourceWriteData struct { @@ -573,6 +711,9 @@ func (ic *importContext) handleResourceWrite(generatedFile string, ch dataWriteC existingFile, diags = hclwrite.ParseConfig(content, generatedFile, hcl.Pos{Line: 1, Column: 1}) if diags.HasErrors() { log.Printf("[ERROR] parsing of existing file %s failed: %s", generatedFile, diags.Error()) + } else { + log.Printf("[DEBUG] There are %d objects in existing file %s", + len(existingFile.Body().Blocks()), generatedFile) } } } @@ -588,6 +729,7 @@ func (ic *importContext) handleResourceWrite(generatedFile string, ch dataWriteC // newResources := make(map[string]struct{}, 100) + log.Printf("[DEBUG] started processing new writes for %s", generatedFile) for f := range ch { if f != nil { log.Printf("[DEBUG] started writing resource body for %s", f.BlockName) @@ -607,16 +749,20 @@ func (ic *importContext) handleResourceWrite(generatedFile string, ch dataWriteC } ic.waitGroup.Done() } - // update existing file if incremental mode numResources := len(newResources) + log.Printf("[DEBUG] finished processing new writes for %s. Wrote %d resources", generatedFile, numResources) + // update existing file if incremental mode if ic.incremental { log.Printf("[DEBUG] Starting to merge existing resources for %s", generatedFile) f := hclwrite.NewEmptyFile() for _, block := range existingFile.Body().Blocks() { blockName := generateBlockFullName(block) _, exists := newResources[blockName] + _, deleted := ic.deletedResources[blockName] if exists { log.Printf("[DEBUG] resource %s already generated, skipping...", blockName) + } else if deleted { + log.Printf("[DEBUG] resource %s is deleted, skipping...", blockName) } else { log.Printf("[DEBUG] resource %s doesn't exist, adding...", blockName) f.Body().AppendBlock(block) @@ -650,6 +796,15 @@ func (ic *importContext) writeImports(sh *os.File, importChan importWriteChannel if sh != nil { log.Printf("[DEBUG] Writing the rest of import commands. len=%d", len(ic.shImports)) for k := range ic.shImports { + parts := strings.Split(k, " ") + if len(parts) > 3 { + resource := parts[2] + _, deleted := ic.deletedResources[resource] + if deleted { + log.Printf("[DEBUG] Resource %s is deleted. 
Skipping import command for it", resource) + continue + } + } sh.WriteString(k + "\n") } } @@ -684,8 +839,7 @@ func (ic *importContext) processSingleResource(resourcesChan resourceChannel, wr } } else { resourceBlock := body.AppendNewBlock("resource", []string{r.Resource, r.Name}) - err = ic.dataToHcl(ir, []string{}, ic.Resources[r.Resource], - r.Data, resourceBlock.Body()) + err = ic.dataToHcl(ir, []string{}, ic.Resources[r.Resource], r, resourceBlock.Body()) if err != nil { log.Printf("[ERROR] error generating body for %v: %s", r, err.Error()) } @@ -729,13 +883,16 @@ func (ic *importContext) generateAndWriteResources(sh *os.File) { resourcesChan := make(resourceChannel, defaultChannelSize) resourceWriters := make(map[string]dataWriteChannel, len(ic.Resources)) - for _, imp := range ic.Importables { - resourceWriters[imp.Service] = make(dataWriteChannel, defaultChannelSize) + for service := range ic.services { + resourceWriters[service] = make(dataWriteChannel, defaultChannelSize) } importChan := make(importWriteChannel, defaultChannelSize) + writersWaitGroup := &sync.WaitGroup{} // + writersWaitGroup.Add(1) go func() { ic.writeImports(sh, importChan) + writersWaitGroup.Done() }() for i := 0; i < resourceHandlersNumber; i++ { i := i @@ -744,13 +901,16 @@ func (ic *importContext) generateAndWriteResources(sh *os.File) { ic.processSingleResource(resourcesChan, resourceWriters) }() } + for service, ch := range resourceWriters { service := service ch := ch generatedFile := fmt.Sprintf("%s/%s.tf", ic.Directory, service) log.Printf("[DEBUG] starting writer for service %s", service) + writersWaitGroup.Add(1) go func() { ic.handleResourceWrite(generatedFile, ch, importChan) + writersWaitGroup.Done() }() } @@ -758,7 +918,7 @@ func (ic *importContext) generateAndWriteResources(sh *os.File) { for i, r := range resources { ic.waitGroup.Add(1) resourcesChan <- r - if i%50 == 0 { + if i%500 == 0 { log.Printf("[INFO] Submitted %d of %d resources", i+1, scopeSize) } } @@ -770,6 +930,7 @@ func (ic *importContext) generateAndWriteResources(sh *os.File) { log.Printf("Closing writer for service %s", service) close(ch) } + writersWaitGroup.Wait() log.Printf("[INFO] Finished generation of configuration for %d resources (took %v seconds)", scopeSize, time.Since(t1).Seconds()) @@ -846,72 +1007,102 @@ func genTraversalTokens(sr *resourceApproximation, pick string) hcl.Traversal { } } -// this will run single threaded -func (ic *importContext) Find(r *resource, pick string, ref reference) (string, hcl.Traversal) { - log.Printf("[DEBUG] Starting searching for reference for resource %s %s, pick=%s, ref=%v", r.Resource, r.ID, pick, ref) - // TODO: Can we cache findings? 
+func (ic *importContext) Find(value, attr string, ref reference, origResource *resource, origPath string) (string, hcl.Traversal, bool) { + log.Printf("[DEBUG] Starting searching for reference for resource %s, attr='%s', value='%s', ref=%v", + ref.Resource, attr, value, ref) // optimize performance by avoiding doing regexp matching multiple times matchValue := "" - if ref.MatchType == MatchRegexp { + switch ref.MatchType { + case MatchRegexp: if ref.Regexp == nil { log.Printf("[WARN] you must provide regular expression for 'regexp' match type") - return "", nil + return "", nil, false } - res := ref.Regexp.FindStringSubmatch(r.Value) + res := ref.Regexp.FindStringSubmatch(value) if len(res) < 2 { - log.Printf("[WARN] no match for regexp: %v in string %s", ref.Regexp, r.Value) - return "", nil + log.Printf("[WARN] no match for regexp: %v in string %s", ref.Regexp, value) + return "", nil, false } matchValue = res[1] - } else if ref.MatchType == MatchCaseInsensitive { - matchValue = strings.ToLower(r.Value) // performance optimization to avoid doing it in the loop - } else if ref.MatchType == MatchExact || ref.MatchType == MatchDefault { - matchValue = r.Value - } - // doing explicit lookup in the state. For case insensitive matches, first attempt to lookup for the value, and do iteration if it's not found - if ref.MatchType == MatchExact || ref.MatchType == MatchDefault || ref.MatchType == MatchRegexp || ref.MatchType == MatchCaseInsensitive { - sr := ic.State.Get(r.Resource, r.Attribute, matchValue) - if sr != nil { - log.Printf("[DEBUG] Finished direct lookup for reference for resource %s %s, pick=%s, ref=%v. Found: type=%s name=%s", - r.Resource, r.ID, pick, ref, sr.Type, sr.Name) - return matchValue, genTraversalTokens(sr, pick) + case MatchCaseInsensitive: + matchValue = strings.ToLower(value) // performance optimization to avoid doing it in the loop + case MatchExact, MatchDefault: + matchValue = value + case MatchPrefix, MatchLongestPrefix: + if ref.MatchValueTransformFunc != nil { + matchValue = ref.MatchValueTransformFunc(value) + } else { + matchValue = value + } + } + // doing explicit lookup in the state. For case insensitive matches, first attempt to lookup for the value, + // and do iteration if it's not found + if (ref.MatchType == MatchExact || ref.MatchType == MatchDefault || ref.MatchType == MatchRegexp || + ref.MatchType == MatchCaseInsensitive) && !ref.SkipDirectLookup { + sr := ic.State.Get(ref.Resource, attr, matchValue) + if sr != nil && (ref.IsValidApproximation == nil || ref.IsValidApproximation(ic, origResource, sr, origPath)) { + log.Printf("[DEBUG] Finished direct lookup for reference for resource %s, attr='%s', value='%s', ref=%v. Found: type=%s name=%s", + ref.Resource, attr, value, ref, sr.Type, sr.Name) + return matchValue, genTraversalTokens(sr, attr), sr.Mode == "data" } if ref.MatchType != MatchCaseInsensitive { // for case-insensitive matching we'll try iteration - log.Printf("[DEBUG] Finished direct lookup for reference for resource %s %s, pick=%s, ref=%v. Not found", - r.Resource, r.ID, pick, ref) - return "", nil + log.Printf("[DEBUG] Finished direct lookup for reference for resource %s, attr='%s', value='%s', ref=%v. 
Not found", + ref.Resource, attr, value, ref) + return "", nil, false } } - for _, sr := range *ic.State.Resources(r.Resource) { + maxPrefixLen := 0 + maxPrefixOrigValue := "" + var maxPrefixResource *resourceApproximation + srs := *ic.State.Resources(ref.Resource) + for _, sr := range srs { for _, i := range sr.Instances { - v := i.Attributes[r.Attribute] + v := i.Attributes[attr] if v == nil { - log.Printf("[WARN] Can't find instance attribute '%v' in resource: '%v' with name '%v', ID: '%v'", - r.Attribute, r.Resource, r.Name, r.ID) + log.Printf("[WARN] Can't find instance attribute '%v' in resource: '%v'", attr, ref.Resource) continue } strValue := v.(string) + origValue := strValue + if ref.SearchValueTransformFunc != nil { + strValue = ref.SearchValueTransformFunc(strValue) + log.Printf("[DEBUG] Resource %s. Transformed value from '%s' to '%s'", ref.Resource, origValue, strValue) + } matched := false switch ref.MatchType { case MatchCaseInsensitive: matched = (strings.ToLower(strValue) == matchValue) case MatchPrefix: - matched = strings.HasPrefix(r.Value, strValue) + matched = strings.HasPrefix(matchValue, strValue) + case MatchLongestPrefix: + if strings.HasPrefix(matchValue, strValue) && len(origValue) > maxPrefixLen { + maxPrefixLen = len(origValue) + maxPrefixOrigValue = origValue + maxPrefixResource = sr + } + case MatchExact, MatchDefault: + matched = (strValue == matchValue) default: log.Printf("[WARN] Unsupported match type: %s", ref.MatchType) } - if !matched { + if !matched || (ref.IsValidApproximation != nil && !ref.IsValidApproximation(ic, origResource, sr, origPath)) { continue } // TODO: we need to not generate traversals resources for which their Ignore function returns true... - log.Printf("[DEBUG] Finished searching for reference for resource %s %s, pick=%s, ref=%v. Found: type=%s name=%s", - r.Resource, r.ID, pick, ref, sr.Type, sr.Name) - return strValue, genTraversalTokens(sr, pick) + log.Printf("[DEBUG] Finished searching for reference for resource %s, attr='%s', value='%s', ref=%v. Found: type=%s name=%s", + ref.Resource, attr, value, ref, sr.Type, sr.Name) + return origValue, genTraversalTokens(sr, attr), sr.Mode == "data" } } - log.Printf("[DEBUG] Finished searching for reference for resource %s %s, pick=%s, ref=%v. Not found", r.Resource, r.ID, pick, ref) - return "", nil + if ref.MatchType == MatchLongestPrefix && maxPrefixResource != nil && + (ref.IsValidApproximation == nil || ref.IsValidApproximation(ic, origResource, maxPrefixResource, origPath)) { + log.Printf("[DEBUG] Finished searching longest prefix for reference for resource %s, attr='%s', value='%s', ref=%v. Found: type=%s name=%s", + ref.Resource, attr, value, ref, maxPrefixResource.Type, maxPrefixResource.Name) + return maxPrefixOrigValue, genTraversalTokens(maxPrefixResource, attr), maxPrefixResource.Mode == "data" + } + log.Printf("[DEBUG] Finished searching for reference for resource %s, pick=%s, ref=%v. 
Not found", ref.Resource, attr, ref) + return "", nil, false } // This function checks if resource exist in any state (already added or in process of addition) @@ -1006,6 +1197,32 @@ func (ic *importContext) isServiceEnabled(service string) bool { return exists } +func (ic *importContext) EmitIfUpdatedAfterMillis(r *resource, modifiedAt int64, message string) { + updatedSinceMs := ic.getUpdatedSinceMs() + if ic.incremental && modifiedAt < updatedSinceMs { + log.Printf("[DEBUG] skipping %s that was modified at %d (last active=%d)", + message, modifiedAt, updatedSinceMs) + return + } + ic.Emit(r) +} + +func (ic *importContext) EmitIfUpdatedAfterMillisAndNameMatches(r *resource, name string, modifiedAt int64, message string) { + if ic.MatchesName(name) { + ic.EmitIfUpdatedAfterMillis(r, modifiedAt, message) + } +} + +func (ic *importContext) EmitIfUpdatedAfterIsoString(r *resource, updatedAt, message string) { + updatedSinceStr := ic.getUpdatedSinceStr() + if ic.incremental && updatedAt < updatedSinceStr { + log.Printf("[DEBUG] skipping %s that was modified at %s (updatedSince=%s)", message, + updatedAt, updatedSinceStr) + return + } + ic.Emit(r) +} + func (ic *importContext) Emit(r *resource) { // TODO: change into channels, if stack trace depth issues would surface _, v := r.MatchPair() @@ -1065,29 +1282,26 @@ func maybeAddQuoteCharacter(s string) string { return s } -func (ic *importContext) getTraversalTokens(ref reference, value string) hclwrite.Tokens { +func (ic *importContext) getTraversalTokens(ref reference, value string, origResource *resource, origPath string) (hclwrite.Tokens, bool) { matchType := ref.MatchTypeValue() attr := ref.MatchAttribute() - attrValue, traversal := ic.Find(&resource{ - Resource: ref.Resource, - Attribute: attr, - Value: value, - }, attr, ref) + attrValue, traversal, isData := ic.Find(value, attr, ref, origResource, origPath) // at least one invocation of ic.Find will assign Nil to traversal if resource with value is not found if traversal == nil { - return nil + return nil, isData } + // capture if it's data? switch matchType { case MatchExact, MatchDefault, MatchCaseInsensitive: - return hclwrite.TokensForTraversal(traversal) - case MatchPrefix: + return hclwrite.TokensForTraversal(traversal), isData + case MatchPrefix, MatchLongestPrefix: rest := value[len(attrValue):] tokens := hclwrite.Tokens{&hclwrite.Token{Type: hclsyntax.TokenOQuote, Bytes: []byte{'"', '$', '{'}}} tokens = append(tokens, hclwrite.TokensForTraversal(traversal)...) tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenCQuote, Bytes: []byte{'}'}}) tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenQuotedLit, Bytes: []byte(maybeAddQuoteCharacter(rest))}) tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenCQuote, Bytes: []byte{'"'}}) - return tokens + return tokens, isData case MatchRegexp: indices := ref.Regexp.FindStringSubmatchIndex(value) if len(indices) == 4 { @@ -1098,21 +1312,23 @@ func (ic *importContext) getTraversalTokens(ref reference, value string) hclwrit tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenCQuote, Bytes: []byte{'}'}}) tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenQuotedLit, Bytes: []byte(maybeAddQuoteCharacter(value[indices[3]:]))}) tokens = append(tokens, &hclwrite.Token{Type: hclsyntax.TokenCQuote, Bytes: []byte{'"'}}) - return tokens + return tokens, isData } log.Printf("[WARN] Can't match found data in '%s'. 
Indices: %v", value, indices) default: log.Printf("[WARN] Unsupported match type: %s", ref.MatchType) } - return nil + return nil, false } // TODO: move to IC var dependsRe = regexp.MustCompile(`(\.[\d]+)`) -func (ic *importContext) reference(i importable, path []string, value string, ctyValue cty.Value) hclwrite.Tokens { - match := dependsRe.ReplaceAllString(strings.Join(path, "."), "") - // TODO: get reference candidate, but if it's a `data`, then look for another non-data reference if possible.. +func (ic *importContext) reference(i importable, path []string, value string, ctyValue cty.Value, origResource *resource) hclwrite.Tokens { + pathString := strings.Join(path, ".") + match := dependsRe.ReplaceAllString(pathString, "") + // get reference candidate, but if it's a `data`, then look for another non-data reference if possible.. + var dataTokens hclwrite.Tokens for _, d := range i.Depends { if d.Path != match { continue @@ -1129,10 +1345,19 @@ func (ic *importContext) reference(i importable, path []string, value string, ct return ic.variable(fmt.Sprintf("%s_%s", path[0], value), "") } - if tokens := ic.getTraversalTokens(d, value); tokens != nil { - return tokens + tokens, isData := ic.getTraversalTokens(d, value, origResource, pathString) + if tokens != nil { + if isData { + dataTokens = tokens + log.Printf("[DEBUG] Got reference to data for dependency %v", d) + } else { + return tokens + } } } + if len(dataTokens) > 0 { + return dataTokens + } return hclwrite.TokensForValue(ctyValue) } @@ -1150,7 +1375,8 @@ type fieldTuple struct { } func (ic *importContext) dataToHcl(i importable, path []string, - pr *schema.Resource, d *schema.ResourceData, body *hclwrite.Body) error { + pr *schema.Resource, res *resource, body *hclwrite.Body) error { + d := res.Data ss := []fieldTuple{} for a, as := range pr.Schema { ss = append(ss, fieldTuple{a, as}) @@ -1160,6 +1386,7 @@ func (ic *importContext) dataToHcl(i importable, path []string, // makes the most beautiful configs return ss[i].Field > ss[j].Field }) + var_cnt := 0 for _, tuple := range ss { a, as := tuple.Field, tuple.Schema pathString := strings.Join(append(path, a), ".") @@ -1173,12 +1400,16 @@ func (ic *importContext) dataToHcl(i importable, path []string, continue } mpath := dependsRe.ReplaceAllString(pathString, "") - for _, r := range i.Depends { - if r.Path == mpath && r.Variable { + for _, ref := range i.Depends { + if ref.Path == mpath && ref.Variable { // sensitive fields are moved to variable depends, variable name is normalized // TODO: handle a case when we have multiple blocks, so names won't be unique - raw = ic.regexFix(i.Name(ic, d), simpleNameFixes) + raw = ic.regexFix(ic.ResourceName(res), simpleNameFixes) + if var_cnt > 0 { + raw = fmt.Sprintf("%s_%d", raw, var_cnt) + } nonZero = true + var_cnt++ } } shouldSkip := !nonZero @@ -1194,7 +1425,7 @@ func (ic *importContext) dataToHcl(i importable, path []string, switch as.Type { case schema.TypeString: value := raw.(string) - tokens := ic.reference(i, append(path, a), value, cty.StringVal(value)) + tokens := ic.reference(i, append(path, a), value, cty.StringVal(value), res) body.SetAttributeRaw(a, tokens) case schema.TypeBool: body.SetAttributeValue(a, cty.BoolVal(raw.(bool))) @@ -1209,7 +1440,7 @@ func (ic *importContext) dataToHcl(i importable, path []string, num = iv } body.SetAttributeRaw(a, ic.reference(i, append(path, a), - strconv.FormatInt(num, 10), cty.NumberIntVal(num))) + strconv.FormatInt(num, 10), cty.NumberIntVal(num), res)) case schema.TypeFloat: 
body.SetAttributeValue(a, cty.NumberFloatVal(raw.(float64))) case schema.TypeMap: @@ -1223,7 +1454,7 @@ func (ic *importContext) dataToHcl(i importable, path []string, case schema.TypeSet: if rawSet, ok := raw.(*schema.Set); ok { rawList := rawSet.List() - err := ic.readListFromData(i, append(path, a), d, rawList, body, as, func(i int) string { + err := ic.readListFromData(i, append(path, a), res, rawList, body, as, func(i int) string { return strconv.Itoa(rawSet.F(rawList[i])) }) if err != nil { @@ -1232,7 +1463,7 @@ func (ic *importContext) dataToHcl(i importable, path []string, } case schema.TypeList: if rawList, ok := raw.([]any); ok { - err := ic.readListFromData(i, append(path, a), d, rawList, body, as, strconv.Itoa) + err := ic.readListFromData(i, append(path, a), res, rawList, body, as, strconv.Itoa) if err != nil { return err } @@ -1244,9 +1475,8 @@ func (ic *importContext) dataToHcl(i importable, path []string, return nil } -func (ic *importContext) readListFromData(i importable, path []string, d *schema.ResourceData, - rawList []any, body *hclwrite.Body, as *schema.Schema, - offsetConverter func(i int) string) error { +func (ic *importContext) readListFromData(i importable, path []string, res *resource, + rawList []any, body *hclwrite.Body, as *schema.Schema, offsetConverter func(i int) string) error { if len(rawList) == 0 { return nil } @@ -1256,12 +1486,12 @@ func (ic *importContext) readListFromData(i importable, path []string, d *schema if as.MaxItems == 1 { nestedPath := append(path, offsetConverter(0)) confBlock := body.AppendNewBlock(name, []string{}) - return ic.dataToHcl(i, nestedPath, elem, d, confBlock.Body()) + return ic.dataToHcl(i, nestedPath, elem, res, confBlock.Body()) } for offset := range rawList { confBlock := body.AppendNewBlock(name, []string{}) nestedPath := append(path, offsetConverter(offset)) - err := ic.dataToHcl(i, nestedPath, elem, d, confBlock.Body()) + err := ic.dataToHcl(i, nestedPath, elem, res, confBlock.Body()) if err != nil { return err } @@ -1282,7 +1512,7 @@ func (ic *importContext) readListFromData(i importable, path []string, d *schema switch x := raw.(type) { case string: value := raw.(string) - toks = append(toks, ic.reference(i, path, value, cty.StringVal(value))...) + toks = append(toks, ic.reference(i, path, value, cty.StringVal(value), res)...) case int: // probably we don't even use integer lists?... 
toks = append(toks, hclwrite.TokensForValue( diff --git a/exporter/context_test.go b/exporter/context_test.go index b98c3c5841..4bdc7908b5 100644 --- a/exporter/context_test.go +++ b/exporter/context_test.go @@ -1,14 +1,18 @@ package exporter import ( + "encoding/json" "fmt" "os" "sync" "testing" + "github.com/databricks/terraform-provider-databricks/provider" "github.com/databricks/terraform-provider-databricks/qa" + "github.com/databricks/terraform-provider-databricks/workspace" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNoServicesSkipsRun(t *testing.T) { @@ -30,16 +34,52 @@ func TestImportContextFindSkips(t *testing.T) { }, }, }}) - _, traversal := (&importContext{ + _, traversal, _ := (&importContext{ State: state, - }).Find(&resource{ - Resource: "a", - Attribute: "b", - Name: "c", - }, "x", reference{}) + }).Find("v", "x", reference{Resource: "a"}, &resource{}, "a") assert.Nil(t, traversal) } +func TestImportContextFindNoDirectLookup(t *testing.T) { + state := newStateApproximation([]string{"a"}) + state.Append(resourceApproximation{ + Type: "a", + Instances: []instanceApproximation{ + { + Attributes: map[string]any{ + "b": "42", + }, + }, + }}) + _, traversal, _ := (&importContext{ + State: state, + }).Find("42", "b", reference{Resource: "a", SkipDirectLookup: true}, &resource{}, "a") + assert.NotNil(t, traversal) +} + +func TestImportContextFindMatchLongestPrefix(t *testing.T) { + state := newStateApproximation([]string{"a"}) + state.Append(resourceApproximation{ + Type: "a", + Instances: []instanceApproximation{ + { + Attributes: map[string]any{ + "b": "/a/b", + }, + }, + { + Attributes: map[string]any{ + "b": "/a/b/c", + }, + }, + }}) + val, traversal, _ := (&importContext{ + State: state, + }).Find("/a/b/c/d", "b", reference{Resource: "a", MatchType: MatchLongestPrefix}, &resource{}, "a") + require.NotNil(t, traversal) + assert.Equal(t, "/a/b/c", val) +} + func TestImportContextHas(t *testing.T) { state := newStateApproximation([]string{"a"}) state.Append(resourceApproximation{ @@ -201,6 +241,50 @@ func TestEmitNoSearchNoId(t *testing.T) { close(ch) } +func TestEmitNoSearchNoIdWithRetry(t *testing.T) { + ch := make(resourceChannel, 10) + state := newStateApproximation([]string{"a"}) + i := 0 + ic := &importContext{ + importing: map[string]bool{}, + Resources: map[string]*schema.Resource{ + "a": {}, + }, + Importables: map[string]importable{ + "a": { + Service: "e", + Search: func(ic *importContext, r *resource) error { + if i > 0 { + return nil + } + i = i + 1 + return fmt.Errorf("context deadline exceeded (Client.Timeout exceeded while awaiting headers)") + }, + }, + }, + waitGroup: &sync.WaitGroup{}, + channels: map[string]resourceChannel{ + "a": ch, + }, + ignoredResources: map[string]struct{}{}, + State: state, + } + ic.enableServices("e") + go func() { + for r := range ch { + r.ImportResource(ic) + } + }() + ic.Emit(&resource{ + Resource: "a", + Attribute: "b", + Value: "d", + Name: "c", + }) + ic.waitGroup.Wait() + close(ch) +} + func TestEmitNoSearchSucceedsImportFails(t *testing.T) { ch := make(resourceChannel, 10) state := newStateApproximation([]string{"a"}) @@ -266,3 +350,86 @@ func TestLoadingLastRun(t *testing.T) { s = getLastRunString(fname) assert.Equal(t, "2023-07-24T00:00:00Z", s) } + +func TestGenerateResourceIdForWsObject(t *testing.T) { + p := provider.DatabricksProvider() + ic := &importContext{ + Importables: resourcesMap, + Resources: p.ResourcesMap, + 
} + rid, rtype := ic.generateResourceIdForWsObject(workspace.ObjectStatus{ + ObjectID: 123, + Path: "Test", + ObjectType: "Unknown", + }) + assert.Empty(t, rid) + assert.Empty(t, rtype) + + rid, rtype = ic.generateResourceIdForWsObject(workspace.ObjectStatus{ + ObjectID: 123, + Path: "/Users/user@domain.com/TestDir", + ObjectType: workspace.Directory, + }) + assert.Equal(t, "databricks_directory.users_user_domain_com_testdir_123", rid) + assert.Equal(t, "databricks_directory", rtype) + + rid, rtype = ic.generateResourceIdForWsObject(workspace.ObjectStatus{ + ObjectID: 123, + Path: "/Users/user@domain.com/Test File", + ObjectType: workspace.File, + }) + assert.Equal(t, "databricks_workspace_file.users_user_domain_com_test_file_123", rid) + assert.Equal(t, "databricks_workspace_file", rtype) + + rid, rtype = ic.generateResourceIdForWsObject(workspace.ObjectStatus{ + ObjectID: 123, + Path: "/Users/user@domain.com/Test Notebook", + ObjectType: workspace.Notebook, + }) + assert.Equal(t, "databricks_notebook.users_user_domain_com_test_notebook_123", rid) + assert.Equal(t, "databricks_notebook", rtype) +} + +func TestDeletedWsObjectsDetection(t *testing.T) { + ic := importContextForTest() + ic.incremental = true + + tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) + os.MkdirAll(tmpDir, 0755) + defer os.RemoveAll(tmpDir) + + objects := []workspace.ObjectStatus{ + {ObjectID: 123, ObjectType: "REPO", Path: "/Repos/user@domain.com/test"}, + {ObjectID: 456, ObjectType: "NOTEBOOK", Path: "/Test/1234"}, + // This is deleted objects + {ObjectID: 789, ObjectType: "DIRECTORY", Path: "/Test/TDir"}, + {ObjectID: 12, ObjectType: "FILE", Path: "/Test/TDir"}, + {ObjectID: 345, ObjectType: "NOTEBOOK", Path: "/Test/12345"}, + } + + bytes, _ := json.Marshal(objects) + fname := tmpDir + "/1.json" + os.WriteFile(fname, bytes, 0755) + + ic.loadOldWorkspaceObjects(fname) + ic.allWorkspaceObjects = objects[0:2] + ic.findDeletedResources() + require.Equal(t, 6, len(ic.deletedResources)) + assert.Contains(t, ic.deletedResources, "databricks_directory.test_tdir_789") + assert.Contains(t, ic.deletedResources, "databricks_permissions.directory_test_tdir_789") + assert.Contains(t, ic.deletedResources, "databricks_notebook.test_12345_345") + assert.Contains(t, ic.deletedResources, "databricks_permissions.notebook_test_12345_345") + assert.Contains(t, ic.deletedResources, "databricks_workspace_file.test_tdir_12") + assert.Contains(t, ic.deletedResources, "databricks_permissions.ws_file_test_tdir_12") + + // errors/edge case handling + _ = os.WriteFile(fname, []byte("[]"), 0755) + ic.loadOldWorkspaceObjects(fname) + require.Equal(t, 0, len(ic.oldWorkspaceObjects)) + ic.findDeletedResources() + + // Incorrect data type + _ = os.WriteFile(fname, []byte("{}"), 0755) + ic.loadOldWorkspaceObjects(fname) + require.Equal(t, 0, len(ic.oldWorkspaceObjects)) +} diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 650a656ecf..29142bc88f 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/serving" "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/databricks-sdk-go/service/sharing" "github.com/databricks/databricks-sdk-go/service/sql" workspaceApi "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/databricks/terraform-provider-databricks/aws" @@ -219,8 +220,7 @@ func TestImportingMounts(t *testing.T) { }, func(ctx 
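// A simplified sketch of the deleted-object detection covered by
// TestDeletedWsObjectsDetection above: the previous run's workspace listing is
// loaded from JSON, object IDs are compared with the current listing, and
// anything that disappeared is collected. The type and field names here are
// assumptions for illustration, not the exporter's exact internals.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type objectStatus struct {
	ObjectID   int64  `json:"object_id"`
	ObjectType string `json:"object_type"`
	Path       string `json:"path"`
}

func findDeleted(oldFile string, current []objectStatus) ([]objectStatus, error) {
	raw, err := os.ReadFile(oldFile)
	if err != nil {
		return nil, err
	}
	var old []objectStatus
	if err := json.Unmarshal(raw, &old); err != nil {
		return nil, err
	}
	existing := map[int64]bool{}
	for _, o := range current {
		existing[o.ObjectID] = true
	}
	var deleted []objectStatus
	for _, o := range old {
		if !existing[o.ObjectID] {
			deleted = append(deleted, o)
		}
	}
	return deleted, nil
}

func main() {
	deleted, err := findDeleted("/tmp/old-objects.json", []objectStatus{{ObjectID: 123}})
	fmt.Println(deleted, err)
}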
context.Context, client *common.DatabricksClient) { ic := newImportContext(client) ic.setClientsForTests() - ic.enableServices("mounts") - ic.listing = "mounts" + ic.enableListing("mounts") ic.mounts = true err := ic.Importables["databricks_mount"].List(ic) @@ -278,6 +278,26 @@ var emptyMlflowWebhooks = qa.HTTPFixture{ Response: ml.ListRegistryWebhooks{}, } +var emptyExternalLocations = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/unity-catalog/external-locations?", + Status: 200, + Response: &catalog.ListExternalLocationsResponse{}, +} + +var emptyStorageCrdentials = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/unity-catalog/storage-credentials?", + Status: 200, + Response: &catalog.ListStorageCredentialsResponse{}, +} + +var emptyConnections = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/unity-catalog/connections", + Response: catalog.ListConnectionsResponse{}, +} + var emptyRepos = qa.HTTPFixture{ Method: "GET", ReuseRequest: true, @@ -285,6 +305,20 @@ var emptyRepos = qa.HTTPFixture{ Response: repos.ReposListResponse{}, } +var emptyShares = qa.HTTPFixture{ + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/unity-catalog/shares", + Response: sharing.ListSharesResponse{}, +} + +var emptyRecipients = qa.HTTPFixture{ + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/unity-catalog/recipients?", + Response: sharing.ListRecipientsResponse{}, +} + var emptyGitCredentials = qa.HTTPFixture{ Method: http.MethodGet, Resource: "/api/2.0/git-credentials", @@ -396,6 +430,13 @@ var currentMetastoreSuccess = qa.HTTPFixture{ ReuseRequest: true, } +var emptyMetastoreList = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/unity-catalog/metastores", + Response: catalog.ListMetastoresResponse{}, + ReuseRequest: true, +} + func TestImportingUsersGroupsSecretScopes(t *testing.T) { listSpFixtures := qa.ListServicePrincipalsFixtures([]iam.ServicePrincipal{ { @@ -417,13 +458,19 @@ func TestImportingUsersGroupsSecretScopes(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ noCurrentMetastoreAttached, + emptyMetastoreList, meAdminFixture, emptyRepos, + emptyShares, + emptyConnections, + emptyRecipients, emptyGitCredentials, emptyWorkspace, emptyIpAccessLIst, emptyInstancePools, emptyModelServing, + emptyExternalLocations, + emptyStorageCrdentials, emptyMlflowWebhooks, emptySqlDashboards, emptySqlEndpoints, @@ -653,9 +700,8 @@ func TestImportingUsersGroupsSecretScopes(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - services, listing := ic.allServicesAndListing() - ic.enableServices(services) - ic.listing = listing + _, listing := ic.allServicesAndListing() + ic.enableListing(listing) err := ic.Run() assert.NoError(t, err) @@ -674,7 +720,13 @@ func TestImportingNoResourcesError(t *testing.T) { }, }, noCurrentMetastoreAttached, + emptyMetastoreList, emptyRepos, + emptyExternalLocations, + emptyStorageCrdentials, + emptyShares, + emptyConnections, + emptyRecipients, emptyModelServing, emptyMlflowWebhooks, emptyWorkspaceConf, @@ -724,12 +776,11 @@ func TestImportingNoResourcesError(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - services, listing := ic.allServicesAndListing() - ic.listing = listing - ic.enableServices(services) + _, listing := ic.allServicesAndListing() + ic.enableListing(listing) err := ic.Run() - assert.EqualError(t, err, "no resources to import") + assert.EqualError(t, err, "no resources to import or delete") }) } @@ -935,7 +986,7 @@ func TestImportingClusters(t *testing.T) { ic := 
newImportContext(client) ic.Directory = tmpDir - ic.listing = "compute" + ic.enableListing("compute") ic.enableServices("access,users,policies,compute,secrets,groups,storage") err := ic.Run() @@ -1140,7 +1191,7 @@ func TestImportingJobs_JobList(t *testing.T) { func(ctx context.Context, client *common.DatabricksClient) { ic := newImportContext(client) ic.enableServices("jobs,access,storage,clusters,pools") - ic.listing = "jobs" + ic.enableListing("jobs") ic.mounts = true ic.meAdmin = true tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) @@ -1160,7 +1211,7 @@ func TestImportingJobs_JobList(t *testing.T) { ic.Importables["databricks_job"], []string{}, ic.Resources["databricks_job"], - res.Data, + res, hclwrite.NewEmptyFile().Body()) assert.NoError(t, err) @@ -1391,7 +1442,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { func(ctx context.Context, client *common.DatabricksClient) { ic := newImportContext(client) ic.enableServices("jobs,access,storage,clusters,pools") - ic.listing = "jobs" + ic.enableListing("jobs") ic.mounts = true ic.meAdmin = true tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) @@ -1407,12 +1458,8 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { continue } // simulate complex HCL write - err = ic.dataToHcl( - ic.Importables["databricks_job"], - []string{}, - ic.Resources["databricks_job"], - res.Data, - hclwrite.NewEmptyFile().Body()) + err = ic.dataToHcl(ic.Importables["databricks_job"], []string{}, ic.Resources["databricks_job"], + res, hclwrite.NewEmptyFile().Body()) assert.NoError(t, err) } @@ -1478,7 +1525,7 @@ func TestImportingSecrets(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "secrets" + ic.enableListing("secrets") services, _ := ic.allServicesAndListing() ic.enableServices(services) ic.generateDeclaration = true @@ -1528,13 +1575,13 @@ func TestImportingGlobalInitScripts(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/global-init-scripts/C39FD6BAC8088BBC", + Resource: "/api/2.0/global-init-scripts/C39FD6BAC8088BBC?", ReuseRequest: true, Response: getJSONObject("test-data/global-init-script-get1.json"), }, { Method: "GET", - Resource: "/api/2.0/global-init-scripts/F931E63C248C1D8C", + Resource: "/api/2.0/global-init-scripts/F931E63C248C1D8C?", ReuseRequest: true, Response: getJSONObject("test-data/global-init-script-get2.json"), }, @@ -1544,7 +1591,7 @@ func TestImportingGlobalInitScripts(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "workspace" + ic.enableListing("workspace") services, _ := ic.allServicesAndListing() ic.enableServices(services) ic.generateDeclaration = true @@ -1650,8 +1697,7 @@ func TestImportingRepos(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "repos" - ic.enableServices(ic.listing) + ic.enableListing("repos") err := ic.Run() assert.NoError(t, err) @@ -1721,8 +1767,8 @@ func TestImportingIPAccessLists(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "workspace,access" - ic.enableServices(ic.listing) + services := "workspace,access" + ic.enableListing(services) err := ic.Run() assert.NoError(t, err) @@ -1857,7 +1903,7 @@ func TestImportingSqlObjects(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "sql-dashboards,sql-queries,sql-endpoints,sql-alerts" + ic.enableListing("sql-dashboards,sql-queries,sql-endpoints,sql-alerts") ic.enableServices("sql-dashboards,sql-queries,sql-alerts,sql-endpoints,access,notebooks") err := 
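// Many of the test changes above replace the pair `ic.listing = "..."` plus
// `ic.enableServices(...)` with a single `ic.enableListing(...)`. A plausible
// shape for that helper, sketched on a stand-in context type (not the
// provider's actual importContext): listing and services become string sets,
// and enabling a listing also enables the matching service.
package main

import (
	"fmt"
	"strings"
)

type ctx struct {
	services map[string]struct{}
	listing  map[string]struct{}
}

func (c *ctx) enableServices(s string) {
	for _, svc := range strings.Split(s, ",") {
		c.services[strings.TrimSpace(svc)] = struct{}{}
	}
}

func (c *ctx) enableListing(s string) {
	for _, svc := range strings.Split(s, ",") {
		svc = strings.TrimSpace(svc)
		c.listing[svc] = struct{}{}
		c.services[svc] = struct{}{} // listing a service implies it is enabled
	}
}

func main() {
	c := &ctx{services: map[string]struct{}{}, listing: map[string]struct{}{}}
	c.enableListing("jobs,compute")
	fmt.Println(len(c.services), len(c.listing)) // 2 2
}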
ic.Run() @@ -2036,7 +2082,7 @@ func TestImportingDLTPipelines(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "dlt" + ic.enableListing("dlt") ic.enableServices("dlt,access,notebooks,users,repos,secrets") err := ic.Run() @@ -2094,7 +2140,7 @@ func TestImportingDLTPipelinesMatchingOnly(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir ic.match = "test" - ic.listing = "dlt" + ic.enableListing("dlt") ic.enableServices("dlt,access") err := ic.Run() @@ -2136,8 +2182,7 @@ func TestImportingGlobalSqlConfig(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "sql-endpoints" - ic.enableServices(ic.listing) + ic.enableListing("sql-endpoints") err := ic.Run() assert.NoError(t, err) @@ -2168,6 +2213,7 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { Response: workspace.ObjectList{ Objects: []workspace.ObjectStatus{notebookStatus, fileStatus}, }, + ReuseRequest: true, }, { Method: "GET", @@ -2204,8 +2250,7 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "notebooks" - ic.enableServices(ic.listing) + ic.enableListing("notebooks") err := ic.Run() assert.NoError(t, err) @@ -2255,8 +2300,7 @@ func TestImportingModelServing(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "model-serving" - ic.enableServices(ic.listing) + ic.enableListing("model-serving") err := ic.Run() assert.NoError(t, err) @@ -2307,8 +2351,7 @@ func TestImportingMlfloweWebhooks(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "mlflow-webhooks" - ic.enableServices(ic.listing) + ic.enableListing("mlflow-webhooks") err := ic.Run() assert.NoError(t, err) @@ -2439,8 +2482,8 @@ resource "databricks_pipeline" "def" { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "dlt,mlflow-webhooks" - ic.enableServices(ic.listing) + services := "dlt,mlflow-webhooks" + ic.enableListing(services) ic.incremental = true ic.updatedSinceStr = "2023-07-24T00:00:00Z" ic.meAdmin = false @@ -2502,8 +2545,7 @@ func TestImportingRunJobTask(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.listing = "jobs" - ic.enableServices(ic.listing) + ic.enableListing("jobs") ic.match = "runjobtask" err := ic.Run() diff --git a/exporter/importables.go b/exporter/importables.go index 43205b0877..e2c30e0783 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -18,7 +18,9 @@ import ( sdk_jobs "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/databricks-sdk-go/service/sharing" "github.com/databricks/databricks-sdk-go/service/sql" + tfuc "github.com/databricks/terraform-provider-databricks/catalog" "github.com/databricks/terraform-provider-databricks/clusters" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" @@ -34,6 +36,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/zclconf/go-cty/cty" + "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -283,10 +286,14 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "policy_id", Resource: "databricks_cluster_policy"}, {Path: "single_user_name", Resource: "databricks_service_principal", Match: "application_id"}, 
{Path: "single_user_name", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, - {Path: "library.jar", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "library.whl", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "library.egg", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, + {Path: "library.jar", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "library.whl", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "library.egg", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, List: func(ic *importContext) error { clusters, err := clusters.NewClustersAPI(ic.Context, ic.Client).List() @@ -392,26 +399,40 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "task.spark_python_task.python_file", Resource: "databricks_workspace_file", Match: "path"}, {Path: "task.spark_submit_task.parameters", Resource: "databricks_dbfs_file", Match: "dbfs_path"}, {Path: "task.spark_submit_task.parameters", Resource: "databricks_workspace_file", Match: "workspace_path"}, + {Path: "task.sql_task.file.path", Resource: "databricks_workspace_file", Match: "path"}, + {Path: "task.dbt_task.project_directory", Resource: "databricks_directory", Match: "path"}, {Path: "task.sql_task.alert.alert_id", Resource: "databricks_sql_alert"}, {Path: "task.sql_task.dashboard.dashboard_id", Resource: "databricks_sql_dashboard"}, {Path: "task.sql_task.query.query_id", Resource: "databricks_sql_query"}, {Path: "task.sql_task.warehouse_id", Resource: "databricks_sql_endpoint"}, {Path: "run_as.user_name", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, - {Path: "email_notifications.on_duration_warning_threshold_exceeded", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, + {Path: "email_notifications.on_duration_warning_threshold_exceeded", Resource: "databricks_user", + Match: "user_name", MatchType: MatchCaseInsensitive}, {Path: "email_notifications.on_failure", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, {Path: "email_notifications.on_start", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, {Path: "email_notifications.on_success", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, - {Path: "task.library.whl", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.new_cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.notebook_task.base_parameters", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.notebook_task.notebook_path", Resource: "databricks_repo", Match: "path", MatchType: MatchPrefix}, - {Path: "task.python_wheel_task.named_parameters", Resource: 
"databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.python_wheel_task.parameters", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.run_job_task.job_parameters", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.spark_python_task.python_file", Resource: "databricks_repo", Match: "path", MatchType: MatchPrefix}, - {Path: "task.spark_jar_task.parameters", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "task.spark_submit_task.parameters", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "job_cluster.new_cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, + {Path: "task.library.whl", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.new_cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.notebook_task.base_parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.notebook_task.notebook_path", Resource: "databricks_repo", Match: "path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.python_wheel_task.named_parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.python_wheel_task.parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.run_job_task.job_parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.spark_python_task.python_file", Resource: "databricks_repo", Match: "path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.spark_jar_task.parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "task.spark_submit_task.parameters", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "job_cluster.new_cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, Import: func(ic *importContext, r *resource) error { var job jobs.JobSettings @@ -491,6 +512,9 @@ var resourcesMap map[string]importable = map[string]importable{ ID: task.SqlTask.WarehouseID, }) } + if task.SqlTask.File != nil && task.SqlTask.File.Source == "WORKSPACE" { + ic.emitWorkspaceFileOrRepo(task.SqlTask.File.Path) + } } if task.DbtTask != nil { if task.DbtTask.WarehouseId != "" { @@ -499,6 +523,26 @@ var resourcesMap map[string]importable = map[string]importable{ ID: task.DbtTask.WarehouseId, }) } + if task.DbtTask.Source == "WORKSPACE" { + directory := task.DbtTask.ProjectDirectory + if strings.HasPrefix(directory, "/Repos") { + ic.emitRepoByPath(directory) + } else { + // Traverse the dbt project 
directory and emit all objects found in it + nbAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) + objects, err := nbAPI.List(directory, true, true) + if err == nil { + for _, object := range objects { + if object.ObjectType != workspace.File { + continue + } + ic.maybeEmitWorkspaceObject("databricks_workspace_file", object.Path) + } + } else { + log.Printf("[WARN] Can't list directory %s for DBT task in job %s (id: %s)", directory, job.Name, r.ID) + } + } + } } if task.RunJobTask != nil && task.RunJobTask.JobID != 0 { ic.Emit(&resource{ @@ -745,9 +789,12 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "libraries.whl", Resource: "databricks_workspace_file", Match: "workspace_path"}, {Path: "libraries.egg", Resource: "databricks_dbfs_file", Match: "dbfs_path"}, {Path: "libraries.egg", Resource: "databricks_workspace_file", Match: "workspace_path"}, - {Path: "libraries.whl", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "libraries.egg", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "libraries.jar", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, + {Path: "libraries.whl", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "libraries.egg", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "libraries.jar", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, // TODO: special formatting required, where JSON is written line by line // so that we're able to do the references @@ -1027,15 +1074,13 @@ var resourcesMap map[string]importable = map[string]importable{ // TODO: can we fill _path component for it, and then match on user/SP home instead? 
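// A condensed sketch of the dbt_task handling added above: when the task's
// source is WORKSPACE, a project directory under /Repos is emitted as a repo,
// otherwise the directory is listed and each plain file is emitted as a
// databricks_workspace_file. The emit/list functions here are stand-ins for
// the exporter's own helpers.
package main

import (
	"fmt"
	"strings"
)

type wsObject struct {
	Path   string
	IsFile bool
}

func emitDbtProject(directory string, list func(string) []wsObject, emitRepo, emitFile func(string)) {
	if strings.HasPrefix(directory, "/Repos") {
		emitRepo(directory)
		return
	}
	for _, obj := range list(directory) {
		if obj.IsFile {
			emitFile(obj.Path)
		}
	}
}

func main() {
	list := func(dir string) []wsObject {
		return []wsObject{{Path: dir + "/dbt_project.yml", IsFile: true}}
	}
	emitDbtProject("/Shared/dbt", list,
		func(p string) { fmt.Println("repo:", p) },
		func(p string) { fmt.Println("file:", p) })
}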
{Path: "directory_id", Resource: "databricks_directory", Match: "object_id"}, {Path: "notebook_id", Resource: "databricks_notebook", Match: "object_id"}, + {Path: "workspace_file_id", Resource: "databricks_workspace_file", Match: "object_id"}, {Path: "access_control.group_name", Resource: "databricks_group", Match: "display_name"}, {Path: "access_control.service_principal_name", Resource: "databricks_service_principal", Match: "application_id"}, {Path: "access_control.user_name", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, }, Ignore: func(ic *importContext, r *resource) bool { - var permissions permissions.PermissionsEntity - s := ic.Resources["databricks_permissions"].Schema - common.DataToStructPointer(r.Data, s, &permissions) - return (len(permissions.AccessControlList) == 0) + return (r.Data.Get("access_control.#").(int) == 0) }, Import: func(ic *importContext, r *resource) error { var permissions permissions.PermissionsEntity @@ -1073,8 +1118,7 @@ var resourcesMap map[string]importable = map[string]importable{ if scopes, err := ssAPI.List(); err == nil { for i, scope := range scopes { if !ic.MatchesName(scope.Name) { - log.Printf("[INFO] Secret scope %s doesn't match %s filter", - scope.Name, ic.match) + log.Printf("[INFO] Secret scope %s doesn't match %s filter", scope.Name, ic.match) continue } ic.Emit(&resource{ @@ -1214,37 +1258,29 @@ var resourcesMap map[string]importable = map[string]importable{ return name }, List: func(ic *importContext) error { - globalInitScripts, err := workspace.NewGlobalInitScriptsAPI(ic.Context, ic.Client).List() + globalInitScripts, err := ic.workspaceClient.GlobalInitScripts.ListAll(ic.Context) if err != nil { return err } - updatedSinceMs := ic.getUpdatedSinceMs() for offset, gis := range globalInitScripts { - modifiedAt := gis.UpdatedAt - if ic.incremental && modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping global init script '%s' that was modified at %d (last active=%d)", - gis.Name, modifiedAt, updatedSinceMs) - continue - } - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_global_init_script", - ID: gis.ScriptID, - }) + ID: gis.ScriptId, + }, int64(gis.UpdatedAt), fmt.Sprintf("global init script '%s'", gis.Name)) log.Printf("[INFO] Scanned %d of %d global init scripts", offset+1, len(globalInitScripts)) } return nil }, Import: func(ic *importContext, r *resource) error { - gis, err := workspace.NewGlobalInitScriptsAPI(ic.Context, ic.Client).Get(r.ID) + gis, err := ic.workspaceClient.GlobalInitScripts.GetByScriptId(ic.Context, r.ID) if err != nil { return err } - content, err := base64.StdEncoding.DecodeString(gis.ContentBase64) + content, err := base64.StdEncoding.DecodeString(gis.Script) if err != nil { return err } fileName, err := ic.createFile(fmt.Sprintf("%s.sh", r.Name), content) - log.Printf("Creating %s for %s", fileName, r) if err != nil { return err } @@ -1317,6 +1353,10 @@ var resourcesMap map[string]importable = map[string]importable{ return d.Get("branch").(string) == "" case "tag": return d.Get("tag").(string) == "" + case "git_provider": + url := d.Get("url").(string) + provider := repos.GetGitProviderFromUrl(url) + return provider != "" // omit git_provider only for well-known URLs } return defaultShouldOmitFieldFunc(ic, pathString, as, d) }, @@ -1331,8 +1371,10 @@ var resourcesMap map[string]importable = map[string]importable{ }, Depends: []reference{ - {Path: "path", Resource: "databricks_user", Match: "repos", MatchType: MatchPrefix}, - {Path: 
"path", Resource: "databricks_service_principal", Match: "repos", MatchType: MatchPrefix}, + {Path: "path", Resource: "databricks_user", Match: "repos", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_service_principal", Match: "repos", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, "databricks_workspace_conf": { @@ -1360,11 +1402,7 @@ var resourcesMap map[string]importable = map[string]importable{ return nil }, Import: func(ic *importContext, r *resource) error { - loaded := map[string]any{} - keyNames := []string{} - for k := range ic.workspaceConfKeys { - keyNames = append(keyNames, k) - } + keyNames := maps.Keys(ic.workspaceConfKeys) sort.Strings(keyNames) conf, err := ic.workspaceClient.WorkspaceConf.GetStatus(ic.Context, settings.GetStatusRequest{ Keys: strings.Join(keyNames, ","), @@ -1372,6 +1410,7 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return err } + loaded := map[string]any{} for k, v := range *conf { if v == "" { continue @@ -1395,18 +1434,11 @@ var resourcesMap map[string]importable = map[string]importable{ return err } ipLists := ipListsResp.IpAccessLists - updatedSinceMs := ic.getUpdatedSinceMs() for offset, ipList := range ipLists { - modifiedAt := ipList.UpdatedAt - if ic.incremental && modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping IP access list '%s' that was modified at %d (last active=%d)", - ipList.Label, modifiedAt, updatedSinceMs) - continue - } - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_ip_access_list", ID: ipList.ListId, - }) + }, ipList.UpdatedAt, fmt.Sprintf("IP access list '%s'", ipList.Label)) log.Printf("[INFO] Scanned %d of %d IP Access Lists", offset+1, len(ipLists)) } if len(ipLists) > 0 { @@ -1477,10 +1509,12 @@ var resourcesMap map[string]importable = map[string]importable{ ShouldOmitField: shouldOmitMd5Field, Depends: []reference{ {Path: "source", File: true}, - // TODO: This should be the longest prefix, and avoid data source if possible - it should be done in the `reference` function - {Path: "path", Resource: "databricks_directory", MatchType: MatchPrefix}, - {Path: "path", Resource: "databricks_user", Match: "home", MatchType: MatchPrefix}, - {Path: "path", Resource: "databricks_service_principal", Match: "home", MatchType: MatchPrefix}, + {Path: "path", Resource: "databricks_directory", + MatchType: MatchLongestPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_user", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_service_principal", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, "databricks_workspace_file": { @@ -1537,10 +1571,12 @@ var resourcesMap map[string]importable = map[string]importable{ ShouldOmitField: shouldOmitMd5Field, Depends: []reference{ {Path: "source", File: true}, - // TODO: This should be the longest prefix, and avoid data source if possible - it should be done in the `reference` function - {Path: "path", Resource: "databricks_directory", MatchType: MatchPrefix}, - {Path: "path", Resource: "databricks_user", Match: "home", MatchType: MatchPrefix}, - {Path: "path", Resource: "databricks_service_principal", Match: "home", MatchType: MatchPrefix}, + {Path: "path", Resource: "databricks_directory", + MatchType: MatchLongestPrefix, 
SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_user", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_service_principal", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, "databricks_sql_query": { @@ -1554,25 +1590,16 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return nil } - updatedSinceStr := ic.getUpdatedSinceStr() for i, q := range qs { name := q["name"].(string) if !ic.MatchesName(name) { continue } - updatedAt := q["updated_at"].(string) - if ic.incremental && updatedAt < updatedSinceStr { - log.Printf("[DEBUG] skipping query '%s' that was modified at %s (updatedSince=%s)", name, - updatedAt, updatedSinceStr) - continue - } - log.Printf("[DEBUG] emitting query '%s' that was modified at %s (updatedSince=%s)", name, - updatedAt, updatedSinceStr) - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterIsoString(&resource{ Resource: "databricks_sql_query", ID: q["id"].(string), Incremental: ic.incremental, - }) + }, q["updated_at"].(string), fmt.Sprintf("query '%s'", name)) log.Printf("[INFO] Imported %d of %d SQL queries", i+1, len(qs)) } @@ -1699,25 +1726,16 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return nil } - updatedSinceStr := ic.getUpdatedSinceStr() for i, q := range qs { name := q["name"].(string) if !ic.MatchesName(name) { continue } - updatedAt := q["updated_at"].(string) - if ic.incremental && updatedAt < updatedSinceStr { - log.Printf("[DEBUG] skipping dashboard '%s' that was modified at %s (updatedSince=%s)", name, - updatedAt, updatedSinceStr) - continue - } - log.Printf("[DEBUG] emitting dashboard '%s' that was modified at %s (updatedSince=%s)", name, - updatedAt, updatedSinceStr) - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterIsoString(&resource{ Resource: "databricks_sql_dashboard", ID: q["id"].(string), Incremental: ic.incremental, - }) + }, q["updated_at"].(string), fmt.Sprintf("dashboard '%s'", name)) log.Printf("[INFO] Imported %d of %d SQL dashboards", i+1, len(qs)) } return nil @@ -1824,7 +1842,6 @@ var resourcesMap map[string]importable = map[string]importable{ return d.Get("name").(string) + "_" + d.Id() }, List: func(ic *importContext) error { - updatedSinceStr := ic.getUpdatedSinceStr() alerts, err := ic.workspaceClient.Alerts.List(ic.Context) if err != nil { return err @@ -1834,18 +1851,11 @@ var resourcesMap map[string]importable = map[string]importable{ if !ic.MatchesName(name) { continue } - if ic.incremental && alert.UpdatedAt < updatedSinceStr { - log.Printf("[DEBUG] skipping alert '%s' that was modified at %s (last active=%s)", name, - alert.UpdatedAt, updatedSinceStr) - continue - } - log.Printf("[DEBUG] emitting alert '%s' that was modified at %s (last active=%s)", name, - alert.UpdatedAt, updatedSinceStr) - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterIsoString(&resource{ Resource: "databricks_sql_alert", ID: alert.Id, Incremental: ic.incremental, - }) + }, alert.UpdatedAt, fmt.Sprintf("alert '%s'", name)) log.Printf("[INFO] Imported %d of %d SQL alerts", i+1, len(alerts)) } return nil @@ -1888,27 +1898,22 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return err } - updatedSinceMs := ic.getUpdatedSinceMs() for i, q := range pipelinesList { if !ic.MatchesName(q.Name) { continue } + var modifiedAt int64 if ic.incremental { pipeline, err := api.Read(q.PipelineID) if err 
!= nil { return err } - modifiedAt := pipeline.LastModified - if modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping DLT Pipeline '%s' that was modified at %d (last active=%d)", - pipeline.Name, modifiedAt, updatedSinceMs) - continue - } + modifiedAt = pipeline.LastModified } - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_pipeline", ID: q.PipelineID, - }) + }, modifiedAt, fmt.Sprintf("DLT Pipeline '%s'", q.Name)) log.Printf("[INFO] Imported %d of %d DLT Pipelines", i+1, len(pipelinesList)) } return nil @@ -1999,10 +2004,14 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "library.file.path", Resource: "databricks_workspace_file"}, {Path: "library.jar", Resource: "databricks_dbfs_file", Match: "dbfs_path"}, {Path: "library.whl", Resource: "databricks_dbfs_file", Match: "dbfs_path"}, - {Path: "configuration", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, - {Path: "library.notebook.path", Resource: "databricks_repo", Match: "path", MatchType: MatchPrefix}, - {Path: "library.file.path", Resource: "databricks_repo", Match: "path", MatchType: MatchPrefix}, - {Path: "cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", MatchType: MatchPrefix}, + {Path: "configuration", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "library.notebook.path", Resource: "databricks_repo", Match: "path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "library.file.path", Resource: "databricks_repo", Match: "path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "cluster.init_scripts.workspace.destination", Resource: "databricks_repo", Match: "workspace_path", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, "databricks_directory": { @@ -2063,8 +2072,10 @@ var resourcesMap map[string]importable = map[string]importable{ Body: resourceOrDataBlockBody, Depends: []reference{ // TODO: it should try to find longest reference to another directory object that it not itself... 
- {Path: "path", Resource: "databricks_user", Match: "home", MatchType: MatchPrefix}, - {Path: "path", Resource: "databricks_service_principal", Match: "home", MatchType: MatchPrefix}, + {Path: "path", Resource: "databricks_user", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, + {Path: "path", Resource: "databricks_service_principal", Match: "home", + MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, "databricks_model_serving": { @@ -2080,18 +2091,11 @@ var resourcesMap map[string]importable = map[string]importable{ return err } - updatedSinceMs := ic.getUpdatedSinceMs() for offset, endpoint := range endpointsList { - modifiedAt := endpoint.LastUpdatedTimestamp - if ic.incremental && modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping serving endpoint '%s' that was modified at %d (last active=%d)", - endpoint.Name, modifiedAt, updatedSinceMs) - continue - } - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_model_serving", ID: endpoint.Name, - }) + }, endpoint.LastUpdatedTimestamp, fmt.Sprintf("serving endpoint '%s'", endpoint.Name)) if offset%50 == 0 { log.Printf("[INFO] Scanned %d of %d Serving Endpoints", offset+1, len(endpointsList)) } @@ -2129,21 +2133,11 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return err } - - updatedSinceMs := ic.getUpdatedSinceMs() for offset, webhook := range webhooks { - modifiedAt := webhook.LastUpdatedTimestamp - if ic.incremental && modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping MLflow webhook '%s' that was modified at %d (last active=%d)", - webhook.Id, modifiedAt, updatedSinceMs) - continue - } - log.Printf("[DEBUG] emitting MLflow webhook '%s' that was modified at %d (last active=%d)", - webhook.Id, modifiedAt, updatedSinceMs) - ic.Emit(&resource{ + ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_mlflow_webhook", ID: webhook.Id, - }) + }, webhook.LastUpdatedTimestamp, fmt.Sprintf("webhook '%s'", webhook.Id)) if webhook.JobSpec != nil && webhook.JobSpec.JobId != "" { ic.Emit(&resource{ Resource: "databricks_job", @@ -2294,4 +2288,555 @@ var resourcesMap map[string]importable = map[string]importable{ }, // TODO: add Depends & Import to emit corresponding UC Volumes when support for them is added }, + "databricks_catalog": { + WorkspaceLevel: true, + Service: "uc-catalogs", + List: func(ic *importContext) error { + if ic.currentMetastore == nil { + return fmt.Errorf("there is no UC metastore information") + } + catalogs, err := ic.workspaceClient.Catalogs.ListAll(ic.Context) + if err != nil { + return err + } + for _, v := range catalogs { + switch v.CatalogType { + case "MANAGED_CATALOG", "FOREIGN_CATALOG", "DELTASHARING_CATALOG": + { + name := fmt.Sprintf("%s_%s_%s", v.Name, ic.currentMetastore.Name, v.CatalogType) + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_catalog", + ID: v.Name, + Name: nameNormalizationRegex.ReplaceAllString(name, "_"), + }, v.Name, v.UpdatedAt, fmt.Sprintf("catalog '%s'", v.Name)) + } + default: + log.Printf("[INFO] Skipping catalog %s of type %s", v.Name, v.CatalogType) + } + } + return nil + }, + Import: func(ic *importContext, r *resource) error { + var cat tfuc.CatalogInfo + s := ic.Resources["databricks_catalog"].Schema + common.DataToStructPointer(r.Data, s, &cat) + + // Emit: UC Connection, List schemas, Catalog grants, ... 
+ ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "catalog/" + cat.Name, + }) + // TODO: emit owner? Should we do this? Because it's a account-level identity... Create a separate function for that... + if cat.ConnectionName != "" { + ic.Emit(&resource{ + Resource: "databricks_connection", + ID: cat.MetastoreID + "|" + cat.ConnectionName, + }) + } else if cat.ShareName == "" { + // TODO: We need to be careful here if we add more catalog types... Really we need to have CatalogType in resource + schemas, err := ic.workspaceClient.Schemas.ListAll(ic.Context, catalog.ListSchemasRequest{CatalogName: r.ID}) + if err != nil { + return err + } + ignoredSchemas := []string{"information_schema"} + for _, schema := range schemas { + if schema.CatalogType != "MANAGED_CATALOG" || slices.Contains(ignoredSchemas, schema.Name) { + continue + } + ic.EmitIfUpdatedAfterMillis(&resource{ + Resource: "databricks_schema", + ID: schema.FullName, + }, schema.UpdatedAt, fmt.Sprintf("schema '%s'", schema.FullName)) + } + } + if cat.IsolationMode == "ISOLATED" { + securable := "catalog" + bindings, err := ic.workspaceClient.WorkspaceBindings.GetBindings(ic.Context, catalog.GetBindingsRequest{ + SecurableName: cat.Name, + SecurableType: securable, + }) + if err == nil { + for _, binding := range bindings.Bindings { + id := fmt.Sprintf("%d|%s|%s", binding.WorkspaceId, securable, cat.Name) + d := ic.Resources["databricks_catalog_workspace_binding"].Data( + &terraform.InstanceState{ + ID: id, + Attributes: map[string]string{ + "workspace_id": fmt.Sprintf("%d", binding.WorkspaceId), + "securable_type": securable, + "securable_name": cat.Name, + "binding_type": binding.BindingType.String(), + }, + }) + ic.Emit(&resource{ + Resource: "databricks_catalog_workspace_binding", + ID: id, + Name: fmt.Sprintf("%s_%s_ws_%d", securable, cat.Name, binding.WorkspaceId), + Data: d, + }) + } + } else { + log.Printf("[ERROR] listing catalog bindings: %s", err.Error()) + } + } + return nil + }, + ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + if pathString == "isolation_mode" { + return d.Get(pathString).(string) != "ISOLATED" + } + return shouldOmitForUnityCatalog(ic, pathString, as, d) + }, + Depends: []reference{ + {Path: "connection_name", Resource: "databricks_connection", Match: "name"}, + {Path: "storage_root", Resource: "databricks_external_location", Match: "url", MatchType: MatchPrefix}, + }, + // TODO: convert `main` catalog into the data source as it's automatically created? + // This will require addition of the databricks_catalog data source + }, + "databricks_schema": { + WorkspaceLevel: true, + Service: "uc-schemas", + Import: func(ic *importContext, r *resource) error { + schemaFullName := r.ID + catalogName := r.Data.Get("catalog_name").(string) + schemaName := r.Data.Get("name").(string) + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "schema/" + schemaFullName, + }) + ic.Emit(&resource{ + Resource: "databricks_catalog", + ID: catalogName, + }) + // TODO: emit owner? See comment in catalog resource + // TODO: list tables + // list registered models + models, err := ic.workspaceClient.RegisteredModels.ListAll(ic.Context, + catalog.ListRegisteredModelsRequest{ + CatalogName: catalogName, + SchemaName: schemaName, + }) + if err != nil { // TODO: should we continue? 
+ return err + } + for _, model := range models { + ic.EmitIfUpdatedAfterMillis(&resource{ + Resource: "databricks_registered_model", + ID: model.FullName, + }, model.UpdatedAt, fmt.Sprintf("registered model '%s'", model.FullName)) + } + // list volumes + volumes, err := ic.workspaceClient.Volumes.ListAll(ic.Context, + catalog.ListVolumesRequest{ + CatalogName: catalogName, + SchemaName: schemaName, + }) + if err != nil { + return err + } + for _, volume := range volumes { + ic.EmitIfUpdatedAfterMillis(&resource{ + Resource: "databricks_volume", + ID: volume.FullName, + }, volume.UpdatedAt, fmt.Sprintf("volume '%s'", volume.FullName)) + } + + return nil + }, + ShouldOmitField: shouldOmitForUnityCatalog, + Depends: []reference{ + {Path: "catalog_name", Resource: "databricks_catalog"}, + {Path: "storage_root", Resource: "databricks_external_location", Match: "url", MatchType: MatchPrefix}, + }, + }, + "databricks_volume": { + WorkspaceLevel: true, + Service: "uc-volumes", + Import: func(ic *importContext, r *resource) error { + volumeFullName := r.ID + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "volume/" + volumeFullName, + }) + catalogName := r.Data.Get("catalog_name").(string) + ic.Emit(&resource{ + Resource: "databricks_schema", + ID: catalogName + "." + r.Data.Get("schema_name").(string), + }) + ic.Emit(&resource{ + Resource: "databricks_catalog", + ID: catalogName, + }) + // TODO: emit owner? See comment in catalog resource + return nil + }, + ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + switch pathString { + case "storage_location", "volume_type": + return d.Get("volume_type").(string) == "MANAGED" + } + return shouldOmitForUnityCatalog(ic, pathString, as, d) + }, + Depends: []reference{ + {Path: "catalog_name", Resource: "databricks_catalog"}, + {Path: "schema_name", Resource: "databricks_schema", Match: "name", + IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + {Path: "storage_location", Resource: "databricks_external_location", Match: "url", MatchType: MatchPrefix}, + }, + }, + "databricks_grants": { + WorkspaceLevel: true, + Service: "uc-grants", + // TODO: Should we try to make name unique? + // TODO: do we need to emit principals? Maybe only on account level? See comment for the owner... + Ignore: func(ic *importContext, r *resource) bool { + return r.Data.Get("grant.#").(int) == 0 + }, + Depends: []reference{ + {Path: "catalog", Resource: "databricks_catalog"}, + {Path: "schema", Resource: "databricks_schema"}, + {Path: "volume", Resource: "databricks_volume"}, + {Path: "share", Resource: "databricks_share"}, + {Path: "foreign_connection", Resource: "databricks_connection", Match: "name"}, + {Path: "metastore", Resource: "databricks_metastore"}, + {Path: "model", Resource: "databricks_registered_model"}, + {Path: "external_location", Resource: "databricks_external_location", Match: "name"}, + {Path: "storage_credential", Resource: "databricks_storage_credential"}, + // TODO: add similar matchers for users/groups/SPs on account level... 
+ {Path: "grant.principal", Resource: "databricks_recipient", IsValidApproximation: isMatchingShareRecipient}, + // {Path: "", Resource: ""}, + // {Path: "", Resource: ""}, + }, + }, + "databricks_storage_credential": { + WorkspaceLevel: true, + AccountLevel: true, + Service: "uc-storage-credentials", + Import: func(ic *importContext, r *resource) error { + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: fmt.Sprintf("storage_credential/%s", r.ID), + }) + return nil + }, + List: func(ic *importContext) error { + var objList []catalog.StorageCredentialInfo + var err error + + if ic.accountLevel { + if ic.currentMetastore == nil { + return fmt.Errorf("there is no UC metastore information") + } + currentMetastore := ic.currentMetastore.MetastoreId + objList, err = ic.accountClient.StorageCredentials.List(ic.Context, catalog.ListAccountStorageCredentialsRequest{ + MetastoreId: currentMetastore, + }) + } else { + objList, err = ic.workspaceClient.StorageCredentials.ListAll(ic.Context, catalog.ListStorageCredentialsRequest{}) + } + if err != nil { + return err + } + + for _, v := range objList { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_storage_credential", + ID: v.Name, + }, v.Name, v.UpdatedAt, fmt.Sprintf("storage credential %s", v.Name)) + } + return nil + }, + ShouldOmitField: shouldOmitForUnityCatalog, + Depends: []reference{ + {Path: "azure_service_principal.client_secret", Variable: true}, + }, + }, + "databricks_external_location": { + WorkspaceLevel: true, + Service: "uc-external-locations", + Import: func(ic *importContext, r *resource) error { + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: fmt.Sprintf("external_location/%s", r.ID), + }) + ic.Emit(&resource{ + Resource: "databricks_storage_credential", + ID: r.Data.Get("credential_name").(string), + }) + return nil + }, + List: func(ic *importContext) error { + objList, err := ic.workspaceClient.ExternalLocations.ListAll(ic.Context, catalog.ListExternalLocationsRequest{}) + if err != nil { + return err + } + for _, v := range objList { + if v.Name != "metastore_default_location" { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_external_location", + ID: v.Name, + }, v.Name, v.UpdatedAt, fmt.Sprintf("external location %s", v.Name)) + } + } + return nil + }, + ShouldOmitField: shouldOmitForUnityCatalog, + // This external location is automatically created when metastore is created with the `storage_root` + Ignore: func(ic *importContext, r *resource) bool { + return r.ID == "metastore_default_location" + }, + Depends: []reference{ + {Path: "credential_name", Resource: "databricks_storage_credential", Match: "name"}, + }, + }, + "databricks_connection": { + WorkspaceLevel: true, + Service: "uc-connections", + Name: func(ic *importContext, d *schema.ResourceData) string { + connectionName := d.Get("name").(string) + connectionType := d.Get("connection_type").(string) + if connectionName == "" || connectionType == "" { + return d.Id() + } + return connectionType + "_" + connectionName + }, + List: func(ic *importContext) error { + connections, err := ic.workspaceClient.Connections.ListAll(ic.Context) + if err != nil { + return err + } + for _, conn := range connections { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_connection", + ID: conn.MetastoreId + "|" + conn.Name, + }, conn.Name, conn.UpdatedAt, fmt.Sprintf("connection '%s'", conn.Name)) + } + return nil + }, + // TODO: think what to do with the sensitive fields 
in the `options`? + Import: func(ic *importContext, r *resource) error { + // TODO: do we need to emit the owner See comment for the owner... + connectionName := r.Data.Get("name").(string) + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "foreign_connection/" + connectionName, + }) + return nil + }, + ShouldOmitField: shouldOmitForUnityCatalog, + }, + "databricks_share": { + WorkspaceLevel: true, + Service: "uc-shares", + List: func(ic *importContext) error { + shares, err := ic.workspaceClient.Shares.ListAll(ic.Context) + if err != nil { + return err + } + for _, share := range shares { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_share", + ID: share.Name, + }, share.Name, share.UpdatedAt, fmt.Sprintf("share '%s'", share.Name)) + } + return nil + }, + Import: func(ic *importContext, r *resource) error { + // TODO: do we need to emit the owner See comment for the owner... + var share tfuc.ShareInfo + s := ic.Resources["databricks_share"].Schema + common.DataToStructPointer(r.Data, s, &share) + // TODO: how to link recipients to share? + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "share/" + r.ID, + }) + for _, obj := range share.Objects { + switch obj.DataObjectType { + case "TABLE": + ic.Emit(&resource{ + Resource: "databricks_sql_table", + ID: obj.Name, + }) + case "VOLUME": + ic.Emit(&resource{ + Resource: "databricks_volume", + ID: obj.Name, + }) + case "MODEL": + ic.Emit(&resource{ + Resource: "databricks_registered_model", + ID: obj.Name, + }) + default: + log.Printf("[INFO] Object type '%s' (name: '%s') isn't supported in share '%s'", + obj.DataObjectType, obj.Name, r.ID) + } + } + + return nil + }, + ShouldOmitField: shouldOmitForUnityCatalog, + Depends: []reference{ + {Path: "object.name", Resource: "databricks_volume", IsValidApproximation: isMatchignShareObject("VOLUME")}, + {Path: "object.name", Resource: "databricks_registered_model", IsValidApproximation: isMatchignShareObject("MODEL")}, + {Path: "object.name", Resource: "databricks_schema", IsValidApproximation: isMatchignShareObject("SCHEMA")}, + // {Path: "object.name", Resource: "databricks_sql_table"}, + }, + }, + "databricks_recipient": { + WorkspaceLevel: true, + Service: "uc-shares", + List: func(ic *importContext) error { + recipients, err := ic.workspaceClient.Recipients.ListAll(ic.Context, sharing.ListRecipientsRequest{}) + if err != nil { + return err + } + for _, rec := range recipients { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_recipient", + ID: rec.Name, + }, rec.Name, rec.UpdatedAt, fmt.Sprintf("recipient '%s'", rec.Name)) + } + return nil + }, + // TODO: do we need to emit the owner See comment for the owner... + // TODO: emit variable for sharing_code ... + // TODO: add depends for sharing_code? + }, + "databricks_registered_model": { + WorkspaceLevel: true, + Service: "uc-models", + // TODO: it doesn't work right now, need a fix in the Go SDK + // List: func(ic *importContext) error { + // models, err := ic.workspaceClient.RegisteredModels.ListAll(ic.Context, catalog.ListRegisteredModelsRequest{}) + // if err != nil { + // return err + // } + // for _, model := range models { + // TODO: Add name matching... 
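// The databricks_share importer above fans out shared objects to the matching
// resource by data_object_type. The same dispatch expressed as a small lookup
// table (resource names taken from the switch above; the map form is only an
// illustration):
package main

import "fmt"

var shareObjectResource = map[string]string{
	"TABLE":  "databricks_sql_table",
	"VOLUME": "databricks_volume",
	"MODEL":  "databricks_registered_model",
}

func resourceForShareObject(dataObjectType string) (string, bool) {
	r, ok := shareObjectResource[dataObjectType]
	return r, ok
}

func main() {
	for _, t := range []string{"TABLE", "NOTEBOOK_FILE"} {
		if r, ok := resourceForShareObject(t); ok {
			fmt.Printf("%s -> %s\n", t, r)
		} else {
			fmt.Printf("%s is not supported in shares\n", t)
		}
	}
}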
+ // ic.EmitIfUpdatedAfterMillis(&resource{ + // Resource: "databricks_registered_model", + // ID: model.FullName, + // }, model.UpdatedAt, fmt.Sprintf("registered model '%s'", model.FullName)) + // } + // return nil + // }, + Import: func(ic *importContext, r *resource) error { + modelFullName := r.ID + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "model/" + modelFullName, + }) + catalogName := r.Data.Get("catalog_name").(string) + ic.Emit(&resource{ + Resource: "databricks_schema", + ID: catalogName + "." + r.Data.Get("catalog_name").(string), + }) + ic.Emit(&resource{ + Resource: "databricks_catalog", + ID: catalogName, + }) + // TODO: emit owner? See comment in catalog resource + return nil + }, + ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + if pathString == "storage_location" { + location := d.Get(pathString).(string) + // TODO: don't generate it if it's managed. + // check if string contains metastore_id/models/model_id (although we don't have model_id in the state) + return location == "" + } + return shouldOmitForUnityCatalog(ic, pathString, as, d) + }, + Depends: []reference{ + {Path: "catalog_name", Resource: "databricks_catalog"}, + {Path: "schema_name", Resource: "databricks_schema", Match: "name", + IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + {Path: "storage_root", Resource: "databricks_external_location", Match: "url", MatchType: MatchPrefix}, + }, + }, + "databricks_metastore": { + WorkspaceLevel: true, + AccountLevel: true, + Service: "uc-metastores", + Name: func(ic *importContext, d *schema.ResourceData) string { + name := d.Get("name").(string) + if name == "" { + return d.Id() + } + return name + }, + List: func(ic *importContext) error { + var err error + var metastores []catalog.MetastoreInfo + if ic.accountLevel { + metastores, err = ic.accountClient.Metastores.ListAll(ic.Context) + } else { + metastores, err = ic.workspaceClient.Metastores.ListAll(ic.Context) + } + if err != nil { + return err + } + for _, mstore := range metastores { + ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ + Resource: "databricks_metastore", + ID: mstore.MetastoreId, + }, mstore.Name, mstore.UpdatedAt, fmt.Sprintf("metastore '%s'", mstore.Name)) + } + return nil + }, + Import: func(ic *importContext, r *resource) error { + ic.Emit(&resource{ + Resource: "databricks_grants", + ID: "metastore/" + r.ID, + }) + // TODO: emit owner? 
See comment in catalog resource + if ic.accountLevel { // emit metastore assignments + assignments, err := ic.accountClient.MetastoreAssignments.ListByMetastoreId(ic.Context, r.ID) + if err == nil { + for _, workspaceID := range assignments.WorkspaceIds { + ic.Emit(&resource{ + Resource: "databricks_metastore_assignment", + ID: fmt.Sprintf("%d|%s", workspaceID, r.ID), + }) + } + } else { + log.Printf("[ERROR] listing metastore assignments: %s", err.Error()) + } + } + return nil + }, + ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + if pathString == "default_data_access_config_id" || pathString == "storage_root_credential_id" { + // technically, both should be marked as `computed` + return true + } + return shouldOmitForUnityCatalog(ic, pathString, as, d) + }, + }, + "databricks_metastore_assignment": { + AccountLevel: true, + Service: "uc-metastores", + Name: func(ic *importContext, d *schema.ResourceData) string { + return fmt.Sprintf("ws_%d", d.Get("workspace_id").(int)) + }, + ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + if pathString == "default_catalog_name" { + return d.Get(pathString).(string) == "" + } + return defaultShouldOmitFieldFunc(ic, pathString, as, d) + }, + Depends: []reference{ + {Path: "metastore_id", Resource: "databricks_metastore"}, + }, + }, + "databricks_catalog_workspace_binding": { + WorkspaceLevel: true, + Service: "uc-catalogs", + Depends: []reference{ + {Path: "securable_name", Resource: "databricks_catalog", Match: "name"}, + }, + }, } diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 933d66a4bd..70f06c8de8 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -13,6 +13,8 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/sharing" + tfcatalog "github.com/databricks/terraform-provider-databricks/catalog" "github.com/databricks/terraform-provider-databricks/clusters" "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" @@ -31,6 +33,7 @@ import ( "github.com/databricks/terraform-provider-databricks/workspace" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/exp/maps" ) @@ -38,20 +41,24 @@ func importContextForTest() *importContext { p := provider.DatabricksProvider() supportedResources := maps.Keys(resourcesMap) return &importContext{ - Importables: resourcesMap, - Resources: p.ResourcesMap, - testEmits: map[string]bool{}, - nameFixes: nameFixes, - waitGroup: &sync.WaitGroup{}, - allUsers: map[string]scim.User{}, - allSps: map[string]scim.User{}, - channels: makeResourcesChannels(), - exportDeletedUsersAssets: false, - ignoredResources: map[string]struct{}{}, - State: newStateApproximation(supportedResources), - emittedUsers: map[string]struct{}{}, - userOrSpDirectories: map[string]bool{}, - defaultChannel: make(resourceChannel, defaultChannelSize), + Importables: resourcesMap, + Resources: p.ResourcesMap, + testEmits: map[string]bool{}, + nameFixes: nameFixes, + waitGroup: &sync.WaitGroup{}, + allUsers: map[string]scim.User{}, + allSps: map[string]scim.User{}, + channels: makeResourcesChannels(), + oldWorkspaceObjectMapping: map[int64]string{}, + 
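// Metastore assignments above are emitted with the composite ID
// "<workspace_id>|<metastore_id>" and named "ws_<workspace_id>". A small
// sketch of composing and splitting that ID (the parse helper is illustrative,
// not part of the provider):
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func assignmentID(workspaceID int64, metastoreID string) string {
	return fmt.Sprintf("%d|%s", workspaceID, metastoreID)
}

func splitAssignmentID(id string) (int64, string, error) {
	parts := strings.SplitN(id, "|", 2)
	if len(parts) != 2 {
		return 0, "", fmt.Errorf("malformed assignment id %q", id)
	}
	ws, err := strconv.ParseInt(parts[0], 10, 64)
	return ws, parts[1], err
}

func main() {
	id := assignmentID(123456, "1234-abcd")
	fmt.Println(id)
	fmt.Println(splitAssignmentID(id))
}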
exportDeletedUsersAssets: false, + ignoredResources: map[string]struct{}{}, + deletedResources: map[string]struct{}{}, + State: newStateApproximation(supportedResources), + emittedUsers: map[string]struct{}{}, + userOrSpDirectories: map[string]bool{}, + defaultChannel: make(resourceChannel, defaultChannelSize), + services: map[string]struct{}{}, + listing: map[string]struct{}{}, } } @@ -914,18 +921,18 @@ func TestGlobalInitScriptsBodyErrors(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ { Method: "GET", - Resource: "/api/2.0/global-init-scripts/sad-emoji", - Response: workspace.GlobalInitScriptInfo{ - Name: "x.sh", - ContentBase64: "🥺", + Resource: "/api/2.0/global-init-scripts/sad-emoji?", + Response: compute.GlobalInitScriptDetailsWithContent{ + Name: "x.sh", + Script: "🥺", }, }, { Method: "GET", - Resource: "/api/2.0/global-init-scripts/second", - Response: workspace.GlobalInitScriptInfo{ - Name: "x.sh", - ContentBase64: "YWJj", + Resource: "/api/2.0/global-init-scripts/second?", + Response: compute.GlobalInitScriptDetailsWithContent{ + Name: "x.sh", + Script: "YWJj", }, }, }, func(ctx context.Context, client *common.DatabricksClient) { @@ -1205,11 +1212,12 @@ func TestGlobalInitScriptGeneration(t *testing.T) { { Method: "GET", ReuseRequest: true, - Resource: "/api/2.0/global-init-scripts/a", - Response: workspace.GlobalInitScriptInfo{ - Name: "New: Importing ^ Things", - Enabled: true, - ContentBase64: "YWJj", + Resource: "/api/2.0/global-init-scripts/a?", + Response: compute.GlobalInitScriptDetailsWithContent{ + ScriptId: "a", + Name: "New: Importing ^ Things", + Enabled: true, + Script: "YWJj", }, }, }, "workspace", false, func(ic *importContext) { @@ -1469,3 +1477,599 @@ func TestEmitFilesFromMap(t *testing.T) { assert.Contains(t, ic.testEmits, "databricks_dbfs_file[] (id: dbfs:/FileStore/test.txt)") assert.Contains(t, ic.testEmits, "databricks_workspace_file[] (id: /Shared/test.txt)") } + +func TestStorageCredentialListFails(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/storage-credentials?", + Status: 200, + Response: &catalog.ListStorageCredentialsResponse{}, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + err := resourcesMap["databricks_storage_credential"].List(ic) + assert.NoError(t, err) + }) +} + +func TestImportStorageCredentialGrants(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Status: 200, + Resource: "/api/2.1/unity-catalog/permissions/storage_credential/abc", + Response: catalog.PermissionsList{ + PrivilegeAssignments: []catalog.PrivilegeAssignment{ + { + Principal: "principal", + Privileges: []catalog.Privilege{"CREATE EXTERNAL LOCATION"}, + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + d := tfcatalog.ResourceStorageCredential().ToResource().TestResourceData() + d.SetId("abc") + err := resourcesMap["databricks_storage_credential"].Import(ic, &resource{ + ID: "abc", + Data: d, + }) + assert.NoError(t, err) + }) +} + +func TestExternalLocationListFails(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/external-locations?", + Status: 200, + Response: &catalog.ListExternalLocationsResponse{}, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := 
importContextForTestWithClient(ctx, client) + err := resourcesMap["databricks_external_location"].List(ic) + assert.NoError(t, err) + }) +} + +func TestImportExternalLocationGrants(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Status: 200, + Resource: "/api/2.1/unity-catalog/permissions/external-locations/abc", + Response: catalog.PermissionsList{ + PrivilegeAssignments: []catalog.PrivilegeAssignment{ + { + Principal: "principal", + Privileges: []catalog.Privilege{"ALL PRIVILEGES"}, + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + d := tfcatalog.ResourceExternalLocation().ToResource().TestResourceData() + d.SetId("abc") + err := resourcesMap["databricks_external_location"].Import(ic, &resource{ + ID: "abc", + Data: d, + }) + assert.NoError(t, err) + }) +} + +func TestListMetastores(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/metastores", + Response: catalog.ListMetastoresResponse{ + Metastores: []catalog.MetastoreInfo{ + { + Name: "test", + MetastoreId: "1234", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-metastores") + err := resourcesMap["databricks_metastore"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_metastore[] (id: 1234)"]) + }) +} + +func TestListCatalogs(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/catalogs", + Response: catalog.ListCatalogsResponse{ + Catalogs: []catalog.CatalogInfo{ + { + Name: "cat1", + CatalogType: "MANAGED_CATALOG", + }, + { + Name: "cat2", + CatalogType: "UNKNOWN", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-catalogs") + ic.currentMetastore = currentMetastoreResponse + err := resourcesMap["databricks_catalog"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_catalog[cat1_test_MANAGED_CATALOG] (id: cat1)"]) + }) +} + +func TestImportManagedCatalog(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/schemas?catalog_name=ctest", + Response: catalog.ListSchemasResponse{ + Schemas: []catalog.SchemaInfo{ + { + CatalogType: "MANAGED_CATALOG", + Name: "schema1", + FullName: "ctest.schema1", + }, + { + CatalogType: "MANAGED_CATALOG", + Name: "information_schema", + FullName: "ctest.schema1", + }, + { + CatalogType: "UNKNOWN", + FullName: "ctest.schema2", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-catalogs,uc-grants,uc-schemas") + ic.currentMetastore = currentMetastoreResponse + d := tfcatalog.ResourceCatalog().ToResource().TestResourceData() + d.SetId("ctest") + d.Set("name", "ctest") + err := resourcesMap["databricks_catalog"].Import(ic, &resource{ + ID: "ctest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 2, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: catalog/ctest)"]) + assert.True(t, ic.testEmits["databricks_schema[] (id: 
ctest.schema1)"]) + }) +} + +func TestImportForeignCatalog(t *testing.T) { + ic := importContextForTest() + ic.enableServices("uc-catalogs,uc-grants,uc-connections") + ic.currentMetastore = currentMetastoreResponse + d := tfcatalog.ResourceCatalog().ToResource().TestResourceData() + d.SetId("fctest") + d.Set("metastore_id", "1234") + d.Set("connection_name", "conn") + d.Set("name", "fctest") + err := resourcesMap["databricks_catalog"].Import(ic, &resource{ + ID: "fctest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 2, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: catalog/fctest)"]) + assert.True(t, ic.testEmits["databricks_connection[] (id: 1234|conn)"]) +} + +func TestImportIsolatedManagedCatalog(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/schemas?catalog_name=ctest", + Response: catalog.ListSchemasResponse{}, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/ctest?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: "BINDING_TYPE_READ", + WorkspaceId: 1234, + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-catalogs,uc-grants,uc-schemas") + ic.currentMetastore = currentMetastoreResponse + d := tfcatalog.ResourceCatalog().ToResource().TestResourceData() + d.SetId("ctest") + d.Set("name", "ctest") + d.Set("isolation_mode", "ISOLATED") + err := resourcesMap["databricks_catalog"].Import(ic, &resource{ + ID: "ctest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 2, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: catalog/ctest)"]) + assert.True(t, ic.testEmits["databricks_catalog_workspace_binding[catalog_ctest_ws_1234] (id: 1234|catalog|ctest)"]) + }) +} + +func TestImportSchema(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/models?catalog_name=ctest&schema_name=stest", + Response: catalog.ListRegisteredModelsResponse{ + RegisteredModels: []catalog.RegisteredModelInfo{ + { + Name: "model1", + FullName: "ctest.stest.model1", + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/volumes?catalog_name=ctest&schema_name=stest", + Response: catalog.ListVolumesResponseContent{ + Volumes: []catalog.VolumeInfo{ + { + Name: "volume1", + FullName: "ctest.stest.volume1", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-catalogs,uc-grants,uc-schemas,uc-volumes,uc-models") + ic.currentMetastore = currentMetastoreResponse + d := tfcatalog.ResourceSchema().ToResource().TestResourceData() + d.SetId("ctest.stest") + d.Set("catalog_name", "ctest") + d.Set("name", "stest") + err := resourcesMap["databricks_schema"].Import(ic, &resource{ + ID: "ctest.stest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 4, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: schema/ctest.stest)"]) + assert.True(t, ic.testEmits["databricks_catalog[] (id: ctest)"]) + assert.True(t, ic.testEmits["databricks_registered_model[] (id: ctest.stest.model1)"]) + assert.True(t, ic.testEmits["databricks_volume[] (id: ctest.stest.volume1)"]) + }) +} + +func TestImportShare(t *testing.T) { + ic := importContextForTest() + 
ic.enableServices("uc-grants,uc-volumes,uc-models") + d := tfcatalog.ResourceShare().ToResource().TestResourceData() + scm := tfcatalog.ResourceShare().Schema + share := tfcatalog.ShareInfo{ + Name: "stest", + Objects: []tfcatalog.SharedDataObject{ + { + DataObjectType: "TABLE", + Name: "ctest.stest.table1", + }, + { + DataObjectType: "MODEL", + Name: "ctest.stest.model1", + }, + { + DataObjectType: "VOLUME", + Name: "ctest.stest.vol1", + }, + { + DataObjectType: "NOTEBOOK", + Name: "Test", + }, + }, + } + d.MarkNewResource() + common.StructToData(share, scm, d) + err := common.StructToData(share, scm, d) + require.NoError(t, err) + err = resourcesMap["databricks_share"].Import(ic, &resource{ + ID: "stest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 3, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: share/stest)"]) + assert.True(t, ic.testEmits["databricks_registered_model[] (id: ctest.stest.model1)"]) + assert.True(t, ic.testEmits["databricks_volume[] (id: ctest.stest.vol1)"]) +} + +func TestConnections(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/connections", + Response: catalog.ListConnectionsResponse{ + Connections: []catalog.ConnectionInfo{ + { + Name: "test", + MetastoreId: "12345", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-connections,uc-grants") + // Test Listing + err := resourcesMap["databricks_connection"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_connection[] (id: 12345|test)"]) + // Test Importing + d := tfcatalog.ResourceConnection().ToResource().TestResourceData() + d.SetId("ctest") + d.Set("name", "ctest") + err = resourcesMap["databricks_connection"].Import(ic, &resource{ + ID: "ctest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 2, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: foreign_connection/ctest)"]) + }) +} + +func TestListExternalLocations(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/external-locations?", + Response: catalog.ListExternalLocationsResponse{ + ExternalLocations: []catalog.ExternalLocationInfo{ + { + Name: "test", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-external-locations,uc-storage-credentials,uc-grants") + ic.currentMetastore = currentMetastoreResponse + // Test listing + err := resourcesMap["databricks_external_location"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_external_location[] (id: test)"]) + // Test import + d := tfcatalog.ResourceExternalLocation().ToResource().TestResourceData() + d.SetId("ext_loc") + d.Set("credential_name", "stest") + err = resourcesMap["databricks_external_location"].Import(ic, &resource{ + ID: "ext_loc", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 3, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: external_location/ext_loc)"]) + assert.True(t, ic.testEmits["databricks_storage_credential[] (id: stest)"]) + }) +} + +func TestStorageCredentials(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: 
true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/storage-credentials?", + Response: catalog.ListStorageCredentialsResponse{ + StorageCredentials: []catalog.StorageCredentialInfo{ + { + Name: "test", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-storage-credentials,uc-grants") + ic.currentMetastore = currentMetastoreResponse + // Test listing + err := resourcesMap["databricks_storage_credential"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_storage_credential[] (id: test)"]) + // Test import + err = resourcesMap["databricks_storage_credential"].Import(ic, &resource{ + ID: "1234", + }) + assert.NoError(t, err) + require.Equal(t, 2, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: storage_credential/1234)"]) + }) +} + +func TestListRecipients(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/recipients?", + Response: sharing.ListRecipientsResponse{ + Recipients: []sharing.RecipientInfo{ + { + Name: "test", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-shares") + ic.currentMetastore = currentMetastoreResponse + err := resourcesMap["databricks_recipient"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_recipient[] (id: test)"]) + }) +} + +func TestVolumes(t *testing.T) { + ic := importContextForTest() + ic.enableServices("uc-volumes,uc-catalogs,uc-schemas,uc-grants") + // Test importing + d := tfcatalog.ResourceVolume().ToResource().TestResourceData() + d.SetId("vtest") + d.Set("catalog_name", "ctest") + d.Set("schema_name", "stest") + err := resourcesMap["databricks_volume"].Import(ic, &resource{ + ID: "vtest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 3, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: volume/vtest)"]) + assert.True(t, ic.testEmits["databricks_schema[] (id: ctest.stest)"]) + assert.True(t, ic.testEmits["databricks_catalog[] (id: ctest)"]) + + // + shouldOmitFunc := resourcesMap["databricks_volume"].ShouldOmitField + require.NotNil(t, shouldOmitFunc) + scm := tfcatalog.ResourceVolume().Schema + assert.False(t, shouldOmitFunc(nil, "volume_type", scm["volume_type"], d)) + assert.False(t, shouldOmitFunc(nil, "name", scm["name"], d)) + d.Set("volume_type", "MANAGED") + d.Set("storage_location", "s3://abc/") + assert.True(t, shouldOmitFunc(nil, "volume_type", scm["volume_type"], d)) + assert.True(t, shouldOmitFunc(nil, "storage_location", scm["storage_location"], d)) + assert.True(t, shouldOmitFunc(nil, "storage_location", scm["storage_location"], d)) +} + +func TestRegisteredModels(t *testing.T) { + ic := importContextForTest() + ic.enableServices("uc-models,uc-catalogs,uc-schemas,uc-grants") + // Test importing + d := tfcatalog.ResourceRegisteredModel().ToResource().TestResourceData() + d.SetId("mtest") + d.Set("catalog_name", "ctest") + d.Set("schema_name", "stest") + err := resourcesMap["databricks_registered_model"].Import(ic, &resource{ + ID: "mtest", + Data: d, + }) + assert.NoError(t, err) + require.Equal(t, 3, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_grants[] (id: model/mtest)"]) + assert.True(t, 
ic.testEmits["databricks_schema[] (id: ctest.ctest)"]) + assert.True(t, ic.testEmits["databricks_catalog[] (id: ctest)"]) + + // + shouldOmitFunc := resourcesMap["databricks_registered_model"].ShouldOmitField + require.NotNil(t, shouldOmitFunc) + scm := tfcatalog.ResourceRegisteredModel().Schema + assert.True(t, shouldOmitFunc(nil, "storage_location", scm["storage_location"], d)) + d.Set("storage_location", "s3://abc/") + assert.False(t, shouldOmitFunc(nil, "storage_location", scm["storage_location"], d)) + assert.False(t, shouldOmitFunc(nil, "name", scm["name"], d)) +} + +func TestListShares(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + ReuseRequest: true, + Method: "GET", + Resource: "/api/2.1/unity-catalog/shares", + Response: sharing.ListSharesResponse{ + Shares: []sharing.ShareInfo{ + { + Name: "test", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTestWithClient(ctx, client) + ic.enableServices("uc-shares") + ic.currentMetastore = currentMetastoreResponse + err := resourcesMap["databricks_share"].List(ic) + assert.NoError(t, err) + require.Equal(t, 1, len(ic.testEmits)) + assert.True(t, ic.testEmits["databricks_share[] (id: test)"]) + }) +} + +func TestAuxUcFunctions(t *testing.T) { + // Metastore Assignment + d := tfcatalog.ResourceMetastoreAssignment().ToResource().TestResourceData() + d.Set("workspace_id", 123) + assert.Equal(t, "ws_123", resourcesMap["databricks_metastore_assignment"].Name(nil, d)) + + shouldOmitFunc := resourcesMap["databricks_metastore_assignment"].ShouldOmitField + require.NotNil(t, shouldOmitFunc) + d.Set("default_catalog_name", "") + + scm := tfcatalog.ResourceMetastoreAssignment().Schema + assert.True(t, shouldOmitFunc(nil, "default_catalog_name", scm["default_catalog_name"], d)) + assert.False(t, shouldOmitFunc(nil, "metastore_id", scm["metastore_id"], d)) + + // Metastore + d = tfcatalog.ResourceMetastore().ToResource().TestResourceData() + d.SetId("1234") + assert.Equal(t, "1234", resourcesMap["databricks_metastore"].Name(nil, d)) + d.Set("name", "test") + assert.Equal(t, "test", resourcesMap["databricks_metastore"].Name(nil, d)) + + shouldOmitFunc = resourcesMap["databricks_metastore"].ShouldOmitField + require.NotNil(t, shouldOmitFunc) + scm = tfcatalog.ResourceMetastore().Schema + assert.True(t, shouldOmitFunc(nil, "default_data_access_config_id", scm["default_data_access_config_id"], d)) + assert.True(t, shouldOmitFunc(nil, "owner", scm["owner"], d)) + d.Set("owner", "test") + assert.False(t, shouldOmitFunc(nil, "owner", scm["owner"], d)) + assert.False(t, shouldOmitFunc(nil, "name", scm["name"], d)) + + // Connections + d = tfcatalog.ResourceConnection().ToResource().TestResourceData() + d.SetId("1234") + assert.Equal(t, "1234", resourcesMap["databricks_connection"].Name(nil, d)) + d.Set("name", "test") + d.Set("connection_type", "db") + assert.Equal(t, "db_test", resourcesMap["databricks_connection"].Name(nil, d)) + + // Catalogs + d = tfcatalog.ResourceCatalog().ToResource().TestResourceData() + d.SetId("test") + shouldOmitFunc = resourcesMap["databricks_catalog"].ShouldOmitField + require.NotNil(t, shouldOmitFunc) + scm = tfcatalog.ResourceCatalog().Schema + d.Set("isolation_mode", "OPEN") + assert.True(t, shouldOmitFunc(nil, "isolation_mode", scm["isolation_mode"], d)) + d.Set("isolation_mode", "ISOLATED") + assert.False(t, shouldOmitFunc(nil, "isolation_mode", scm["isolation_mode"], d)) + assert.False(t, shouldOmitFunc(nil, "name", scm["name"], d)) +} diff --git 
a/exporter/model.go b/exporter/model.go index 0a932a16d2..a2d57ff1e8 100644 --- a/exporter/model.go +++ b/exporter/model.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -35,6 +36,16 @@ type resourceApproximation struct { Instances []instanceApproximation `json:"instances"` } +func (ra *resourceApproximation) Get(attr string) (any, bool) { + for _, i := range ra.Instances { + v, found := i.Attributes[attr] + if found { + return v, found + } + } + return nil, false +} + // TODO: think if something like trie may help here... type resourceApproximationHolder struct { mutex sync.RWMutex @@ -162,10 +173,15 @@ const ( MatchCaseInsensitive = "caseinsensitive" // MatchPrefix is to specify that prefix of value should match MatchPrefix = "prefix" + // MatchLongestPrefix is to specify that prefix of value should match, and select the longest value from list of candidates + MatchLongestPrefix = "longestprefix" // MatchRegexp is to specify that the group extracted from value should match MatchRegexp = "regexp" ) +type valueTransformFunc func(string) string +type isValidAproximationFunc func(ic *importContext, res *resource, sr *resourceApproximation, origPath string) bool + type reference struct { // path to a given field, like, `cluster_id`, `access_control.user_name``, ... For references blocks/arrays, the `.N` component isn't required Path string @@ -181,6 +197,13 @@ type reference struct { File bool // regular expression (if MatchType == "regexp") must define a group that will be used to extract value to match Regexp *regexp.Regexp + // functions to transform match and current search value + MatchValueTransformFunc valueTransformFunc + SearchValueTransformFunc valueTransformFunc + // function to evaluate fit of the resource approximation found to the resource... 
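+	// (it should return true when the candidate resource approximation is an acceptable match for the resource being imported)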
+ IsValidApproximation isValidAproximationFunc + // if we should skip direct lookups (for example, we need it for UC schemas matching) + SkipDirectLookup bool } func (r reference) MatchAttribute() string { @@ -265,8 +288,12 @@ func (r *resource) ImportResource(ic *importContext) { log.Printf("[ERROR] Searching %s is not available", r) return } - if err := ir.Search(ic, r); err != nil { - log.Printf("[ERROR] Cannot search for a resource %s: %v", err, r) + err := runWithRetries(func() error { + return ir.Search(ic, r) + }, + fmt.Sprintf("searching of %v", r)) + if err != nil { + log.Printf("[ERROR] Error searching %s#%s: %v", r.Resource, r.ID, err) return } if r.ID == "" { @@ -288,7 +315,11 @@ func (r *resource) ImportResource(ic *importContext) { if apiVersion != "" { ctx = context.WithValue(ctx, common.Api, apiVersion) } - if dia := pr.ReadContext(ctx, r.Data, ic.Client); dia != nil { + dia := runWithRetries(func() diag.Diagnostics { + return pr.ReadContext(ctx, r.Data, ic.Client) + }, + fmt.Sprintf("reading %s#%s", r.Resource, r.ID)) + if dia != nil { log.Printf("[ERROR] Error reading %s#%s: %v", r.Resource, r.ID, dia) return } @@ -298,7 +329,11 @@ func (r *resource) ImportResource(ic *importContext) { } r.Name = ic.ResourceName(r) if ir.Import != nil { - if err := ir.Import(ic, r); err != nil { + err := runWithRetries(func() error { + return ir.Import(ic, r) + }, + fmt.Sprintf("importing of %s#%s", r.Resource, r.ID)) + if err != nil { log.Printf("[ERROR] Failed custom import of %s: %s", r, err) return } diff --git a/exporter/model_test.go b/exporter/model_test.go new file mode 100644 index 0000000000..b5329840dc --- /dev/null +++ b/exporter/model_test.go @@ -0,0 +1,21 @@ +package exporter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResourceApproaximationGet(t *testing.T) { + _, found := (&resourceApproximation{}).Get("test") + assert.False(t, found) + + v, found := (&resourceApproximation{ + Instances: []instanceApproximation{ + {Attributes: map[string]any{"test": "42"}}, + }, + }).Get("test") + require.True(t, found) + assert.Equal(t, "42", v.(string)) +} diff --git a/exporter/util.go b/exporter/util.go index 7d7c380627..95e776b0d9 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -218,17 +218,27 @@ func (ic *importContext) IsUserOrServicePrincipalDirectory(path, prefix string, } func (ic *importContext) emitRepoByPath(path string) { - ic.Emit(&resource{ - Resource: "databricks_repo", - Attribute: "path", - Value: strings.Join(strings.SplitN(path, "/", 5)[:4], "/"), - }) + // Path to Repos objects consits of following parts: /Repos, folder, repository, path inside Repo. + // Because it starts with `/`, it will produce empty string as first element in the slice. + // And we're stopping splitting to avoid producing too many not necessary parts, so we have 5 parts only. + parts := strings.SplitN(path, "/", 5) + if len(parts) >= 4 { + ic.Emit(&resource{ + Resource: "databricks_repo", + Attribute: "path", + Value: strings.Join(parts[:4], "/"), + }) + } else { + log.Printf("[WARN] Incorrect Repos path") + } } func (ic *importContext) emitWorkspaceFileOrRepo(path string) { if strings.HasPrefix(path, "/Repos") { ic.emitRepoByPath(path) } else { + // TODO: wrap this into ic.shouldEmit... 
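+			// Non-Repos workspace paths are emitted as databricks_workspace_file resources.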
+ // TODO: strip /Workspace prefix if it's provided ic.Emit(&resource{ Resource: "databricks_workspace_file", ID: path, @@ -240,6 +250,7 @@ func (ic *importContext) emitNotebookOrRepo(path string) { if strings.HasPrefix(path, "/Repos") { ic.emitRepoByPath(path) } else { + // TODO: strip /Workspace prefix if it's provided ic.maybeEmitWorkspaceObject("databricks_notebook", path) } } @@ -879,7 +890,7 @@ func resourceOrDataBlockBody(ic *importContext, body *hclwrite.Body, r *resource } resourceBlock := body.AppendNewBlock(blockType, []string{r.Resource, r.Name}) return ic.dataToHcl(ic.Importables[r.Resource], - []string{}, ic.Resources[r.Resource], r.Data, resourceBlock.Body()) + []string{}, ic.Resources[r.Resource], r, resourceBlock.Body()) } func generateUniqueID(v string) string { @@ -936,6 +947,17 @@ func (ic *importContext) enableServices(services string) { for _, s := range strings.Split(services, ",") { ic.services[strings.TrimSpace(s)] = struct{}{} } + for s := range ic.listing { // Add all services mentioned in the listing + ic.services[strings.TrimSpace(s)] = struct{}{} + } +} + +func (ic *importContext) enableListing(listing string) { + ic.listing = map[string]struct{}{} + for _, s := range strings.Split(listing, ",") { + ic.listing[strings.TrimSpace(s)] = struct{}{} + ic.services[strings.TrimSpace(s)] = struct{}{} + } } func (ic *importContext) emitSqlParentDirectory(parent string) { @@ -963,9 +985,13 @@ func (ic *importContext) shouldSkipWorkspaceObject(object workspace.ObjectStatus } modifiedAt := wsObjectGetModifiedAt(object) if ic.incremental && modifiedAt < updatedSinceMs { - log.Printf("[DEBUG] skipping '%s' that was modified at %d (last active=%d)", - object.Path, modifiedAt, updatedSinceMs) - return true + p := ic.oldWorkspaceObjectMapping[object.ObjectID] + if p == "" || p == object.Path { + log.Printf("[DEBUG] skipping '%s' that was modified at %d (last active=%d)", + object.Path, modifiedAt, updatedSinceMs) + return true + } + log.Printf("[DEBUG] Different path for object %d. 
Old='%s', New='%s'", object.ObjectID, p, object.Path) } if !ic.MatchesName(object.Path) { return true @@ -1088,7 +1114,6 @@ type directoryInfo struct { // constants related to the parallel listing const ( - directoryListingMaxAttempts = 3 envVarListParallelism = "EXPORTER_WS_LIST_PARALLELISM" envVarDirectoryChannelSize = "EXPORTER_DIRECTORIES_CHANNEL_SIZE" defaultWorkersPoolSize = 10 @@ -1101,8 +1126,11 @@ func recursiveAddPathsParallel(a workspace.NotebooksAPI, directory directoryInfo notebookInfoList, err := a.ListInternalImpl(directory.Path) if err != nil { log.Printf("[WARN] error listing '%s': %v", directory.Path, err) - if directory.Attempts < directoryListingMaxAttempts { + if isRetryableError(err.Error(), directory.Attempts) { wg.Add(1) + log.Printf("[INFO] attempt %d of retrying listing of '%s' after error: %v", + directory.Attempts+1, directory.Path, err) + time.Sleep(time.Duration(retryDelaySeconds) * time.Second) dirChannel <- directoryInfo{Path: directory.Path, Attempts: directory.Attempts + 1} } } @@ -1166,3 +1194,97 @@ func ListParallel(a workspace.NotebooksAPI, path string, shouldIncludeDir func(w defer answer.MU.Unlock() return answer.data, nil } + +var ( + maxRetries = 5 + retryDelaySeconds = 2 + retriableErrors = []string{"context deadline exceeded", "Error handling request", "Timed out after "} +) + +func isRetryableError(err string, i int) bool { + if i < (maxRetries - 1) { + for _, msg := range retriableErrors { + if strings.Contains(err, msg) { + return true + } + } + } + return false +} + +func runWithRetries[ERR any](runFunc func() ERR, msg string) ERR { + var err ERR + delay := 1 + for i := 0; i < maxRetries; i++ { + err = runFunc() + valOf := reflect.ValueOf(&err).Elem() + if valOf.IsNil() || valOf.IsZero() { + break + } + if !isRetryableError(fmt.Sprintf("%v", err), i) { + log.Printf("[ERROR] Error %s after %d retries: %v", msg, i, err) + return err + } + delay = delay * retryDelaySeconds + log.Printf("[INFO] next retry (%d) for %s after %d seconds", (i + 1), msg, delay) + time.Sleep(time.Duration(delay) * time.Second) + } + return err +} + +func shouldOmitForUnityCatalog(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool { + if pathString == "owner" { + return d.Get(pathString).(string) == "" + } + return defaultShouldOmitFieldFunc(ic, pathString, as, d) +} + +func appendEndingSlashToDirName(dir string) string { + if dir == "" || dir[len(dir)-1] == '/' { + return dir + } + return dir + "/" +} + +func isMatchingCatalogAndSchema(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { + // log.Printf("[DEBUG] matchingCatalogAndSchema: resource: %s, origPath=%s", res.Resource, origPath) + res_catalog_name := res.Data.Get("catalog_name").(string) + res_schema_name := res.Data.Get("schema_name").(string) + // log.Printf("[DEBUG] matchingCatalogAndSchema: resource: %s, catalog='%s' schema='%s'", + // res.Resource, res_catalog_name, res_schema_name) + ra_catalog_name, cat_found := ra.Get("catalog_name") + ra_schema_name, schema_found := ra.Get("name") + // log.Printf("[DEBUG] matchingCatalogAndSchema: approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v)", + // ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found) + if !cat_found || !schema_found { + log.Printf("[WARN] Can't find attributes in approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v). 
Resource: %s, catalog='%s', schema='%s'", + ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found, res.Resource, res_catalog_name, res_schema_name) + return true + } + + result := ra_catalog_name.(string) == res_catalog_name && ra_schema_name.(string) == res_schema_name + // log.Printf("[DEBUG] matchingCatalogAndSchema: result: %v approximation: catalog='%v' schema='%v', res: catalog='%s' schema='%s'", + // result, ra_catalog_name, ra_schema_name, res_catalog_name, res_schema_name) + return result +} + +func isMatchingShareRecipient(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { + shareName, ok := res.Data.GetOk("share") + // principal := res.Data.Get(origPath) + // log.Printf("[DEBUG] isMatchingShareRecipient: origPath='%s', ra.Type='%s', shareName='%v', ok? %v, principal='%v'", + // origPath, ra.Type, shareName, ok, principal) + + return ok && shareName.(string) != "" +} + +func isMatchignShareObject(obj string) isValidAproximationFunc { + return func(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { + objPath := strings.Replace(origPath, ".name", ".data_object_type", 1) + objType, ok := res.Data.GetOk(objPath) + // name := res.Data.Get(origPath) + // log.Printf("[DEBUG] isMatchignShareObject: %s origPath='%s', ra.Type='%s', name='%v', objPath='%s' objType='%v' ok? %v", + // obj, origPath, ra.Type, name, objPath, objType, ok) + + return ok && objType.(string) == obj + } +} diff --git a/go.mod b/go.mod index 6a1cb0de85..bd4612deb6 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,17 @@ module github.com/databricks/terraform-provider-databricks go 1.21 require ( - github.com/databricks/databricks-sdk-go v0.30.0 + github.com/databricks/databricks-sdk-go v0.33.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.19.1 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.32.0 github.com/stretchr/testify v1.8.4 - github.com/zclconf/go-cty v1.14.1 + github.com/zclconf/go-cty v1.14.2 golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 - golang.org/x/mod v0.14.0 + golang.org/x/mod v0.15.0 ) require ( @@ -26,7 +26,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -44,9 +44,9 @@ require ( github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hc-install v0.6.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.19.0 // indirect - github.com/hashicorp/terraform-json v0.18.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.20.0 // indirect + github.com/hashicorp/terraform-exec v0.20.0 // indirect + github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.21.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -64,21 +64,21 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect 
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.15.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel v1.22.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.22.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.154.0 // indirect + google.golang.org/api v0.161.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect + google.golang.org/grpc v1.61.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index bcb7ae19b0..65a5768ef2 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.30.0 h1:nwJcnvW7NfV0oLrXmGcBDcefOZn4DIEByK0FFTkXCF0= -github.com/databricks/databricks-sdk-go v0.30.0/go.mod h1:QB64wT8EmR9T4ZPqeTRKjfIF4tPZuP9M9kM8Hcr019Q= +github.com/databricks/databricks-sdk-go v0.33.0 h1:0ldeP8aPnpKLV/mvNKsOVijOaLLo6TxRGdIwrEf2rlQ= +github.com/databricks/databricks-sdk-go v0.33.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -49,8 +49,8 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= @@ -91,8 +91,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -126,16 +126,16 @@ github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5R github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= -github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= -github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= -github.com/hashicorp/terraform-plugin-go v0.20.0 h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= -github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-plugin-go v0.21.0 h1:VSjdVQYNDKR0l2pi3vsFK1PdMQrw6vGOshJXMNFeVc0= +github.com/hashicorp/terraform-plugin-go v0.21.0/go.mod h1:piJp8UmO1uupCvC9/H74l2C6IyKG0rW4FDedIpwW5RQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.32.0 h1:7xdO9aOXVmhvMxNAq8UloyyqW0EEzyAY37llSTHJgjo= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.32.0/go.mod h1:LxQzs7AQl/5JE1IGFd6LX8E4A0InRJ/7s245gOmsejA= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 
h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -205,27 +205,27 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= 
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= @@ -234,8 +234,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -248,18 +248,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -278,15 +278,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -311,8 +311,8 @@ golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= -google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= +google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= +google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -320,15 +320,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 
h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -340,8 +340,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/acceptance/catalog_test.go b/internal/acceptance/catalog_test.go index 9fdf29a247..ae0bbe3b65 100644 --- a/internal/acceptance/catalog_test.go +++ b/internal/acceptance/catalog_test.go @@ -70,6 +70,16 @@ func TestUcAccCatalogUpdate(t *testing.T) { } owner = "account users" }`, + }, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + owner = "{env.TEST_DATA_ENG_GROUP}" + }`, }, step{ Template: ` resource "databricks_catalog" "sandbox" { diff --git a/internal/acceptance/connection_test.go b/internal/acceptance/connection_test.go index 714ea33a53..7cf6ec5093 100644 --- a/internal/acceptance/connection_test.go +++ b/internal/acceptance/connection_test.go @@ -21,6 +21,33 @@ func connectionTemplateWithOwner(host string, owner string) string { } `, host, owner) } + +func 
connectionTemplateWithoutOwner() string { + return ` + resource "databricks_connection" "this" { + name = "name-{var.STICKY_RANDOM}" + connection_type = "BIGQUERY" + comment = "test" + options = { + GoogleServiceAccountKeyJson = <<-EOT + { + "type": "service_account", + "project_id": "PROJECT_ID", + "private_key_id": "KEY_ID", + "private_key": "-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n", + "client_email": "SERVICE_ACCOUNT_EMAIL", + "client_id": "CLIENT_ID", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL", + "universe_domain": "googleapis.com" + } + EOT + } + } + ` +} func TestUcAccConnectionsResourceFullLifecycle(t *testing.T) { unityWorkspaceLevel(t, step{ Template: connectionTemplateWithOwner("test.mysql.database.azure.com", "account users"), @@ -30,3 +57,11 @@ func TestUcAccConnectionsResourceFullLifecycle(t *testing.T) { Template: connectionTemplateWithOwner("test.mysql.database.azure.com", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) } + +func TestUcAccConnectionsWithoutOwnerResourceFullLifecycle(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: connectionTemplateWithoutOwner(), + }, step{ + Template: connectionTemplateWithoutOwner(), + }) +} diff --git a/internal/acceptance/data_storage_credential_test.go b/internal/acceptance/data_storage_credential_test.go new file mode 100755 index 0000000000..3c77462d9c --- /dev/null +++ b/internal/acceptance/data_storage_credential_test.go @@ -0,0 +1,39 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/require" +) + +func checkStorageCredentialDataSourcePopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + r, ok := s.Modules[0].Resources["data.databricks_storage_credential.this"] + require.True(t, ok, "data.databricks_storage_credential.this has to be there") + storage_credential_info := r.Primary.Attributes["storage_credential_info.0.%"] + if storage_credential_info == "" { + return fmt.Errorf("StorageCredentialInfo is empty: %v", r.Primary.Attributes) + } + return nil + } +} +func TestUcAccDataSourceStorageCredential(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_storage_credential" "external" { + name = "cred-{var.RANDOM}" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + skip_validation = true + comment = "Managed by TF" + } + + data "databricks_storage_credential" "this" { + name = databricks_storage_credential.external.name + }`, + Check: checkStorageCredentialDataSourcePopulated(t), + }) +} diff --git a/internal/acceptance/data_storage_credentials_test.go b/internal/acceptance/data_storage_credentials_test.go new file mode 100644 index 0000000000..43a5d3781d --- /dev/null +++ b/internal/acceptance/data_storage_credentials_test.go @@ -0,0 +1,27 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestUcAccDataSourceStorageCredentials(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + data "databricks_storage_credentials" "this" { + }`, + Check: func(s *terraform.State) error { + r, ok := s.RootModule().Resources["data.databricks_storage_credentials.this"] + if !ok { + 
return fmt.Errorf("data not found in state") + } + names := r.Primary.Attributes["names.#"] + if names == "" { + return fmt.Errorf("names are empty: %v", r.Primary.Attributes) + } + return nil + }, + }) +} diff --git a/internal/acceptance/data_volumes_test.go b/internal/acceptance/data_volumes_test.go new file mode 100644 index 0000000000..1aafe87862 --- /dev/null +++ b/internal/acceptance/data_volumes_test.go @@ -0,0 +1,60 @@ +package acceptance + +import ( + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkDataSourceVolumesPopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + _, ok := s.Modules[0].Resources["data.databricks_volumes.this"] + require.True(t, ok, "data.databricks_volumes.this has to be there") + num_volumes, _ := strconv.Atoi(s.Modules[0].Outputs["volumes"].Value.(string)) + assert.GreaterOrEqual(t, num_volumes, 1) + return nil + } +} +func TestUcAccDataSourceVolumes(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_volume" "this" { + name = "volume_data_source_test" + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + volume_type = "MANAGED" + } + + data "databricks_volumes" "this" { + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + depends_on = [ databricks_volume.this ] + } + + output "volumes" { + value = length(data.databricks_volumes.this.ids) + } + `, + Check: checkDataSourceVolumesPopulated(t), + }) +} diff --git a/internal/acceptance/default_namespace_test.go b/internal/acceptance/default_namespace_test.go index 9a41ae22b2..06a5d13d61 100644 --- a/internal/acceptance/default_namespace_test.go +++ b/internal/acceptance/default_namespace_test.go @@ -24,7 +24,7 @@ func TestAccDefaultNamespaceSetting(t *testing.T) { ctx = context.WithValue(ctx, common.Api, common.API_2_1) w, err := client.WorkspaceClient() assert.NoError(t, err) - res, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, settings.ReadDefaultWorkspaceNamespaceRequest{ + res, err := w.Settings.GetDefaultNamespaceSetting(ctx, settings.GetDefaultNamespaceSettingRequest{ Etag: id, }) assert.NoError(t, err) @@ -46,9 +46,9 @@ func TestAccDefaultNamespaceSetting(t *testing.T) { assert.NoError(t, err) // Terraform Check returns the latest resource status before it is destroyed, which has an outdated eTag. // We are making an update call to get the correct eTag in the response error. 
- _, err = w.Settings.UpdateDefaultWorkspaceNamespace(ctx, settings.UpdateDefaultWorkspaceNamespaceRequest{ + _, err = w.Settings.UpdateDefaultNamespaceSetting(ctx, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Namespace: settings.StringMessage{ Value: "this_call_should_fail", }, @@ -61,7 +61,7 @@ func TestAccDefaultNamespaceSetting(t *testing.T) { assert.FailNow(t, "cannot parse error message %v", err) } etag := aerr.Details[0].Metadata["etag"] - _, err = w.Settings.ReadDefaultWorkspaceNamespace(ctx, settings.ReadDefaultWorkspaceNamespaceRequest{ + _, err = w.Settings.GetDefaultNamespaceSetting(ctx, settings.GetDefaultNamespaceSettingRequest{ Etag: etag, }) if !errors.As(err, &aerr) { diff --git a/internal/acceptance/external_location_test.go b/internal/acceptance/external_location_test.go index 0352382129..7e219bed2a 100644 --- a/internal/acceptance/external_location_test.go +++ b/internal/acceptance/external_location_test.go @@ -91,6 +91,10 @@ func TestUcAccExternalLocationUpdate(t *testing.T) { Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment", "account users") + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment", "account users") + grantsTemplateForExternalLocation, + }, step{ + Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment", "{env.TEST_DATA_ENG_GROUP}") + + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment", "{env.TEST_DATA_ENG_GROUP}") + + grantsTemplateForExternalLocation, }, step{ Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}") + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}") + diff --git a/internal/acceptance/file_test.go b/internal/acceptance/file_test.go new file mode 100644 index 0000000000..62e41e2bf4 --- /dev/null +++ b/internal/acceptance/file_test.go @@ -0,0 +1,318 @@ +package acceptance + +import ( + "context" + "io" + "regexp" + "strings" + "testing" + + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUcAccFileDontUpdateIfNoChange(t *testing.T) { + createdTime := "" + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.True(t, m.LastModified != "") + createdTime = m.LastModified + return nil + }), + }, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { 
+ name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.Equal(t, m.LastModified, createdTime) + return nil + }), + }) +} + +func TestUcAccFileUpdateOnLocalChange(t *testing.T) { + createdTime := "" + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjZA==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.True(t, m.LastModified != "") + createdTime = m.LastModified + return nil + }), + }, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.NotEqual(t, m.LastModified, createdTime) + return nil + }), + }) +} + +func TestUcAccFileUpdateServerChange(t *testing.T) { + createdTime := "" + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + // We are modifying the resource during the check stage, which causes the TF validation to fail. Ignoring the error. 
+ ExpectError: regexp.MustCompile(` the plan was not empty`), + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.True(t, m.LastModified != "") + createdTime = m.LastModified + + // Modify the file manually to test next step + err = w.Files.Upload(ctx, files.UploadRequest{Contents: io.NopCloser(strings.NewReader("acdc")), FilePath: id}) + if err != nil { + return err + } + return nil + }), + }, + step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + if err != nil { + return err + } + m, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: id}) + if err != nil { + return err + } + require.NotEqual(t, m.LastModified, createdTime) + + raw, err := w.Files.DownloadByFilePath(ctx, id) + require.NoError(t, err) + contents, err := io.ReadAll(raw.Contents) + require.NoError(t, err) + // Check that we updated the file + assert.Equal(t, "abc\n", string(contents)) + return nil + }), + }) +} + +func TestUcAccFileFullLifeCycle(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + }, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python2.py" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + }) +} + +func TestUcAccFileBase64FullLifeCycle(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjCg==" + path = 
"/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + }, step{ + Template: ` + resource "databricks_schema" "this" { + name = "schema-{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "MANAGED" + } + + resource "databricks_file" "this" { + content_base64 = "YWJjDg==" + path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" + }`, + }) +} diff --git a/internal/acceptance/grant_test.go b/internal/acceptance/grant_test.go index 08d328db38..19898e477d 100644 --- a/internal/acceptance/grant_test.go +++ b/internal/acceptance/grant_test.go @@ -1,6 +1,8 @@ package acceptance import ( + "fmt" + "regexp" "strings" "testing" ) @@ -99,8 +101,36 @@ resource "databricks_grant" "some" { func TestUcAccGrant(t *testing.T) { unityWorkspaceLevel(t, step{ Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, - step{ - Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }) + }, step{ + Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), + }) +} + +func grantTemplateForNamePermissionChange(suffix string, permission string) string { + return fmt.Sprintf(` + resource "databricks_storage_credential" "external" { + name = "cred-{var.STICKY_RANDOM}%s" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + comment = "Managed by TF" + } + + resource "databricks_grant" "cred" { + storage_credential = databricks_storage_credential.external.id + principal = "{env.TEST_DATA_ENG_GROUP}" + privileges = ["%s"] + } + `, suffix, permission) +} + +func TestUcAccGrantForIdChange(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: grantTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), + }, step{ + Template: grantTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), + }, step{ + Template: grantTemplateForNamePermissionChange("-fail", "abc"), + ExpectError: regexp.MustCompile(`cannot create grant: Privilege abc is not applicable to this entity`), + }) } diff --git a/internal/acceptance/grants_test.go b/internal/acceptance/grants_test.go index 303e325f2d..23beb2fe3b 100644 --- a/internal/acceptance/grants_test.go +++ b/internal/acceptance/grants_test.go @@ -1,6 +1,8 @@ package acceptance import ( + "fmt" + "regexp" "strings" "testing" ) @@ -105,8 +107,38 @@ resource "databricks_grants" "some" { func TestUcAccGrants(t *testing.T) { unityWorkspaceLevel(t, step{ Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, - step{ - Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }) + }, step{ + Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), + }) +} + +func grantsTemplateForNamePermissionChange(suffix string, permission string) string { + return fmt.Sprintf(` + resource "databricks_storage_credential" "external" { + name = "cred-{var.STICKY_RANDOM}%s" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + comment = "Managed by TF" + } + + resource "databricks_grants" "cred" { + storage_credential = databricks_storage_credential.external.id + grant { + principal = "{env.TEST_DATA_ENG_GROUP}" + privileges = ["%s"] + } + } + `, suffix, permission) +} + +func 
TestUcAccGrantsForIdChange(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: grantsTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), + }, step{ + Template: grantsTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), + }, step{ + Template: grantsTemplateForNamePermissionChange("-fail", "abc"), + ExpectError: regexp.MustCompile(`Error: cannot create grants: Privilege abc is not applicable to this entity`), + }) } diff --git a/internal/acceptance/job_test.go b/internal/acceptance/job_test.go index de1224acc1..781d9b99b8 100644 --- a/internal/acceptance/job_test.go +++ b/internal/acceptance/job_test.go @@ -90,8 +90,22 @@ func TestAccJobTasks(t *testing.T) { notebook_task { notebook_path = databricks_notebook.this.path + base_parameters = { + "param_0" = "{{job.parameters.empty_default}}" + "param_1" = "{{job.parameters.non_empty_default}}" + } } } + + parameter { + name = "empty_default" + default = "" + } + + parameter { + name = "non_empty_default" + default = "non_empty" + } }`, }) } diff --git a/internal/acceptance/metastore_test.go b/internal/acceptance/metastore_test.go index 5861a158fa..2250567bff 100644 --- a/internal/acceptance/metastore_test.go +++ b/internal/acceptance/metastore_test.go @@ -110,6 +110,13 @@ func runMetastoreTestWithOwnerUpdates(t *testing.T, extraAttributes map[string]a owner = "account users" %s }`, template), + }, step{ + Template: fmt.Sprintf(`resource "databricks_metastore" "this" { + name = "{var.STICKY_RANDOM}" + force_destroy = true + owner = "{env.TEST_DATA_ENG_GROUP}" + %s + }`, template), }, step{ Template: fmt.Sprintf(`resource "databricks_metastore" "this" { name = "{var.STICKY_RANDOM}-updated" diff --git a/internal/acceptance/recipient_test.go b/internal/acceptance/recipient_test.go index 10a91177fa..e59bf90d1c 100644 --- a/internal/acceptance/recipient_test.go +++ b/internal/acceptance/recipient_test.go @@ -50,6 +50,8 @@ func TestUcAccUpdateRecipientDb2Open(t *testing.T) { Template: recipientTemplateWithOwner("made by terraform", "account users"), }, step{ Template: recipientTemplateWithOwner("made by terraform -- updated comment", "account users"), + }, step{ + Template: recipientTemplateWithOwner("made by terraform -- updated comment", "{env.TEST_DATA_ENG_GROUP}"), }, step{ Template: recipientTemplateWithOwner("made by terraform -- updated comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) diff --git a/internal/acceptance/restrict_workspace_admins_test.go b/internal/acceptance/restrict_workspace_admins_test.go new file mode 100644 index 0000000000..339a488115 --- /dev/null +++ b/internal/acceptance/restrict_workspace_admins_test.go @@ -0,0 +1,75 @@ +package acceptance + +import ( + "context" + "errors" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/stretchr/testify/assert" +) + +func TestAccRestrictWorkspaceAdminsSetting(t *testing.T) { + workspaceLevel(t, step{ + Template: ` + resource "databricks_restrict_workspace_admins_setting" "this" { + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } + } + `, + Check: resourceCheck("databricks_restrict_workspace_admins_setting.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + ctx = context.WithValue(ctx, common.Api, common.API_2_1) + w, err := client.WorkspaceClient() + assert.NoError(t, err) + res, err := w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, 
settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: id, + }) + assert.NoError(t, err) + // Check that the resource has been created and that it has the correct value. + assert.Equal(t, res.RestrictWorkspaceAdmins.Status.String(), "RESTRICT_TOKENS_AND_JOB_RUN_AS") + return nil + }), + }, + step{ + Template: `resource "databricks_restrict_workspace_admins_setting" "this" { + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } + }`, + Destroy: true, + Check: resourceCheck("databricks_restrict_workspace_admins_setting.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + ctx = context.WithValue(ctx, common.Api, common.API_2_1) + w, err := client.WorkspaceClient() + assert.NoError(t, err) + // Terraform Check returns the latest resource status before it is destroyed, which has an outdated eTag. + // We are making an update call to get the correct eTag in the response error. + _, err = w.Settings.UpdateRestrictWorkspaceAdminsSetting(ctx, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + Setting: settings.RestrictWorkspaceAdminsSetting{ + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + }, + FieldMask: "restrict_workspace_admins.status", + }) + assert.Error(t, err) + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + assert.FailNow(t, "cannot parse error message %v", err) + } + etag := aerr.Details[0].Metadata["etag"] + res, err := w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: etag, + }) + // we should not be getting any error + assert.NoError(t, err) + // workspace should go back to default + assert.Equal(t, res.RestrictWorkspaceAdmins.Status.String(), "ALLOW_ALL") + return nil + }), + }, + ) +} diff --git a/internal/acceptance/schema_test.go b/internal/acceptance/schema_test.go index df2098ceb8..246668d440 100644 --- a/internal/acceptance/schema_test.go +++ b/internal/acceptance/schema_test.go @@ -92,6 +92,8 @@ func TestUcAccSchemaUpdate(t *testing.T) { Template: catalogTemplate + schemaTemplateWithOwner("this database is managed by terraform", "account users"), }, step{ Template: catalogTemplate + schemaTemplateWithOwner("this database is managed by terraform -- updated comment", "account users"), + }, step{ + Template: catalogTemplate + schemaTemplateWithOwner("this database is managed by terraform -- updated comment", "{env.TEST_DATA_ENG_GROUP}"), }, step{ Template: catalogTemplate + schemaTemplateWithOwner("this database is managed by terraform -- updated comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) diff --git a/internal/acceptance/share_test.go b/internal/acceptance/share_test.go index 2ce09769bf..afc3667ca4 100644 --- a/internal/acceptance/share_test.go +++ b/internal/acceptance/share_test.go @@ -129,6 +129,8 @@ func TestUcAccUpdateShare(t *testing.T) { Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("c", "account users"), }, step{ Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "account users"), + }, step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "{env.TEST_DATA_ENG_GROUP}"), }, step{ Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("f", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) diff --git a/internal/acceptance/sql_table_test.go b/internal/acceptance/sql_table_test.go index c91291d350..4fea72794d 100644 --- 
a/internal/acceptance/sql_table_test.go +++ b/internal/acceptance/sql_table_test.go @@ -32,7 +32,7 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { } column { name = "name" - type = "varchar(64)" + type = "string" } comment = "this table is managed by terraform" }`, diff --git a/internal/acceptance/storage_credential_test.go b/internal/acceptance/storage_credential_test.go index 8d13a8efc2..3170b3ca8d 100644 --- a/internal/acceptance/storage_credential_test.go +++ b/internal/acceptance/storage_credential_test.go @@ -32,3 +32,22 @@ func TestUcAccStorageCredential(t *testing.T) { }) } } + +func TestAccStorageCredentialOwner(t *testing.T) { + unityAccountLevel(t, step{ + Template: ` + resource "databricks_service_principal" "test_acc_storage_credential_owner" { + display_name = "test_acc_storage_credential_owner {var.RANDOM}" + } + + resource "databricks_storage_credential" "test_acc_storage_credential_owner" { + name = "test_acc_storage_credential_owner-{var.RANDOM}" + owner = databricks_service_principal.test_acc_storage_credential_owner.application_id + metastore_id = "{env.TEST_METASTORE_ID}" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + } + `, + }) +} diff --git a/internal/acceptance/vector_search_test.go b/internal/acceptance/vector_search_test.go new file mode 100644 index 0000000000..a6033aa802 --- /dev/null +++ b/internal/acceptance/vector_search_test.go @@ -0,0 +1,30 @@ +package acceptance + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" +) + +func TestUcAccVectorSearchEndpoint(t *testing.T) { + cloudEnv := os.Getenv("CLOUD_ENV") + switch cloudEnv { + case "ucws", "azure": + default: + t.Skipf("not available on %s", cloudEnv) + } + + name := fmt.Sprintf("terraform-test-vector-search-%[1]s", + acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) + unityWorkspaceLevel(t, step{ + Template: fmt.Sprintf(` + resource "databricks_vector_search_endpoint" "this" { + name = "%s" + endpoint_type = "STANDARD" + } + `, name), + }, + ) +} diff --git a/internal/acceptance/volume_test.go b/internal/acceptance/volume_test.go index c2ba2e5d0f..8ffa282358 100644 --- a/internal/acceptance/volume_test.go +++ b/internal/acceptance/volume_test.go @@ -46,9 +46,19 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, - step{ - Template: prefixTestTemplate + ` + }, step{ + Template: prefixTestTemplate + ` + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-pqr" + owner = "{env.TEST_DATA_ENG_GROUP}" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "EXTERNAL" + storage_location = databricks_external_location.some.url + }`, + }, step{ + Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-def" comment = "comment-def" @@ -58,7 +68,7 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }) + }) } func TestUcAccVolumesResourceWithInitialOwnerAWSFullLifecycle(t *testing.T) { @@ -73,6 +83,17 @@ func TestUcAccVolumesResourceWithInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, + }, step{ + Template: prefixTestTemplate + ` + resource "databricks_volume" "this" { + name = "name-abc" + comment = "comment-abc" + 
owner = "{env.TEST_DATA_ENG_GROUP}" + catalog_name = "main" + schema_name = databricks_schema.this.name + volume_type = "EXTERNAL" + storage_location = databricks_external_location.some.url + }`, }, step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { diff --git a/jobs/resource_job.go b/jobs/resource_job.go index 7bc95c8cb3..fd2a961680 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -87,7 +87,8 @@ type SqlAlertTask struct { } type SqlFileTask struct { - Path string `json:"path"` + Path string `json:"path"` + Source string `json:"source,omitempty" tf:"suppress_diff"` } // SqlTask contains information about DBSQL task @@ -110,6 +111,7 @@ type DbtTask struct { Schema string `json:"schema,omitempty" tf:"default:default"` Catalog string `json:"catalog,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` + Source string `json:"source,omitempty" tf:"suppress_diff"` } // RunJobTask contains information about RunJobTask @@ -118,6 +120,47 @@ type RunJobTask struct { JobParameters map[string]string `json:"job_parameters,omitempty"` } +// TODO: As TF does not support recursive nesting, limit the nesting depth. Example: +// https://github.com/hashicorp/terraform-provider-aws/blob/b4a9f93a2b7323202c8904e86cff03d3f2cb006b/internal/service/wafv2/rule_group.go#L110 +type ForEachTask struct { + Concurrency int `json:"concurrency,omitempty"` + Inputs string `json:"inputs"` + Task ForEachNestedTask `json:"task"` +} + +type ForEachNestedTask struct { + TaskKey string `json:"task_key,omitempty"` + Description string `json:"description,omitempty"` + DependsOn []jobs.TaskDependency `json:"depends_on,omitempty"` + RunIf string `json:"run_if,omitempty" tf:"suppress_diff"` + + ExistingClusterID string `json:"existing_cluster_id,omitempty" tf:"group:cluster_type"` + NewCluster *clusters.Cluster `json:"new_cluster,omitempty" tf:"group:cluster_type"` + JobClusterKey string `json:"job_cluster_key,omitempty" tf:"group:cluster_type"` + ComputeKey string `json:"compute_key,omitempty" tf:"group:cluster_type"` + Libraries []libraries.Library `json:"libraries,omitempty" tf:"slice_set,alias:library"` + + NotebookTask *NotebookTask `json:"notebook_task,omitempty" tf:"group:task_type"` + SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty" tf:"group:task_type"` + SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty" tf:"group:task_type"` + SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty" tf:"group:task_type"` + PipelineTask *PipelineTask `json:"pipeline_task,omitempty" tf:"group:task_type"` + PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty" tf:"group:task_type"` + SqlTask *SqlTask `json:"sql_task,omitempty" tf:"group:task_type"` + DbtTask *DbtTask `json:"dbt_task,omitempty" tf:"group:task_type"` + RunJobTask *RunJobTask `json:"run_job_task,omitempty" tf:"group:task_type"` + ConditionTask *jobs.ConditionTask `json:"condition_task,omitempty" tf:"group:task_type"` + + EmailNotifications *jobs.TaskEmailNotifications `json:"email_notifications,omitempty" tf:"suppress_diff"` + WebhookNotifications *jobs.WebhookNotifications `json:"webhook_notifications,omitempty" tf:"suppress_diff"` + NotificationSettings *jobs.TaskNotificationSettings `json:"notification_settings,omitempty"` + TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` + MaxRetries int32 `json:"max_retries,omitempty"` + MinRetryIntervalMillis int32 `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty" 
tf:"computed"` + Health *JobHealth `json:"health,omitempty"` +} + func sortWebhookNotifications(wn *jobs.WebhookNotifications) { if wn == nil { return @@ -158,7 +201,7 @@ type GitSource struct { type JobHealthRule struct { Metric string `json:"metric,omitempty"` Operation string `json:"op,omitempty"` - Value int32 `json:"value,omitempty"` + Value int64 `json:"value,omitempty"` } type JobHealth struct { @@ -187,6 +230,7 @@ type JobTaskSettings struct { DbtTask *DbtTask `json:"dbt_task,omitempty" tf:"group:task_type"` RunJobTask *RunJobTask `json:"run_job_task,omitempty" tf:"group:task_type"` ConditionTask *jobs.ConditionTask `json:"condition_task,omitempty" tf:"group:task_type"` + ForEachTask *ForEachTask `json:"for_each_task,omitempty" tf:"group:task_type"` EmailNotifications *jobs.TaskEmailNotifications `json:"email_notifications,omitempty" tf:"suppress_diff"` WebhookNotifications *jobs.WebhookNotifications `json:"webhook_notifications,omitempty" tf:"suppress_diff"` @@ -273,7 +317,7 @@ type JobSettings struct { Queue *jobs.QueueSettings `json:"queue,omitempty"` RunAs *JobRunAs `json:"run_as,omitempty" tf:"computed"` Health *JobHealth `json:"health,omitempty"` - Parameters []JobParameterDefinition `json:"parameters,omitempty" tf:"alias:parameter"` + Parameters []jobs.JobParameterDefinition `json:"parameters,omitempty" tf:"alias:parameter"` Deployment *jobs.JobDeployment `json:"deployment,omitempty"` EditMode jobs.CreateJobEditMode `json:"edit_mode,omitempty"` } @@ -342,12 +386,6 @@ type JobParameter struct { Value string `json:"value,omitempty"` } -// Job-level parameter definitions -type JobParameterDefinition struct { - Name string `json:"name,omitempty"` - Default string `json:"default,omitempty"` -} - // RunState of the job type RunState struct { ResultState string `json:"result_state,omitempty"` diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 2457f199db..e2bb746699 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -171,7 +171,7 @@ func TestResourceJobCreate_MultiTask(t *testing.T) { { Metric: "RUN_DURATION_SECONDS", Operation: "GREATER_THAN", - Value: 3600, + Value: 50000000000, // 5 * 10^10 }, }, }, @@ -260,7 +260,7 @@ func TestResourceJobCreate_MultiTask(t *testing.T) { rules { metric = "RUN_DURATION_SECONDS" op = "GREATER_THAN" - value = 3600 + value = 50000000000 } } @@ -293,6 +293,188 @@ func TestResourceJobCreate_MultiTask(t *testing.T) { assert.Equal(t, "789", d.Id()) } +func TestResourceJobCreate_TaskOrder(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.1/jobs/create", + ExpectedRequest: JobSettings{ + Name: "Featurizer", + Tasks: []JobTaskSettings{ + { + TaskKey: "a", + ExistingClusterID: "abc", + NotebookTask: &NotebookTask{ + NotebookPath: "/a", + }, + }, + { + TaskKey: "b", + DependsOn: []jobs.TaskDependency{ + { + TaskKey: "a", + }, + }, + ExistingClusterID: "abc", + NotebookTask: &NotebookTask{ + NotebookPath: "/b", + }, + }, + { + TaskKey: "c", + DependsOn: []jobs.TaskDependency{ + { + TaskKey: "a", + }, + { + TaskKey: "b", + }, + }, + ExistingClusterID: "abc", + NotebookTask: &NotebookTask{ + NotebookPath: "/c", + }, + }, + { + TaskKey: "d", + DependsOn: []jobs.TaskDependency{ + { + TaskKey: "a", + }, + { + TaskKey: "b", + }, + { + TaskKey: "c", + }, + }, + ExistingClusterID: "abc", + NotebookTask: &NotebookTask{ + NotebookPath: "/d", + }, + }, + }, + MaxConcurrentRuns: 1, + Health: &JobHealth{ + Rules: []JobHealthRule{ + { + Metric: 
"RUN_DURATION_SECONDS", + Operation: "GREATER_THAN", + Value: 3600, + }, + }, + }, + }, + Response: Job{ + JobID: 789, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/get?job_id=789", + Response: Job{ + // good enough for mock + Settings: &JobSettings{ + Tasks: []JobTaskSettings{ + { + TaskKey: "b", + }, + { + TaskKey: "a", + }, + { + TaskKey: "d", + }, + { + TaskKey: "c", + }, + }, + }, + }, + }, + }, + Create: true, + Resource: ResourceJob(), + HCL: ` + name = "Featurizer" + + health { + rules { + metric = "RUN_DURATION_SECONDS" + op = "GREATER_THAN" + value = 3600 + } + } + + task { + task_key = "a" + + existing_cluster_id = "abc" + + notebook_task { + notebook_path = "/a" + } + } + + task { + task_key = "b" + + depends_on { + task_key = "a" + } + + existing_cluster_id = "abc" + + notebook_task { + notebook_path = "/b" + } + } + + task { + task_key = "c" + + depends_on { + task_key = "a" + } + + depends_on { + task_key = "b" + } + + existing_cluster_id = "abc" + + notebook_task { + notebook_path = "/c" + } + } + + task { + task_key = "d" + + depends_on { + task_key = "a" + } + + depends_on { + task_key = "b" + } + + depends_on { + task_key = "c" + } + + existing_cluster_id = "abc" + + notebook_task { + notebook_path = "/d" + } + }`, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "789", d.Id()) +} + func TestResourceJobCreate_ConditionTask(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ @@ -354,6 +536,80 @@ func TestResourceJobCreate_ConditionTask(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "231", d.Id()) } + +func TestResourceJobCreate_ForEachTask(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.1/jobs/create", + ExpectedRequest: JobSettings{ + Name: "Foreach-task-testing", + Tasks: []JobTaskSettings{ + { + TaskKey: "for_each_task_key", + ForEachTask: &ForEachTask{ + Concurrency: 1, + Inputs: "[1, 2, 3, 4, 5, 6]", + Task: ForEachNestedTask{ + TaskKey: "nested_task_key", + ExistingClusterID: "abc", + NotebookTask: &NotebookTask{ + NotebookPath: "/Stuff", + }, + }, + }, + }, + }, + MaxConcurrentRuns: 1, + }, + Response: Job{ + JobID: 789, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/get?job_id=789", + Response: Job{ + // good enough for mock + Settings: &JobSettings{ + Tasks: []JobTaskSettings{ + { + TaskKey: "for_each_task_key", + }, + }, + }, + }, + }, + }, + Create: true, + Resource: ResourceJob(), + HCL: ` + name = "Foreach-task-testing" + + task { + task_key = "for_each_task_key" + for_each_task { + concurrency = 1 + + inputs = "[1, 2, 3, 4, 5, 6]" + + task { + + task_key = "nested_task_key" + + existing_cluster_id = "abc" + + notebook_task { + notebook_path = "/Stuff" + } + } + } + }`, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "789", d.Id()) +} func TestResourceJobCreate_JobParameters(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ @@ -371,7 +627,7 @@ func TestResourceJobCreate_JobParameters(t *testing.T) { }, }, MaxConcurrentRuns: 1, - Parameters: []JobParameterDefinition{ + Parameters: []jobs.JobParameterDefinition{ { Name: "hello", Default: "world", @@ -400,7 +656,7 @@ func TestResourceJobCreate_JobParameters(t *testing.T) { TaskKey: "b", }, }, - Parameters: []JobParameterDefinition{ + Parameters: []jobs.JobParameterDefinition{ { Name: "hello", Default: "world", @@ -439,6 +695,88 @@ func TestResourceJobCreate_JobParameters(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "231", d.Id()) } + +func 
TestResourceJobCreate_JobParameters_EmptyDefault(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.1/jobs/create", + ExpectedRequest: JobSettings{ + Name: "JobParameterTesting", + MaxConcurrentRuns: 1, + Tasks: []JobTaskSettings{ + { + TaskKey: "a", + }, + }, + Parameters: []jobs.JobParameterDefinition{ + { + Name: "key", + Default: "", + }, + }, + }, + Response: Job{ + JobID: 231, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/get?job_id=231", + Response: Job{ + // good enough for mock + Settings: &JobSettings{ + Tasks: []JobTaskSettings{ + { + TaskKey: "a", + }, + }, + Parameters: []jobs.JobParameterDefinition{ + { + Name: "key", + Default: "", + }, + }, + }, + }, + }, + }, + Create: true, + Resource: ResourceJob(), + HCL: ` + name = "JobParameterTesting" + + parameter { + name = "key" + default = "" + } + + task { + task_key = "a" + }`, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "231", d.Id()) +} + +func TestResourceJobCreate_JobParameters_DefaultIsRequired(t *testing.T) { + qa.ResourceFixture{ + Create: true, + Resource: ResourceJob(), + HCL: ` + name = "JobParameterTesting" + + parameter { + name = "key" + } + + task { + task_key = "a" + }`, + }.ExpectError(t, "invalid config supplied. [parameter.#.default] Missing required argument") +} + func TestResourceJobCreate_JobClusters(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/policies/resource_cluster_policy.go b/policies/resource_cluster_policy.go index 96f80f6b44..ceb0b11c24 100644 --- a/policies/resource_cluster_policy.go +++ b/policies/resource_cluster_policy.go @@ -35,7 +35,10 @@ func ResourceClusterPolicy() common.Resource { } m["definition"].ConflictsWith = []string{"policy_family_definition_overrides", "policy_family_id"} m["definition"].Computed = true + m["definition"].DiffSuppressFunc = common.SuppressDiffWhitespaceChange + m["policy_family_definition_overrides"].ConflictsWith = []string{"definition"} + m["policy_family_definition_overrides"].DiffSuppressFunc = common.SuppressDiffWhitespaceChange m["policy_family_id"].ConflictsWith = []string{"definition"} m["policy_family_definition_overrides"].RequiredWith = []string{"policy_family_id"} diff --git a/provider/provider.go b/provider/provider.go index ae9846af15..2e1eb0cc90 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -39,6 +39,7 @@ import ( "github.com/databricks/terraform-provider-databricks/sql" "github.com/databricks/terraform-provider-databricks/storage" "github.com/databricks/terraform-provider-databricks/tokens" + "github.com/databricks/terraform-provider-databricks/vectorsearch" "github.com/databricks/terraform-provider-databricks/workspace" ) @@ -52,46 +53,50 @@ func init() { func DatabricksProvider() *schema.Provider { p := &schema.Provider{ DataSourcesMap: map[string]*schema.Resource{ // must be in alphabetical order - "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), - "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), - "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), - "databricks_cluster": clusters.DataSourceCluster().ToResource(), - "databricks_clusters": clusters.DataSourceClusters().ToResource(), - "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), - "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), - "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), - 
"databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), - "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), - "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), - "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), - "databricks_directory": workspace.DataSourceDirectory().ToResource(), - "databricks_group": scim.DataSourceGroup().ToResource(), - "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), - "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), - "databricks_jobs": jobs.DataSourceJobs().ToResource(), - "databricks_job": jobs.DataSourceJob().ToResource(), - "databricks_metastore": catalog.DataSourceMetastore().ToResource(), - "databricks_metastores": catalog.DataSourceMetastores().ToResource(), - "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), - "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), - "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), - "databricks_node_type": clusters.DataSourceNodeType().ToResource(), - "databricks_notebook": workspace.DataSourceNotebook().ToResource(), - "databricks_notebook_paths": workspace.DataSourceNotebookPaths().ToResource(), - "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), - "databricks_schemas": catalog.DataSourceSchemas().ToResource(), - "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), - "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), - "databricks_share": catalog.DataSourceShare().ToResource(), - "databricks_shares": catalog.DataSourceShares().ToResource(), - "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), - "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), - "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), + "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), + "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), + "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), + "databricks_aws_unity_catalog_policy": aws.DataAwsUnityCatalogPolicy().ToResource(), + "databricks_cluster": clusters.DataSourceCluster().ToResource(), + "databricks_clusters": clusters.DataSourceClusters().ToResource(), + "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), + "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), + "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), + "databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), + "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), + "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), + "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), + "databricks_directory": workspace.DataSourceDirectory().ToResource(), + "databricks_group": scim.DataSourceGroup().ToResource(), + "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), + "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), + "databricks_jobs": jobs.DataSourceJobs().ToResource(), + "databricks_job": jobs.DataSourceJob().ToResource(), + "databricks_metastore": catalog.DataSourceMetastore().ToResource(), + "databricks_metastores": catalog.DataSourceMetastores().ToResource(), + "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), + 
"databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), + "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), + "databricks_node_type": clusters.DataSourceNodeType().ToResource(), + "databricks_notebook": workspace.DataSourceNotebook().ToResource(), + "databricks_notebook_paths": workspace.DataSourceNotebookPaths().ToResource(), + "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), + "databricks_schemas": catalog.DataSourceSchemas().ToResource(), + "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), + "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), + "databricks_share": catalog.DataSourceShare().ToResource(), + "databricks_shares": catalog.DataSourceShares().ToResource(), + "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), + "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), + "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), "databricks_table": catalog.DataSourceTable().ToResource(), - "databricks_tables": catalog.DataSourceTables().ToResource(), - "databricks_views": catalog.DataSourceViews().ToResource(), - "databricks_user": scim.DataSourceUser().ToResource(), - "databricks_zones": clusters.DataSourceClusterZones().ToResource(), + "databricks_storage_credential": catalog.DataSourceStorageCredential().ToResource(), + "databricks_storage_credentials": catalog.DataSourceStorageCredentials().ToResource(), + "databricks_tables": catalog.DataSourceTables().ToResource(), + "databricks_views": catalog.DataSourceViews().ToResource(), + "databricks_volumes": catalog.DataSourceVolumes().ToResource(), + "databricks_user": scim.DataSourceUser().ToResource(), + "databricks_zones": clusters.DataSourceClusterZones().ToResource(), }, ResourcesMap: map[string]*schema.Resource{ // must be in alphabetical order "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), @@ -109,6 +114,7 @@ func DatabricksProvider() *schema.Provider { "databricks_directory": workspace.ResourceDirectory().ToResource(), "databricks_entitlements": scim.ResourceEntitlements().ToResource(), "databricks_external_location": catalog.ResourceExternalLocation().ToResource(), + "databricks_file": storage.ResourceFile().ToResource(), "databricks_git_credential": repos.ResourceGitCredential().ToResource(), "databricks_global_init_script": workspace.ResourceGlobalInitScript().ToResource(), "databricks_grant": catalog.ResourceGrant().ToResource(), @@ -172,6 +178,7 @@ func DatabricksProvider() *schema.Provider { "databricks_user": scim.ResourceUser().ToResource(), "databricks_user_instance_profile": aws.ResourceUserInstanceProfile().ToResource(), "databricks_user_role": aws.ResourceUserRole().ToResource(), + "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), "databricks_volume": catalog.ResourceVolume().ToResource(), "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), diff --git a/qa/testing.go b/qa/testing.go index 94926a02b3..ee7adf60cf 100644 --- a/qa/testing.go +++ b/qa/testing.go @@ -104,11 +104,13 @@ type ResourceFixture struct { CommandMock common.CommandMock // Set one of them to true to test the corresponding CRUD function for the - // terraform resource. - Create bool - Read bool - Update bool - Delete bool + // terraform resource. 
Or set ExpectedDiff to skip execution and only test + // that the diff is expected. + Create bool + Read bool + Update bool + Delete bool + ExpectedDiff map[string]*terraform.ResourceAttrDiff Removed bool ID string @@ -171,8 +173,10 @@ func (f ResourceFixture) prepareExecution(r *schema.Resource) (resourceCRUD, err return nil, fmt.Errorf("ID must be set for Delete") } return resourceCRUD(r.DeleteContext).withId(f.ID), nil + case f.ExpectedDiff != nil: + return nil, nil } - return nil, fmt.Errorf("no `Create|Read|Update|Delete: true` specificed") + return nil, fmt.Errorf("no `Create|Read|Update|Delete: true` or `ExpectedDiff` specified") } func (f ResourceFixture) setDatabricksEnvironmentForTest(client *common.DatabricksClient, host string) { @@ -304,6 +308,10 @@ func (f ResourceFixture) Apply(t *testing.T) (*schema.ResourceData, error) { } ctx := context.Background() diff, err := resource.Diff(ctx, is, resourceConfig, client) + if f.ExpectedDiff != nil { + assert.Equal(t, f.ExpectedDiff, diff.Attributes) + return nil, err + } // TODO: f.Resource.Data(is) - check why it doesn't work if err != nil { return nil, err diff --git a/qa/testing_test.go b/qa/testing_test.go index 76a9762a48..b68d5899e1 100644 --- a/qa/testing_test.go +++ b/qa/testing_test.go @@ -106,7 +106,7 @@ var noopContextResource = common.Resource{ func TestResourceFixture_ID(t *testing.T) { _, err := ResourceFixture{}.prepareExecution(nil) - assert.EqualError(t, err, "no `Create|Read|Update|Delete: true` specificed") + assert.EqualError(t, err, "no `Create|Read|Update|Delete: true` or `ExpectedDiff` specified") f := ResourceFixture{ Resource: noopResource, diff --git a/scripts/gcp-integration/README.md b/scripts/gcp-integration/README.md index d76239a607..3c2ae934cf 100644 --- a/scripts/gcp-integration/README.md +++ b/scripts/gcp-integration/README.md @@ -1,4 +1,5 @@ -make test-gcp +# make test-gcp + --- Used for running integration tests on GCP. diff --git a/settings/all_settings.go b/settings/all_settings.go index 90e06ee64f..f259f7f9ae 100644 --- a/settings/all_settings.go +++ b/settings/all_settings.go @@ -15,6 +15,7 @@ import ( // 3. Add a new entry to the AllSettingsResources map below. The final resource name will be "databricks__setting". 
func AllSettingsResources() map[string]common.Resource { return map[string]common.Resource{ - "default_namespace": makeSettingResource[settings.DefaultNamespaceSetting, *databricks.WorkspaceClient](defaultNamespaceSetting), + "default_namespace": makeSettingResource[settings.DefaultNamespaceSetting, *databricks.WorkspaceClient](defaultNamespaceSetting), + "restrict_workspace_admins": makeSettingResource[settings.RestrictWorkspaceAdminsSetting, *databricks.WorkspaceClient](restrictWsAdminsSetting), } } diff --git a/settings/generic_setting_test.go b/settings/generic_setting_test.go index 4a273c90c0..0b41d413c4 100644 --- a/settings/generic_setting_test.go +++ b/settings/generic_setting_test.go @@ -18,10 +18,10 @@ func TestQueryCreateDefaultNameSetting(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockSettingsAPI().EXPECT() - e.UpdateDefaultWorkspaceNamespace(mock.Anything, settings.UpdateDefaultWorkspaceNamespaceRequest{ + e.UpdateDefaultNamespaceSetting(mock.Anything, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, FieldMask: "namespace.value", - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Etag: "", Namespace: settings.StringMessage{ Value: "namespace_value", @@ -39,10 +39,10 @@ func TestQueryCreateDefaultNameSetting(t *testing.T) { }, }}, }) - e.UpdateDefaultWorkspaceNamespace(mock.Anything, settings.UpdateDefaultWorkspaceNamespaceRequest{ + e.UpdateDefaultNamespaceSetting(mock.Anything, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, FieldMask: "namespace.value", - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Etag: "etag1", Namespace: settings.StringMessage{ Value: "namespace_value", @@ -56,7 +56,7 @@ func TestQueryCreateDefaultNameSetting(t *testing.T) { }, SettingName: "default", }, nil) - e.ReadDefaultWorkspaceNamespace(mock.Anything, settings.ReadDefaultWorkspaceNamespaceRequest{ + e.GetDefaultNamespaceSetting(mock.Anything, settings.GetDefaultNamespaceSettingRequest{ Etag: "etag2", }).Return(&settings.DefaultNamespaceSetting{ Etag: "etag2", @@ -84,7 +84,7 @@ func TestQueryCreateDefaultNameSetting(t *testing.T) { func TestQueryReadDefaultNameSetting(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { - w.GetMockSettingsAPI().EXPECT().ReadDefaultWorkspaceNamespace(mock.Anything, settings.ReadDefaultWorkspaceNamespaceRequest{ + w.GetMockSettingsAPI().EXPECT().GetDefaultNamespaceSetting(mock.Anything, settings.GetDefaultNamespaceSettingRequest{ Etag: "etag1", }).Return(&settings.DefaultNamespaceSetting{ Etag: "etag2", @@ -115,10 +115,10 @@ func TestQueryUpdateDefaultNameSetting(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockSettingsAPI().EXPECT() - e.UpdateDefaultWorkspaceNamespace(mock.Anything, settings.UpdateDefaultWorkspaceNamespaceRequest{ + e.UpdateDefaultNamespaceSetting(mock.Anything, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, FieldMask: "namespace.value", - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Etag: "etag1", Namespace: settings.StringMessage{ Value: "new_namespace_value", @@ -132,7 +132,7 @@ func TestQueryUpdateDefaultNameSetting(t *testing.T) { }, SettingName: "default", }, nil) - e.ReadDefaultWorkspaceNamespace(mock.Anything, settings.ReadDefaultWorkspaceNamespaceRequest{ + 
e.GetDefaultNamespaceSetting(mock.Anything, settings.GetDefaultNamespaceSettingRequest{ Etag: "etag2", }).Return(&settings.DefaultNamespaceSetting{ Etag: "etag2", @@ -163,10 +163,10 @@ func TestQueryUpdateDefaultNameSettingWithConflict(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockSettingsAPI().EXPECT() - e.UpdateDefaultWorkspaceNamespace(mock.Anything, settings.UpdateDefaultWorkspaceNamespaceRequest{ + e.UpdateDefaultNamespaceSetting(mock.Anything, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, FieldMask: "namespace.value", - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Etag: "etag1", Namespace: settings.StringMessage{ Value: "new_namespace_value", @@ -184,10 +184,10 @@ func TestQueryUpdateDefaultNameSettingWithConflict(t *testing.T) { }, }}, }) - e.UpdateDefaultWorkspaceNamespace(mock.Anything, settings.UpdateDefaultWorkspaceNamespaceRequest{ + e.UpdateDefaultNamespaceSetting(mock.Anything, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, FieldMask: "namespace.value", - Setting: &settings.DefaultNamespaceSetting{ + Setting: settings.DefaultNamespaceSetting{ Etag: "etag2", Namespace: settings.StringMessage{ Value: "new_namespace_value", @@ -201,7 +201,7 @@ func TestQueryUpdateDefaultNameSettingWithConflict(t *testing.T) { }, SettingName: "default", }, nil) - e.ReadDefaultWorkspaceNamespace(mock.Anything, settings.ReadDefaultWorkspaceNamespaceRequest{ + e.GetDefaultNamespaceSetting(mock.Anything, settings.GetDefaultNamespaceSettingRequest{ Etag: "etag3", }).Return(&settings.DefaultNamespaceSetting{ Etag: "etag3", @@ -231,9 +231,9 @@ func TestQueryUpdateDefaultNameSettingWithConflict(t *testing.T) { func TestQueryDeleteDefaultNameSetting(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { - w.GetMockSettingsAPI().EXPECT().DeleteDefaultWorkspaceNamespace(mock.Anything, settings.DeleteDefaultWorkspaceNamespaceRequest{ + w.GetMockSettingsAPI().EXPECT().DeleteDefaultNamespaceSetting(mock.Anything, settings.DeleteDefaultNamespaceSettingRequest{ Etag: "etag1", - }).Return(&settings.DeleteDefaultWorkspaceNamespaceResponse{ + }).Return(&settings.DeleteDefaultNamespaceSettingResponse{ Etag: "etag2", }, nil) }, @@ -249,7 +249,7 @@ func TestQueryDeleteDefaultNameSetting(t *testing.T) { func TestQueryDeleteDefaultNameSettingWithConflict(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { - w.GetMockSettingsAPI().EXPECT().DeleteDefaultWorkspaceNamespace(mock.Anything, settings.DeleteDefaultWorkspaceNamespaceRequest{ + w.GetMockSettingsAPI().EXPECT().DeleteDefaultNamespaceSetting(mock.Anything, settings.DeleteDefaultNamespaceSettingRequest{ Etag: "etag1", }).Return(nil, &apierr.APIError{ ErrorCode: "RESOURCE_CONFLICT", @@ -262,9 +262,9 @@ func TestQueryDeleteDefaultNameSettingWithConflict(t *testing.T) { }, }}, }) - w.GetMockSettingsAPI().EXPECT().DeleteDefaultWorkspaceNamespace(mock.Anything, settings.DeleteDefaultWorkspaceNamespaceRequest{ + w.GetMockSettingsAPI().EXPECT().DeleteDefaultNamespaceSetting(mock.Anything, settings.DeleteDefaultNamespaceSettingRequest{ Etag: "etag2", - }).Return(&settings.DeleteDefaultWorkspaceNamespaceResponse{ + }).Return(&settings.DeleteDefaultNamespaceSettingResponse{ Etag: "etag3", }, nil) }, diff --git a/settings/resource_default_namespace_setting.go b/settings/resource_default_namespace_setting.go index 
8457b72191..748b5ff610 100644 --- a/settings/resource_default_namespace_setting.go +++ b/settings/resource_default_namespace_setting.go @@ -11,15 +11,15 @@ import ( var defaultNamespaceSetting = workspaceSetting[settings.DefaultNamespaceSetting]{ settingStruct: settings.DefaultNamespaceSetting{}, readFunc: func(ctx context.Context, w *databricks.WorkspaceClient, etag string) (*settings.DefaultNamespaceSetting, error) { - return w.Settings.ReadDefaultWorkspaceNamespace(ctx, settings.ReadDefaultWorkspaceNamespaceRequest{ + return w.Settings.GetDefaultNamespaceSetting(ctx, settings.GetDefaultNamespaceSettingRequest{ Etag: etag, }) }, updateFunc: func(ctx context.Context, w *databricks.WorkspaceClient, t settings.DefaultNamespaceSetting) (string, error) { t.SettingName = "default" - res, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, settings.UpdateDefaultWorkspaceNamespaceRequest{ + res, err := w.Settings.UpdateDefaultNamespaceSetting(ctx, settings.UpdateDefaultNamespaceSettingRequest{ AllowMissing: true, - Setting: &t, + Setting: t, FieldMask: "namespace.value", }) if err != nil { @@ -28,7 +28,7 @@ var defaultNamespaceSetting = workspaceSetting[settings.DefaultNamespaceSetting] return res.Etag, err }, deleteFunc: func(ctx context.Context, w *databricks.WorkspaceClient, etag string) (string, error) { - res, err := w.Settings.DeleteDefaultWorkspaceNamespace(ctx, settings.DeleteDefaultWorkspaceNamespaceRequest{ + res, err := w.Settings.DeleteDefaultNamespaceSetting(ctx, settings.DeleteDefaultNamespaceSettingRequest{ Etag: etag, }) if err != nil { diff --git a/settings/resource_restrict_workspace_admins_setting.go b/settings/resource_restrict_workspace_admins_setting.go new file mode 100644 index 0000000000..d2b5385d07 --- /dev/null +++ b/settings/resource_restrict_workspace_admins_setting.go @@ -0,0 +1,39 @@ +package settings + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/settings" +) + +// Restrict Workspace Admins setting +var restrictWsAdminsSetting = workspaceSetting[settings.RestrictWorkspaceAdminsSetting]{ + settingStruct: settings.RestrictWorkspaceAdminsSetting{}, + readFunc: func(ctx context.Context, w *databricks.WorkspaceClient, etag string) (*settings.RestrictWorkspaceAdminsSetting, error) { + return w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: etag, + }) + }, + updateFunc: func(ctx context.Context, w *databricks.WorkspaceClient, t settings.RestrictWorkspaceAdminsSetting) (string, error) { + t.SettingName = "default" + res, err := w.Settings.UpdateRestrictWorkspaceAdminsSetting(ctx, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + Setting: t, + FieldMask: "restrict_workspace_admins.status", + }) + if err != nil { + return "", err + } + return res.Etag, err + }, + deleteFunc: func(ctx context.Context, w *databricks.WorkspaceClient, etag string) (string, error) { + res, err := w.Settings.DeleteRestrictWorkspaceAdminsSetting(ctx, settings.DeleteRestrictWorkspaceAdminsSettingRequest{ + Etag: etag, + }) + if err != nil { + return "", err + } + return res.Etag, err + }, +} diff --git a/settings/resource_restrict_workspace_admins_setting_test.go b/settings/resource_restrict_workspace_admins_setting_test.go new file mode 100644 index 0000000000..f6793c6568 --- /dev/null +++ b/settings/resource_restrict_workspace_admins_setting_test.go @@ -0,0 +1,277 @@ +package settings + +import ( + "testing" + + 
"github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var testRestrictWsAdminsSetting = AllSettingsResources()["restrict_workspace_admins"] + +func TestQueryCreateRestrictWsAdminsSetting(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockSettingsAPI().EXPECT() + e.UpdateRestrictWorkspaceAdminsSetting(mock.Anything, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + FieldMask: "restrict_workspace_admins.status", + Setting: settings.RestrictWorkspaceAdminsSetting{ + Etag: "", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "NOT_FOUND", + StatusCode: 404, + Message: "SomeMessage", + Details: []apierr.ErrorDetail{{ + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Metadata: map[string]string{ + "etag": "etag1", + }, + }}, + }) + e.UpdateRestrictWorkspaceAdminsSetting(mock.Anything, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + FieldMask: "restrict_workspace_admins.status", + Setting: settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag1", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, nil) + e.GetRestrictWorkspaceAdminsSetting(mock.Anything, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag2", + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Create: true, + HCL: ` + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } + `, + }.Apply(t) + + assert.NoError(t, err) + + assert.Equal(t, "etag2", d.Id()) + assert.Equal(t, "RESTRICT_TOKENS_AND_JOB_RUN_AS", d.Get("restrict_workspace_admins.0.status")) +} + +func TestQueryReadRestrictWsAdminsSetting(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockSettingsAPI().EXPECT().GetRestrictWorkspaceAdminsSetting(mock.Anything, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag1", + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Read: true, + HCL: ` + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } + `, + ID: "etag1", + }.Apply(t) + + assert.NoError(t, err) + + assert.Equal(t, "etag2", d.Id()) + res := d.Get("restrict_workspace_admins").([]interface{})[0].(map[string]interface{}) + assert.Equal(t, "RESTRICT_TOKENS_AND_JOB_RUN_AS", res["status"]) +} + +func TestQueryUpdateRestrictWsAdminsSetting(t *testing.T) { + d, err 
:= qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockSettingsAPI().EXPECT() + e.UpdateRestrictWorkspaceAdminsSetting(mock.Anything, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + FieldMask: "restrict_workspace_admins.status", + Setting: settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag1", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "ALLOW_ALL", + }, + SettingName: "default", + }, + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "ALLOW_ALL", + }, + SettingName: "default", + }, nil) + e.GetRestrictWorkspaceAdminsSetting(mock.Anything, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag2", + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "ALLOW_ALL", + }, + SettingName: "default", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Update: true, + HCL: ` + restrict_workspace_admins { + status = "ALLOW_ALL" + } + `, + ID: "etag1", + }.Apply(t) + + assert.NoError(t, err) + + assert.Equal(t, "etag2", d.Id()) + res := d.Get("restrict_workspace_admins").([]interface{})[0].(map[string]interface{}) + assert.Equal(t, "ALLOW_ALL", res["status"]) +} + +func TestQueryUpdateRestrictWsAdminsSettingWithConflict(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockSettingsAPI().EXPECT() + e.UpdateRestrictWorkspaceAdminsSetting(mock.Anything, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + FieldMask: "restrict_workspace_admins.status", + Setting: settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag1", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "RESOURCE_CONFLICT", + StatusCode: 409, + Message: "SomeMessage", + Details: []apierr.ErrorDetail{{ + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Metadata: map[string]string{ + "etag": "etag2", + }, + }}, + }) + e.UpdateRestrictWorkspaceAdminsSetting(mock.Anything, settings.UpdateRestrictWorkspaceAdminsSettingRequest{ + AllowMissing: true, + FieldMask: "restrict_workspace_admins.status", + Setting: settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag2", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag3", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, nil) + e.GetRestrictWorkspaceAdminsSetting(mock.Anything, settings.GetRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag3", + }).Return(&settings.RestrictWorkspaceAdminsSetting{ + Etag: "etag3", + RestrictWorkspaceAdmins: settings.RestrictWorkspaceAdminsMessage{ + Status: "RESTRICT_TOKENS_AND_JOB_RUN_AS", + }, + SettingName: "default", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Update: true, + HCL: ` + restrict_workspace_admins { + status = "RESTRICT_TOKENS_AND_JOB_RUN_AS" + } + `, + ID: "etag1", + }.Apply(t) + + assert.NoError(t, err) + + assert.Equal(t, "etag3", d.Id()) + res := 
d.Get("restrict_workspace_admins").([]interface{})[0].(map[string]interface{}) + assert.Equal(t, "RESTRICT_TOKENS_AND_JOB_RUN_AS", res["status"]) +} + +func TestQueryDeleteRestrictWsAdminsSetting(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockSettingsAPI().EXPECT().DeleteRestrictWorkspaceAdminsSetting(mock.Anything, settings.DeleteRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag1", + }).Return(&settings.DeleteRestrictWorkspaceAdminsSettingResponse{ + Etag: "etag2", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Delete: true, + ID: "etag1", + }.Apply(t) + + assert.NoError(t, err) + assert.Equal(t, "etag2", d.Id()) +} + +func TestQueryDeleteRestrictWsAdminsSettingWithConflict(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockSettingsAPI().EXPECT().DeleteRestrictWorkspaceAdminsSetting(mock.Anything, settings.DeleteRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag1", + }).Return(nil, &apierr.APIError{ + ErrorCode: "RESOURCE_CONFLICT", + StatusCode: 409, + Message: "SomeMessage", + Details: []apierr.ErrorDetail{{ + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Metadata: map[string]string{ + "etag": "etag2", + }, + }}, + }) + w.GetMockSettingsAPI().EXPECT().DeleteRestrictWorkspaceAdminsSetting(mock.Anything, settings.DeleteRestrictWorkspaceAdminsSettingRequest{ + Etag: "etag2", + }).Return(&settings.DeleteRestrictWorkspaceAdminsSettingResponse{ + Etag: "etag3", + }, nil) + }, + Resource: testRestrictWsAdminsSetting, + Delete: true, + ID: "etag1", + }.Apply(t) + + assert.NoError(t, err) + assert.Equal(t, "etag3", d.Id()) +} diff --git a/sharing/resource_recipient.go b/sharing/resource_recipient.go index 5f5f37662a..457600a2e5 100644 --- a/sharing/resource_recipient.go +++ b/sharing/resource_recipient.go @@ -85,6 +85,10 @@ func ResourceRecipient() common.Resource { } } + if !d.HasChangeExcept("owner") { + return nil + } + updateRecipientRequest.Owner = "" err = w.Recipients.Update(ctx, updateRecipientRequest) if err != nil { diff --git a/sql/resource_sql_endpoint.go b/sql/resource_sql_endpoint.go index fd48f45c08..8283a1b389 100644 --- a/sql/resource_sql_endpoint.go +++ b/sql/resource_sql_endpoint.go @@ -145,5 +145,8 @@ func ResourceSqlEndpoint() common.Resource { return w.Warehouses.DeleteById(ctx, d.Id()) }, Schema: s, + CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error { + return d.Clear("health") + }, } } diff --git a/sql/resource_sql_endpoint_test.go b/sql/resource_sql_endpoint_test.go index 2b98ef4160..a6099b4fd3 100644 --- a/sql/resource_sql_endpoint_test.go +++ b/sql/resource_sql_endpoint_test.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/qa" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -239,6 +240,37 @@ func TestResourceSQLEndpointUpdate(t *testing.T) { assert.Equal(t, "d7c9d05c-7496-4c69-b089-48823edad40c", d.Get("data_source_id")) } +// Testing the customizeDiff on clearing "health" diff is working as expected. 
+func TestResourceSQLEndpointUpdateHealthNoDiff(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceSqlEndpoint(), + ID: "abc", + InstanceState: map[string]string{ + "name": "foo", + "cluster_size": "Small", + "auto_stop_mins": "120", + "enable_photon": "true", + "max_num_clusters": "1", + "spot_instance_policy": "COST_OPTIMIZED", + }, + ExpectedDiff: map[string]*terraform.ResourceAttrDiff{ + "state": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "odbc_params.#": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "num_clusters": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "num_active_sessions": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "jdbc_url": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "id": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "enable_serverless_compute": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "data_source_id": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + "creator_name": {Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}, + }, + HCL: ` + name = "foo" + cluster_size = "Small" + `, + }.ApplyNoError(t) +} + func TestResourceSQLEndpointDelete(t *testing.T) { d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { diff --git a/sql/resource_sql_query.go b/sql/resource_sql_query.go index 8fc78eea80..0693335655 100644 --- a/sql/resource_sql_query.go +++ b/sql/resource_sql_query.go @@ -544,6 +544,7 @@ func ResourceSqlQuery() common.Resource { }, false) m["run_as_role"].ValidateFunc = validation.StringInSlice([]string{"viewer", "owner"}, false) + m["query"].DiffSuppressFunc = common.SuppressDiffWhitespaceChange return m }) diff --git a/storage/resource_file.go b/storage/resource_file.go new file mode 100644 index 0000000000..4cd2d18f7b --- /dev/null +++ b/storage/resource_file.go @@ -0,0 +1,124 @@ +package storage + +import ( + "bytes" + "context" + "encoding/base64" + "io" + "os" + + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/workspace" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func getContentReader(data *schema.ResourceData) (io.ReadCloser, error) { + source := data.Get("source").(string) + var reader io.ReadCloser + var err error + if source != "" { + reader, err = os.Open(source) + if err != nil { + return nil, err + } + } + contentBase64 := data.Get("content_base64").(string) + if contentBase64 != "" { + decodedString, err := base64.StdEncoding.DecodeString(contentBase64) + if err != nil { + return nil, err + } + reader = io.NopCloser(bytes.NewReader(decodedString)) + if err != nil { + return nil, err + } + } + return reader, err +} + +func upload(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient, path string) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + reader, err := getContentReader(data) + if err != nil { + return err + } + err = w.Files.Upload(ctx, files.UploadRequest{Contents: reader, FilePath: path}) + if err != nil { + return err + } + + metadata, err := 
w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: path}) + if err != nil { + return err + } + data.Set("modification_time", metadata.LastModified) + data.Set("file_size", metadata.ContentLength) + data.Set("remote_file_modified", false) + data.SetId(path) + return nil +} + +func ResourceFile() common.Resource { + s := workspace.FileContentSchema(map[string]*schema.Schema{ + "modification_time": { + Type: schema.TypeString, + Computed: true, + }, + "file_size": { + Type: schema.TypeInt, + Computed: true, + }, + "remote_file_modified": { + Type: schema.TypeBool, + Optional: true, + }, + }) + return common.Resource{ + Schema: s, + Create: func(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient) error { + path := data.Get("path").(string) + err := upload(ctx, data, c, path) + if err != nil { + return err + } + data.SetId(path) + return nil + }, + Read: func(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + + path := data.Id() + metadata, err := w.Files.GetMetadata(ctx, files.GetMetadataRequest{FilePath: path}) + if err != nil { + return err + } + storedModificationTime := data.Get("modification_time").(string) + + data.Set("remote_file_modified", storedModificationTime != metadata.LastModified) + + // Do not store here the modification time. If the update fails, we will keep the wrong one in the state. + + return common.StructToData(metadata, s, data) + }, + Update: func(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient) error { + path := data.Id() + return upload(ctx, data, c, path) + }, + Delete: func(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + path := data.Id() + err = w.Files.Delete(ctx, files.DeleteFileRequest{FilePath: path}) + return err + }, + } +} diff --git a/storage/resource_file_test.go b/storage/resource_file_test.go new file mode 100644 index 0000000000..25dd748802 --- /dev/null +++ b/storage/resource_file_test.go @@ -0,0 +1,249 @@ +package storage + +import ( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/files" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" +) + +func TestResourceFileCreate(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPut, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName", + Status: http.StatusOK, + Response: nil, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + }, + Resource: ResourceFile(), + State: map[string]any{ + "content_base64": "YWJjCg==", + "path": path, + }, + Create: true, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, path, d.Id()) +} + +func TestResourceFileCreateSource(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + source := 
"testdata/tf-test-python.py" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPut, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName", + Status: http.StatusOK, + Response: nil, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + }, + Resource: ResourceFile(), + State: map[string]any{ + "source": source, + "path": path, + }, + Create: true, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, path, d.Id()) +} + +func TestResourceFileCreate_Error(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPut, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceFile(), + State: map[string]any{ + "content_base64": "YWJjCg==", + "path": path, + }, + Create: true, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "", d.Id(), "Id should be empty for error creates") +} + +func TestResourceFileRead(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + }, + Resource: ResourceFile(), + Read: true, + New: true, + ID: path, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, path, d.Id()) +} + +func TestResourceFileRead_NotFound(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "HEAD", + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Status: 404, + }, + }, + Resource: ResourceFile(), + Read: true, + Removed: true, + ID: path, + }.ApplyNoError(t) +} + +func TestResourceFileDelete(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodDelete, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: nil, + }, + }, + Resource: ResourceFile(), + Delete: true, + New: true, + ID: path, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, path, d.Id()) +} + +func TestResourceFileDelete_Error(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "DELETE", + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + 
Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceFile(), + Delete: true, + ID: path, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, path, d.Id()) +} + +func TestResourceFileUpdate(t *testing.T) { + path := "/Volumes/CatalogName/SchemaName/VolumeName/fileName" + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PUT", + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName", + Status: http.StatusOK, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + { + Method: http.MethodHead, + Resource: "/api/2.0/fs/files/Volumes/CatalogName/SchemaName/VolumeName/fileName?", + Response: files.GetMetadataResponse{ + LastModified: "Wed, 21 Oct 2015 07:28:00 GMT", + ContentLength: 1024, + }, + }, + }, + Resource: ResourceFile(), + State: map[string]any{ + "content_base64": "YWJjCg==", + "path": path, + }, + ID: path, + RequiresNew: true, + Update: true, + }.ApplyNoError(t) +} + +func TestResourceFileBadPrefix(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceFile(), + Create: true, + HCL: ` + path = "Volumes/CatalogName/SchemaName/VolumeName/fileName" + content_base64 = "YWJjCg==" + `, + }.ExpectError(t, "invalid config supplied. [path] Path should start with /Volumes") +} diff --git a/storage/testdata/tf-test-python2.py b/storage/testdata/tf-test-python2.py new file mode 100644 index 0000000000..b7f38b2261 --- /dev/null +++ b/storage/testdata/tf-test-python2.py @@ -0,0 +1,6 @@ +# Databricks notebook source +print("hello world -- New File") + +# COMMAND ---------- + +print("hello world2 -- New File") diff --git a/vectorsearch/resource_vector_search_endpoint.go b/vectorsearch/resource_vector_search_endpoint.go new file mode 100644 index 0000000000..c9647fa316 --- /dev/null +++ b/vectorsearch/resource_vector_search_endpoint.go @@ -0,0 +1,85 @@ +package vectorsearch + +import ( + "context" + "time" + + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/databricks/databricks-sdk-go/service/vectorsearch" +) + +const DefaultProvisionTimeout = 45 * time.Minute + +func ResourceVectorSearchEndpoint() common.Resource { + s := common.StructToSchema( + vectorsearch.EndpointInfo{}, + func(s map[string]*schema.Schema) map[string]*schema.Schema { + common.CustomizeSchemaPath(s, "name").SetRequired().SetForceNew() + common.CustomizeSchemaPath(s, "endpoint_type").SetRequired().SetForceNew() + delete(s, "id") + common.CustomizeSchemaPath(s, "creator").SetReadOnly() + common.CustomizeSchemaPath(s, "creation_timestamp").SetReadOnly() + common.CustomizeSchemaPath(s, "last_updated_timestamp").SetReadOnly() + common.CustomizeSchemaPath(s, "last_updated_user").SetReadOnly() + common.CustomizeSchemaPath(s, "endpoint_status").SetReadOnly() + common.CustomizeSchemaPath(s, "num_indexes").SetReadOnly() + common.CustomizeSchemaPath(s).AddNewField("endpoint_id", &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }) + + return s + }) + + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var req vectorsearch.CreateEndpoint 
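+ // The schema above is generated from vectorsearch.EndpointInfo via common.StructToSchema, so
+ // common.DataToStructPointer can copy the configured fields straight into the SDK's CreateEndpoint
+ // request. CreateEndpoint returns a waiter; GetWithTimeout below blocks until the endpoint reaches
+ // the ONLINE state or the create timeout (DefaultProvisionTimeout, 45 minutes) expires.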
+ common.DataToStructPointer(d, s, &req) + wait, err := w.VectorSearchEndpoints.CreateEndpoint(ctx, req) + if err != nil { + return err + } + endpoint, err := wait.GetWithTimeout(d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + d.SetId(endpoint.Name) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + endpoint, err := w.VectorSearchEndpoints.GetEndpointByEndpointName(ctx, d.Id()) + if err != nil { + return err + } + err = common.StructToData(*endpoint, s, d) + if err != nil { + return err + } + d.Set("endpoint_id", endpoint.Id) + return nil + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.VectorSearchEndpoints.DeleteEndpointByEndpointName(ctx, d.Id()) + }, + StateUpgraders: []schema.StateUpgrader{}, + Schema: s, + SchemaVersion: 0, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(DefaultProvisionTimeout), + }, + } +} diff --git a/vectorsearch/resource_vector_search_endpoint_test.go b/vectorsearch/resource_vector_search_endpoint_test.go new file mode 100644 index 0000000000..ec5e193a57 --- /dev/null +++ b/vectorsearch/resource_vector_search_endpoint_test.go @@ -0,0 +1,83 @@ +package vectorsearch + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/qa/poll" + "github.com/databricks/terraform-provider-databricks/qa" + + "github.com/databricks/databricks-sdk-go/service/vectorsearch" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestVectorSearchEndpointCornerCases(t *testing.T) { + qa.ResourceCornerCases(t, ResourceVectorSearchEndpoint()) +} + +func TestVectorSearchEndpointCreate(t *testing.T) { + ei := &vectorsearch.EndpointInfo{ + Name: "abc", + EndpointStatus: &vectorsearch.EndpointStatus{State: "ONLINE"}, + Id: "1234-5678", + } + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockVectorSearchEndpointsAPI().EXPECT() + e.CreateEndpoint(mock.Anything, vectorsearch.CreateEndpoint{ + Name: "abc", + EndpointType: "STANDARD", + }).Return(&vectorsearch.WaitGetEndpointVectorSearchEndpointOnline[vectorsearch.EndpointInfo]{Poll: poll.Simple(*ei)}, nil) + + e.GetEndpointByEndpointName(mock.Anything, "abc").Return(ei, nil) + }, + Resource: ResourceVectorSearchEndpoint(), + HCL: ` + name = "abc" + endpoint_type = "STANDARD" + `, + Create: true, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc", d.Id()) + assert.Equal(t, "1234-5678", d.Get("endpoint_id")) +} + +func TestVectorSearchEndpointRead(t *testing.T) { + ei := &vectorsearch.EndpointInfo{ + Name: "abc", + EndpointStatus: &vectorsearch.EndpointStatus{State: "ONLINE"}, + Id: "1234-5678", + } + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockVectorSearchEndpointsAPI().EXPECT() + e.GetEndpointByEndpointName(mock.Anything, "abc").Return(ei, nil) + }, + Resource: ResourceVectorSearchEndpoint(), + ID: "abc", + HCL: ` + name = "abc" + endpoint_type = "STANDARD" + `, + Read: true, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc", d.Id()) + assert.Equal(t, "1234-5678", d.Get("endpoint_id")) +} + +func TestResourcePASDelete(t *testing.T) { + d, err := qa.ResourceFixture{ + MockWorkspaceClientFunc: 
func(a *mocks.MockWorkspaceClient) { + a.GetMockVectorSearchEndpointsAPI().EXPECT().DeleteEndpointByEndpointName(mock.Anything, "abc").Return(nil) + }, + Resource: ResourceVectorSearchEndpoint(), + Delete: true, + ID: "abc", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc", d.Id()) +} diff --git a/workspace/file_resource.go b/workspace/file_resource.go index 91905c701f..75ac23b537 100644 --- a/workspace/file_resource.go +++ b/workspace/file_resource.go @@ -146,6 +146,15 @@ func FileContentSchema(extra map[string]*schema.Schema) map[string]*schema.Schem }, } } + if strings.HasPrefix(v, "Volume") { + return diag.Diagnostics{ + { + Summary: "Path should start with /Volumes", + Severity: diag.Error, + AttributePath: p, + }, + } + } if strings.HasPrefix(v, "dbfs:") { return diag.Diagnostics{ { diff --git a/workspace/global_init_scripts.go b/workspace/global_init_scripts.go deleted file mode 100644 index 4e20b74753..0000000000 --- a/workspace/global_init_scripts.go +++ /dev/null @@ -1,83 +0,0 @@ -package workspace - -import ( - "context" - - "github.com/databricks/terraform-provider-databricks/common" -) - -// NewGlobalInitScriptsAPI creates GlobalInitScriptsAPI instance from provider meta -func NewGlobalInitScriptsAPI(ctx context.Context, m any) GlobalInitScriptsAPI { - return GlobalInitScriptsAPI{ - client: m.(*common.DatabricksClient), - context: ctx, - } -} - -// GlobalInitScriptsAPI exposes the Global Init Scripts API: https://docs.databricks.com/dev-tools/api/latest/global-init-scripts.html# -type GlobalInitScriptsAPI struct { - client *common.DatabricksClient - context context.Context -} - -// GlobalInitScriptInfo contains information about registered global init script -type GlobalInitScriptInfo struct { - ScriptID string `json:"script_id"` - Name string `json:"name"` - Position int32 `json:"position,omitempty" tf:"computed"` - Enabled bool `json:"enabled,omitempty"` - CreatedBy string `json:"creator_user_name,omitempty"` - CreatedAt int64 `json:"created_at_timestamp,omitempty"` - UpdatedBy string `json:"updater_user_name,omitempty"` - UpdatedAt int64 `json:"updated_at_timestamp,omitempty"` - ContentBase64 string `json:"script,omitempty"` -} - -// GlobalInitScriptPayload contains information about registered global init script -type GlobalInitScriptPayload struct { - Name string `json:"name"` - Position int32 `json:"position"` - Enabled bool `json:"enabled"` - ContentBase64 string `json:"script"` -} - -type globalInitScriptCreateResponse struct { - ScriptID string `json:"script_id"` -} - -type globalInitScriptListResponse struct { - Scripts []GlobalInitScriptInfo `json:"scripts"` -} - -// List returns a list of registered global init scripts -func (a GlobalInitScriptsAPI) List() ([]GlobalInitScriptInfo, error) { - var giss globalInitScriptListResponse - err := a.client.Get(a.context, "/global-init-scripts", nil, &giss) - return giss.Scripts, err -} - -// Get returns information about specific global init scripts -func (a GlobalInitScriptsAPI) Get(scriptID string) (initScript GlobalInitScriptInfo, err error) { - err = a.client.Get(a.context, "/global-init-scripts/"+scriptID, nil, &initScript) - return -} - -// Delete deletes specific global init scripts -func (a GlobalInitScriptsAPI) Delete(scriptID string) error { - request := map[string]string{ - "script_id": scriptID, - } - return a.client.Delete(a.context, "/global-init-scripts/"+scriptID, request) -} - -// Create creates the global init script from the given payload. 
-func (a GlobalInitScriptsAPI) Create(payload GlobalInitScriptPayload) (string, error) { - var response globalInitScriptCreateResponse - err := a.client.Post(a.context, "/global-init-scripts", payload, &response) - return response.ScriptID, err -} - -// Update updates the specific global init script from the given payload. -func (a GlobalInitScriptsAPI) Update(scriptID string, payload GlobalInitScriptPayload) error { - return a.client.Patch(a.context, "/global-init-scripts/"+scriptID, payload) -} diff --git a/workspace/resource_global_init_script.go b/workspace/resource_global_init_script.go index f8b3fe7edd..37d915941f 100644 --- a/workspace/resource_global_init_script.go +++ b/workspace/resource_global_init_script.go @@ -6,6 +6,7 @@ import ( "fmt" "regexp" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -53,22 +54,28 @@ func ResourceGlobalInitScript() common.Resource { return fmt.Errorf("size of the global init script (%d bytes) exceeds maximal allowed (%d bytes)", contentLen, maxScriptSize) } - globalInitScriptsAPI := NewGlobalInitScriptsAPI(ctx, c) - scriptID, err := globalInitScriptsAPI.Create(GlobalInitScriptPayload{ - ContentBase64: base64.StdEncoding.EncodeToString(content), - Enabled: d.Get("enabled").(bool), - Position: int32(d.Get("position").(int)), - Name: d.Get("name").(string), + w, err := c.WorkspaceClient() + if err != nil { + return err + } + created, err := w.GlobalInitScripts.Create(ctx, compute.GlobalInitScriptCreateRequest{ + Script: base64.StdEncoding.EncodeToString(content), + Enabled: d.Get("enabled").(bool), + Position: d.Get("position").(int), + Name: d.Get("name").(string), }) if err != nil { return err } - d.SetId(scriptID) + d.SetId(created.ScriptId) return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - globalInitScriptsAPI := NewGlobalInitScriptsAPI(ctx, c) - scriptStatus, err := globalInitScriptsAPI.Get(d.Id()) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + scriptStatus, err := w.GlobalInitScripts.GetByScriptId(ctx, d.Id()) if err != nil { return err } @@ -83,16 +90,24 @@ func ResourceGlobalInitScript() common.Resource { return fmt.Errorf("size of the global init script (%d bytes) exceeds maximal allowed (%d bytes)", contentLen, maxScriptSize) } - globalInitScriptsAPI := NewGlobalInitScriptsAPI(ctx, c) - return globalInitScriptsAPI.Update(d.Id(), GlobalInitScriptPayload{ - ContentBase64: base64.StdEncoding.EncodeToString(content), - Enabled: d.Get("enabled").(bool), - Position: int32(d.Get("position").(int)), - Name: d.Get("name").(string), + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.GlobalInitScripts.Update(ctx, compute.GlobalInitScriptUpdateRequest{ + ScriptId: d.Id(), + Script: base64.StdEncoding.EncodeToString(content), + Enabled: d.Get("enabled").(bool), + Position: d.Get("position").(int), + Name: d.Get("name").(string), }) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - return NewGlobalInitScriptsAPI(ctx, c).Delete(d.Id()) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.GlobalInitScripts.DeleteByScriptId(ctx, d.Id()) }, Schema: s, SchemaVersion: 1, diff --git a/workspace/resource_global_init_script_test.go b/workspace/resource_global_init_script_test.go index c31ec7a8fc..57a0290270 100644 --- 
a/workspace/resource_global_init_script_test.go +++ b/workspace/resource_global_init_script_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/qa" "github.com/stretchr/testify/assert" @@ -19,17 +20,17 @@ func TestResourceGlobalInitScriptRead(t *testing.T) { Fixtures: []qa.HTTPFixture{ { Method: http.MethodGet, - Resource: "/api/2.0/global-init-scripts/1234", - Response: GlobalInitScriptInfo{ - ScriptID: "1234", - Name: "Test", - Position: 0, - Enabled: true, - CreatedBy: "someuser@domain.com", - CreatedAt: 1612520583493, - UpdatedBy: "someuser@domain.com", - UpdatedAt: 1612520583493, - ContentBase64: "ZWNobyBoZWxsbw==", + Resource: "/api/2.0/global-init-scripts/1234?", + Response: compute.GlobalInitScriptDetailsWithContent{ + ScriptId: "1234", + Name: "Test", + Position: 0, + Enabled: true, + CreatedBy: "someuser@domain.com", + CreatedAt: 1612520583493, + UpdatedBy: "someuser@domain.com", + UpdatedAt: 1612520583493, + Script: "ZWNobyBoZWxsbw==", }, }, }, @@ -51,7 +52,7 @@ func TestResourceGlobalInitScriptDelete(t *testing.T) { Fixtures: []qa.HTTPFixture{ { Method: http.MethodDelete, - Resource: "/api/2.0/global-init-scripts/" + scriptID + "?script_id=" + scriptID, + Resource: "/api/2.0/global-init-scripts/1234?", Status: http.StatusOK, }, }, @@ -68,7 +69,7 @@ func TestResourceGlobalInitScriptRead_NotFound(t *testing.T) { Fixtures: []qa.HTTPFixture{ { // read log output for correct url... Method: "GET", - Resource: "/api/2.0/global-init-scripts/1234", + Resource: "/api/2.0/global-init-scripts/1234?", Response: apierr.APIErrorBody{ ErrorCode: "RESOURCE_DOES_NOT_EXIST", Message: "The global unit script with ID 1234 does not exist.", @@ -89,23 +90,22 @@ func TestResourceGlobalInitScriptCreate(t *testing.T) { { Method: "POST", Resource: "/api/2.0/global-init-scripts", - ExpectedRequest: GlobalInitScriptPayload{ - Name: "test", - ContentBase64: "ZWNobyBoZWxsbw==", + ExpectedRequest: compute.GlobalInitScriptCreateRequest{ + Name: "test", + Script: "ZWNobyBoZWxsbw==", }, - Response: globalInitScriptCreateResponse{ - ScriptID: "1234", + Response: compute.CreateResponse{ + ScriptId: "1234", }, }, { Method: "GET", - Resource: "/api/2.0/global-init-scripts/1234", + Resource: "/api/2.0/global-init-scripts/1234?", ReuseRequest: true, - Response: GlobalInitScriptInfo{ - ScriptID: "1234", - ContentBase64: "ZWNobyBoZWxsbw==", - Position: 0, - Name: "test", + Response: compute.GlobalInitScriptDetailsWithContent{ + ScriptId: "1234", + Script: "ZWNobyBoZWxsbw==", + Name: "test", }, }, }, @@ -156,24 +156,24 @@ func TestResourceGlobalInitScriptUpdate(t *testing.T) { { Method: "PATCH", Resource: "/api/2.0/global-init-scripts/1234", - ExpectedRequest: GlobalInitScriptPayload{ - Name: "test", - Position: 0, - ContentBase64: "ZWNobyBoZWxsbw==", + ExpectedRequest: compute.GlobalInitScriptUpdateRequest{ + Name: "test", + Position: 0, + Script: "ZWNobyBoZWxsbw==", }, - Response: globalInitScriptCreateResponse{ - ScriptID: "1234", + Response: compute.CreateResponse{ + ScriptId: "1234", }, }, { Method: "GET", - Resource: "/api/2.0/global-init-scripts/1234", + Resource: "/api/2.0/global-init-scripts/1234?", ReuseRequest: true, - Response: GlobalInitScriptInfo{ - ScriptID: "1234", - ContentBase64: "ZWNobyBoZWxsbw==", - Position: 0, - Name: "test", + Response: compute.GlobalInitScriptDetailsWithContent{ + ScriptId: "1234", + Script: "ZWNobyBoZWxsbw==", + Position: 0, + 
Name: "test", }, }, },