diff --git a/.github/ISSUE_TEMPLATE/docs-issue.md b/.github/ISSUE_TEMPLATE/docs-issue.md index a3976c5e16..d30bf39518 100644 --- a/.github/ISSUE_TEMPLATE/docs-issue.md +++ b/.github/ISSUE_TEMPLATE/docs-issue.md @@ -16,10 +16,12 @@ This template is for both adding enhancement as well as pointing out issues with ### Expected Details -### List of things to potentially add/remove: +### List of things to potentially add/remove + This is a list of things to manipulate in the docs: + - [ ] First item to change -- [ ] Second item to change +- [ ] Second item to change ### Important Factoids diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 911c068e41..57d5b8b59f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -11,4 +11,3 @@ How is this tested? Please see the checklist below and also describe any other r - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK - diff --git a/CHANGELOG.md b/CHANGELOG.md index 9657cbc1e7..82efe91e2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Version changelog +## 1.37.1 + +### New Features and Improvements + * Removed `CustomizeDiff` and client-side validation for [databricks_grants](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grants) ([#3290](https://github.com/databricks/terraform-provider-databricks/pull/3290)). + * Added Terraform support for the Restrict Workspace Admins setting ([#3243](https://github.com/databricks/terraform-provider-databricks/pull/3243)). + +### Internal Changes + * Migrated [databricks_global_init_script](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/global_init_script) to Go SDK ([#2036](https://github.com/databricks/terraform-provider-databricks/pull/2036)). + * Bump github.com/hashicorp/terraform-plugin-sdk/v2 from 2.31.0 to 2.32.0 ([#3177](https://github.com/databricks/terraform-provider-databricks/pull/3177)). + + ## 1.37.0 ### New Features and Improvements diff --git a/README.md b/README.md index 31f9812d78..7ba3fb5b0e 100644 --- a/README.md +++ b/README.md @@ -167,7 +167,7 @@ To make Databricks Terraform Provider generally available, we've moved it from [ You should have [`.terraform.lock.hcl`](https://github.com/databrickslabs/terraform-provider-databricks/blob/v0.6.2/scripts/versions-lock.hcl) file in your state directory that is checked into source control. terraform init will give you the following warning. -``` +```text Warning: Additional provider information from registry The remote registry returned warnings for registry.terraform.io/databrickslabs/databricks: @@ -178,6 +178,6 @@ After you replace `databrickslabs/databricks` with `databricks/databricks` in th If you didn't check-in [`.terraform.lock.hcl`](https://www.terraform.io/language/files/dependency-lock#lock-file-location) to the source code version control, you may see `Failed to install provider` error. Please follow the simple steps described in the [troubleshooting guide](docs/guides/troubleshooting.md). -``` +```text Warning: Exporter is experimental and provided as is. It has an evolving interface, which may change or be removed in future versions of the provider.
``` diff --git a/catalog/permissions/permissions.go b/catalog/permissions/permissions.go index efd1f962b2..c0c21b45e9 100644 --- a/catalog/permissions/permissions.go +++ b/catalog/permissions/permissions.go @@ -92,6 +92,7 @@ func (sm SecurableMapping) KeyValue(d attributeGetter) (string, string) { } return field, v } + log.Printf("[WARN] Unexpected resource or permissions. Please proceed at your own risk.") return "unknown", "unknown" } func (sm SecurableMapping) Id(d *schema.ResourceData) string { diff --git a/catalog/resource_grants.go b/catalog/resource_grants.go index f2c799b430..58e9c69700 100644 --- a/catalog/resource_grants.go +++ b/catalog/resource_grants.go @@ -89,171 +89,6 @@ func replaceAllPermissions(a permissions.UnityCatalogPermissionsAPI, securable s }) } -type securableMapping map[string]map[string]bool - -// reuse ResourceDiff and ResourceData -type attributeGetter interface { - Get(key string) any -} - -func (sm securableMapping) kv(d attributeGetter) (string, string) { - for field := range sm { - v := d.Get(field).(string) - if v == "" { - continue - } - return field, v - } - return "unknown", "unknown" -} - -func (sm securableMapping) id(d *schema.ResourceData) string { - securable, name := sm.kv(d) - return fmt.Sprintf("%s/%s", securable, name) -} - -func (sm securableMapping) validate(d attributeGetter, pl PermissionsList) error { - securable, _ := sm.kv(d) - allowed, ok := sm[securable] - if !ok { - return fmt.Errorf(`%s is not fully supported yet`, securable) - } - for _, v := range pl.Assignments { - for _, priv := range v.Privileges { - if !allowed[strings.ToUpper(priv)] { - // check if user uses spaces instead of underscores - if allowed[strings.ReplaceAll(priv, " ", "_")] { - return fmt.Errorf(`%s is not allowed on %s. Did you mean %s?`, priv, securable, strings.ReplaceAll(priv, " ", "_")) - } - return fmt.Errorf(`%s is not allowed on %s`, priv, securable) - } - } - } - return nil -} - -var mapping = securableMapping{ - // add other securable mappings once needed - "table": { - "MODIFY": true, - "SELECT": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "BROWSE": true, - }, - "catalog": { - "CREATE": true, - "USAGE": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "USE_CATALOG": true, - "USE_SCHEMA": true, - "CREATE_SCHEMA": true, - "CREATE_TABLE": true, - "CREATE_FUNCTION": true, - "CREATE_MATERIALIZED_VIEW": true, - "CREATE_MODEL": true, - "CREATE_VOLUME": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - "EXECUTE": true, - "MODIFY": true, - "SELECT": true, - "REFRESH": true, - "BROWSE": true, - }, - "schema": { - "CREATE": true, - "USAGE": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "USE_SCHEMA": true, - "CREATE_TABLE": true, - "CREATE_FUNCTION": true, - "CREATE_MATERIALIZED_VIEW": true, - "CREATE_MODEL": true, - "CREATE_VOLUME": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - "EXECUTE": true, - "MODIFY": true, - "SELECT": true, - "REFRESH": true, - "BROWSE": true, - }, - "storage_credential": { - "CREATE_TABLE": true, - "READ_FILES": true, - "WRITE_FILES": true, - "CREATE_EXTERNAL_LOCATION": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "CREATE_EXTERNAL_TABLE": true, - }, - "external_location": { - "CREATE_TABLE": true, - "READ_FILES": true, - "WRITE_FILES": true, - - // v1.0 - "ALL_PRIVILEGES": true, - "CREATE_EXTERNAL_TABLE": true, - "CREATE_MANAGED_STORAGE": true, - "CREATE_EXTERNAL_VOLUME": true, - "BROWSE": true, - }, - "metastore": { - // v1.0 - "CREATE_CATALOG": true, - 
"CREATE_CLEAN_ROOM": true, - "CREATE_CONNECTION": true, - "CREATE_EXTERNAL_LOCATION": true, - "CREATE_STORAGE_CREDENTIAL": true, - "CREATE_SHARE": true, - "CREATE_RECIPIENT": true, - "CREATE_PROVIDER": true, - "MANAGE_ALLOWLIST": true, - "USE_CONNECTION": true, - "USE_PROVIDER": true, - "USE_SHARE": true, - "USE_RECIPIENT": true, - "USE_MARKETPLACE_ASSETS": true, - "SET_SHARE_PERMISSION": true, - }, - "function": { - "ALL_PRIVILEGES": true, - "EXECUTE": true, - }, - "model": { - "ALL_PRIVILEGES": true, - "APPLY_TAG": true, - "EXECUTE": true, - }, - "share": { - "SELECT": true, - }, - "volume": { - "ALL_PRIVILEGES": true, - "READ_VOLUME": true, - "WRITE_VOLUME": true, - }, - // avoid reserved field - "foreign_connection": { - "ALL_PRIVILEGES": true, - "CREATE_FOREIGN_CATALOG": true, - "CREATE_FOREIGN_SCHEMA": true, - "CREATE_FOREIGN_TABLE": true, - "USE_CONNECTION": true, - }, -} - func (pl PermissionsList) toSdkPermissionsList() (out catalog.PermissionsList) { for _, v := range pl.Assignments { privileges := []catalog.Privilege{} @@ -294,7 +129,7 @@ func ResourceGrants() common.Resource { s := common.StructToSchema(PermissionsList{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { alof := []string{} - for field := range mapping { + for field := range permissions.Mappings { s[field] = &schema.Schema{ Type: schema.TypeString, ForceNew: true, @@ -302,22 +137,13 @@ func ResourceGrants() common.Resource { } alof = append(alof, field) } - for field := range mapping { + for field := range permissions.Mappings { s[field].AtLeastOneOf = alof } return s }) return common.Resource{ Schema: s, - CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error { - if d.Id() == "" { - // unfortunately we cannot do validation before dependent resources exist with tfsdkv2 - return nil - } - var grants PermissionsList - common.DiffToStructPointer(d, s, &grants) - return mapping.validate(d, grants) - }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() if err != nil { @@ -329,17 +155,17 @@ func ResourceGrants() common.Resource { } var grants PermissionsList common.DataToStructPointer(d, s, &grants) - securable, name := mapping.kv(d) - err = mapping.validate(d, grants) + err = mapping.validate(d, grants) if err != nil { return err } + securable, name := permissions.Mappings.KeyValue(d) unityCatalogPermissionsAPI := permissions.NewUnityCatalogPermissionsAPI(ctx, c) err = replaceAllPermissions(unityCatalogPermissionsAPI, securable, name, grants.toSdkPermissionsList()) if err != nil { return err } - d.SetId(mapping.id(d)) + d.SetId(permissions.Mappings.Id(d)) return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/catalog/resource_grants_test.go b/catalog/resource_grants_test.go index 35a132f6b1..56359c60b4 100644 --- a/catalog/resource_grants_test.go +++ b/catalog/resource_grants_test.go @@ -358,31 +358,6 @@ func TestGrantReadMalformedId(t *testing.T) { }.ExpectError(t, "ID must be two elements split by `/`: foo.bar") } -type data map[string]string - -func (a data) Get(k string) any { - return a[k] -} - -func TestMappingUnsupported(t *testing.T) { - d := data{"nothing": "here"} - err := mapping.validate(d, PermissionsList{}) - assert.EqualError(t, err, "unknown is not fully supported yet") -} - -func TestInvalidPrivilege(t *testing.T) { - d := data{"table": "me"} - err := mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - 
Principal: "me", - Privileges: []string{"EVERYTHING"}, - }, - }, - }) - assert.EqualError(t, err, "EVERYTHING is not allowed on table") -} - func TestPermissionsList_Diff_ExternallyAddedPrincipal(t *testing.T) { diff := diffPermissions( catalog.PermissionsList{ // config @@ -600,30 +575,6 @@ func TestShareGrantUpdate(t *testing.T) { }.ApplyNoError(t) } -func TestPrivilegeWithSpace(t *testing.T) { - d := data{"table": "me"} - err := mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - Principal: "me", - Privileges: []string{"ALL PRIVILEGES"}, - }, - }, - }) - assert.EqualError(t, err, "ALL PRIVILEGES is not allowed on table. Did you mean ALL_PRIVILEGES?") - - d = data{"external_location": "me"} - err = mapping.validate(d, PermissionsList{ - Assignments: []PrivilegeAssignment{ - { - Principal: "me", - Privileges: []string{"CREATE TABLE"}, - }, - }, - }) - assert.EqualError(t, err, "CREATE TABLE is not allowed on external_location. Did you mean CREATE_TABLE?") -} - func TestConnectionGrantCreate(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/common/version.go b/common/version.go index 0cfc3c9131..596f1eedc0 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.37.0" + version = "1.37.1" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index ed5d4cd1ee..6cf4f73618 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -91,6 +91,6 @@ In addition to all arguments above, the following attributes are exported: The following resources are used in the same context: * [Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection](../guides/aws-e2-firewall-hub-and-spoke.md) guide. -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_instance_profile](../resources/instance_profile.md) to manage AWS EC2 instance profiles that users can launch [databricks_cluster](../resources/cluster.md) and access data, like [databricks_mount](../resources/mount.md). * [databricks_mount](../resources/mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index bb442e7cf6..7e5cdbba2b 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -56,7 +56,7 @@ This data source exports the following attributes: The following resources are often used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. 
* [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index 70995952f7..60436d1ad7 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -40,10 +40,10 @@ This data source exports the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. * [databricks_job](../resources/job.md) to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a [databricks_cluster](../resources/cluster.md). * [databricks_library](../resources/library.md) to install a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](../resources/cluster.md). -* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). diff --git a/docs/data-sources/current_config.md b/docs/data-sources/current_config.md index 7631e78acf..1d24ff3ecf 100644 --- a/docs/data-sources/current_config.md +++ b/docs/data-sources/current_config.md @@ -51,7 +51,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). * [databricks_notebook](../resources/notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_repo](../resources/repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/data-sources/current_metastore.md b/docs/data-sources/current_metastore.md index e3bd858fa1..d2add1a2eb 100644 --- a/docs/data-sources/current_metastore.md +++ b/docs/data-sources/current_metastore.md @@ -38,7 +38,7 @@ This data source exports the following attributes: * `storage_root_credential_id` - ID of a storage credential used for the `storage_root`. * `storage_root_credential_name` - Name of a storage credential used for the `storage_root`. * `default_data_access_config_id` - the ID of the default data access configuration. 
- * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - the expiration duration in seconds on recipient data access tokens. * `delta_sharing_organization_name` - The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. * `created_at` - Timestamp (in milliseconds) when the current metastore was created. diff --git a/docs/data-sources/current_user.md b/docs/data-sources/current_user.md index 80d8a920f6..47a5fdc9c3 100644 --- a/docs/data-sources/current_user.md +++ b/docs/data-sources/current_user.md @@ -72,7 +72,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). * [databricks_notebook](../resources/notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_repo](../resources/repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/data-sources/dbfs_file.md b/docs/data-sources/dbfs_file.md index 7e71213951..16a14c5479 100644 --- a/docs/data-sources/dbfs_file.md +++ b/docs/data-sources/dbfs_file.md @@ -15,10 +15,11 @@ data "databricks_dbfs_file" "report" { limit_file_size = "true" } ``` + ## Argument Reference * `path` - (Required) Path on DBFS for the file from which to get content. -* `limit_file_size` - (Required - boolean) Do not load content for files larger than 4MB. +* `limit_file_size` - (Required - boolean) Do not load content for files larger than 4MB. ## Attribute Reference @@ -31,7 +32,7 @@ This data source exports the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_dbfs_file_paths](dbfs_file_paths.md) data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_dbfs_file](../resources/dbfs_file.md) to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_mount](../resources/mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. 
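For context on the `dbfs_file` documentation touched above, a minimal usage sketch might look like the following; the DBFS path and output name are illustrative assumptions, and it presumes the data source exports a `file_size` attribute alongside the file content.

```hcl
# Illustrative only: read a small file from DBFS and expose its size.
data "databricks_dbfs_file" "report" {
  path            = "dbfs:/reports/some_report.csv" # hypothetical path
  limit_file_size = true
}

output "report_file_size" {
  # Assumed attribute: size of the file in bytes.
  value = data.databricks_dbfs_file.report.file_size
}
```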
diff --git a/docs/data-sources/dbfs_file_paths.md b/docs/data-sources/dbfs_file_paths.md index 7fe88130df..caee7471dd 100644 --- a/docs/data-sources/dbfs_file_paths.md +++ b/docs/data-sources/dbfs_file_paths.md @@ -15,6 +15,7 @@ data "databricks_dbfs_file_paths" "partitions" { recursive = false } ``` + ## Argument Reference * `path` - (Required) Path on DBFS for the file to perform listing @@ -26,12 +27,11 @@ This data source exports the following attributes: * `path_list` - returns list of objects with `path` and `file_size` attributes in each - ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_dbfs_file](dbfs_file.md) data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_dbfs_file_paths](dbfs_file_paths.md) data to get list of file names from get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). * [databricks_dbfs_file](../resources/dbfs_file.md) to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html). diff --git a/docs/data-sources/group.md b/docs/data-sources/group.md index 859c864788..aa6481cd36 100644 --- a/docs/data-sources/group.md +++ b/docs/data-sources/group.md @@ -48,12 +48,11 @@ Data source exposes the following attributes: * `allow_instance_pool_create` - True if group members can create [instance pools](../resources/instance_pool.md) * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `groups/Some Group`. - ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide +* [End to end workspace management](../guides/workspace-management.md) guide * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_directory](../resources/directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). * [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. diff --git a/docs/data-sources/instance_pool.md b/docs/data-sources/instance_pool.md index b1bc904264..052e4dea28 100644 --- a/docs/data-sources/instance_pool.md +++ b/docs/data-sources/instance_pool.md @@ -35,4 +35,3 @@ Data source exposes the following attributes: - `id` - The id of the instance pool. - `pool_info` - block describing instance pool and its state. Check documentation for [databricks_instance_pool](../resources/instance_pool.md) for a list of exposed attributes. - diff --git a/docs/data-sources/instance_profiles.md b/docs/data-sources/instance_profiles.md index 1d4172446e..3339305715 100644 --- a/docs/data-sources/instance_profiles.md +++ b/docs/data-sources/instance_profiles.md @@ -26,6 +26,7 @@ There are no arguments available for this data source. ## Attribute Reference This data source exports the following attributes: + * `instance_profiles` - Set of objects for a [databricks_instance_profile](../resources/instance_profile.md). 
This contains the following attributes: * `name` - Name of the instance profile. * `arn` - ARN of the instance profile. diff --git a/docs/data-sources/job.md b/docs/data-sources/job.md index fd195799e8..ec4ba1f286 100755 --- a/docs/data-sources/job.md +++ b/docs/data-sources/job.md @@ -26,7 +26,6 @@ output "job_num_workers" { This data source exports the following attributes: - * `id` - the id of [databricks_job](../resources/job.md) if the resource was matched by name. * `name` - the job name of [databricks_job](../resources/job.md) if the resource was matched by id. * `job_settings` - the same fields as in [databricks_job](../resources/job.md). diff --git a/docs/data-sources/metastore.md b/docs/data-sources/metastore.md index cd7311011a..f8fbf44ae6 100644 --- a/docs/data-sources/metastore.md +++ b/docs/data-sources/metastore.md @@ -44,7 +44,7 @@ This data source exports the following attributes: * `name` - Name of metastore. * `storage_root` - Path on cloud storage account, where managed `databricks_table` are stored. * `owner` - Username/groupname/sp application_id of the metastore owner. - * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * `delta_sharing_scope` - Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - Used to set expiration duration in seconds on recipient data access tokens. * `delta_sharing_organization_name` - The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. diff --git a/docs/data-sources/mlflow_model.md b/docs/data-sources/mlflow_model.md index c05af7fcbc..dcd69a3a86 100644 --- a/docs/data-sources/mlflow_model.md +++ b/docs/data-sources/mlflow_model.md @@ -63,10 +63,10 @@ resource "databricks_model_serving" "this" { This data source exports the following attributes: * `model` - Model object - * `description` - User-specified description for the object. - * `id` - Unique identifier for the object. - * `latest_versions` - Array of model versions, each the latest version for its stage. - * `name` - Name of the model. - * `permission_level` - Permission level of the requesting user on the object. For what is allowed at each level, see MLflow Model permissions. - * `tags` - Array of tags associated with the model. - * `user_id` - The username of the user that created the object. + * `description` - User-specified description for the object. + * `id` - Unique identifier for the object. + * `latest_versions` - Array of model versions, each the latest version for its stage. + * `name` - Name of the model. + * `permission_level` - Permission level of the requesting user on the object. For what is allowed at each level, see MLflow Model permissions. + * `tags` - Array of tags associated with the model. + * `user_id` - The username of the user that created the object. 
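To make the `mlflow_model` attribute list above concrete, a hedged sketch follows; the model name is a placeholder, and the output re-exports the whole looked-up object rather than assuming a particular attribute layout.

```hcl
# Illustrative only: look up a registered MLflow model by name.
data "databricks_mlflow_model" "this" {
  name = "My MLflow Model" # hypothetical model name
}

output "mlflow_model_details" {
  # Exposes the data source result (description, latest_versions, tags, etc.).
  value = data.databricks_mlflow_model.this
}
```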
diff --git a/docs/data-sources/mws_workspaces.md b/docs/data-sources/mws_workspaces.md index 9474a4e46d..87c0590e09 100755 --- a/docs/data-sources/mws_workspaces.md +++ b/docs/data-sources/mws_workspaces.md @@ -11,7 +11,7 @@ Lists all [databricks_mws_workspaces](../resources/mws_workspaces.md) in Databri ## Example Usage -Listing all workspaces in +Listing all workspaces in ```hcl provider "databricks" { diff --git a/docs/data-sources/node_type.md b/docs/data-sources/node_type.md index 2152addb78..43bb36029b 100644 --- a/docs/data-sources/node_type.md +++ b/docs/data-sources/node_type.md @@ -40,12 +40,12 @@ resource "databricks_cluster" "research" { Data source allows you to pick groups by the following attributes -* `min_memory_gb` - (Optional) Minimum amount of memory per node in gigabytes. Defaults to *0*. -* `gb_per_core` - (Optional) Number of gigabytes per core available on instance. Conflicts with `min_memory_gb`. Defaults to *0*. -* `min_cores` - (Optional) Minimum number of CPU cores available on instance. Defaults to *0*. -* `min_gpus` - (Optional) Minimum number of GPU's attached to instance. Defaults to *0*. -* `local_disk` - (Optional) Pick only nodes with local storage. Defaults to *false*. -* `local_disk_min_size` - (Optional) Pick only nodes that have size local storage greater or equal to given value. Defaults to *0*. +* `min_memory_gb` - (Optional) Minimum amount of memory per node in gigabytes. Defaults to _0_. +* `gb_per_core` - (Optional) Number of gigabytes per core available on instance. Conflicts with `min_memory_gb`. Defaults to _0_. +* `min_cores` - (Optional) Minimum number of CPU cores available on instance. Defaults to _0_. +* `min_gpus` - (Optional) Minimum number of GPU's attached to instance. Defaults to _0_. +* `local_disk` - (Optional) Pick only nodes with local storage. Defaults to _false_. +* `local_disk_min_size` - (Optional) Pick only nodes that have size local storage greater or equal to given value. Defaults to _0_. * `category` - (Optional, case insensitive string) Node category, which can be one of (depending on the cloud environment, could be checked with `databricks clusters list-node-types -o json|jq '.node_types[]|.category'|sort |uniq`): * `General Purpose` (all clouds) * `General Purpose (HDD)` (Azure) @@ -54,12 +54,12 @@ Data source allows you to pick groups by the following attributes * `Memory Optimized (Remote HDD)` (Azure) * `Storage Optimized` (AWS, Azure) * `GPU Accelerated` (AWS, Azure) -* `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to *false*. -* `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to *false*. -* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton CPUs. Default to *false*. -* `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to *false*. -* `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to *false*. -* `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to *false*. +* `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to _false_. +* `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to _false_. +* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton CPUs. Default to _false_. 
+* `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to _false_. +* `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to _false_. +* `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to _false_. ## Attribute Reference @@ -71,7 +71,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. diff --git a/docs/data-sources/pipelines.md b/docs/data-sources/pipelines.md index d0223725ee..a7d02a47d1 100755 --- a/docs/data-sources/pipelines.md +++ b/docs/data-sources/pipelines.md @@ -5,7 +5,7 @@ subcategory: "Compute" -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _authentication is not configured for provider_ errors. -Retrieves a list of all [databricks_pipeline](../resources/pipeline.md) ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. +Retrieves a list of all [databricks_pipeline](../resources/pipeline.md) ([Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html)) ids deployed in a workspace, or those matching the provided search term. Maximum 100 results. ## Example Usage @@ -49,7 +49,6 @@ This data source exports the following attributes: * `pipeline_name` - (Optional) Filter Delta Live Tables pipelines by name for a given search term. `%` is the supported wildcard operator. - ## Attribute Reference This data source exports the following attributes: diff --git a/docs/data-sources/service_principal.md b/docs/data-sources/service_principal.md index 983e351bf8..f9c7b593aa 100644 --- a/docs/data-sources/service_principal.md +++ b/docs/data-sources/service_principal.md @@ -44,13 +44,14 @@ Data source exposes the following attributes: - `home` - Home folder of the [service principal](../resources/service_principal.md), e.g. `/Users/11111111-2222-3333-4444-555666777888`. - `repos` - Repos location of the [service principal](../resources/service_principal.md), e.g. `/Repos/11111111-2222-3333-4444-555666777888`. - `active` - Whether service principal is active or not. 
+ * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. ## Related Resources The following resources are used in the same context: -- [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +- [End to end workspace management](../guides/workspace-management.md) guide. - [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. - [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). - [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. diff --git a/docs/data-sources/service_principals.md b/docs/data-sources/service_principals.md index ca5522014f..457e0f98bc 100644 --- a/docs/data-sources/service_principals.md +++ b/docs/data-sources/service_principals.md @@ -49,11 +49,11 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. -* [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. -* [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). -* [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. -* [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). -* [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. -* [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. -* [databricks_service principal](../resources/service_principal.md) to manage [service principals](../resources/service_principal.md) +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. +- [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). +- [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. 
+- [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). +- [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. +- [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. +- [databricks_service principal](../resources/service_principal.md) to manage [service principals](../resources/service_principal.md) diff --git a/docs/data-sources/spark_version.md b/docs/data-sources/spark_version.md index be45e869b9..3c91c7f0d0 100644 --- a/docs/data-sources/spark_version.md +++ b/docs/data-sources/spark_version.md @@ -49,7 +49,7 @@ Data source allows you to pick groups by the following attributes: * `scala` - (string, optional) if we should limit the search only to runtimes that are based on specific Scala version. Default to `2.12`. * `spark_version` - (string, optional) if we should limit the search only to runtimes that are based on specific Spark version. Default to empty string. It could be specified as `3`, or `3.0`, or full version, like, `3.0.1`. * `photon` - (boolean, optional) if we should limit the search only to Photon runtimes. Default to `false`. *Deprecated with DBR 14.0 release. Specify `runtime_engine=\"PHOTON\"` in the cluster configuration instead!* -* `graviton` - (boolean, optional) if we should limit the search only to runtimes supporting AWS Graviton CPUs. Default to `false`. *Deprecated with DBR 14.0 release. DBR version compiled for Graviton will be automatically installed when nodes with Graviton CPUs are specified in the cluster configuration.* +* `graviton` - (boolean, optional) if we should limit the search only to runtimes supporting AWS Graviton CPUs. Default to `false`. _Deprecated with DBR 14.0 release. DBR version compiled for Graviton will be automatically installed when nodes with Graviton CPUs are specified in the cluster configuration._ ## Attribute Reference @@ -61,7 +61,7 @@ Data source exposes the following attributes: The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. +* [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_cluster](../resources/cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * [databricks_cluster_policy](../resources/cluster_policy.md) to create a [databricks_cluster](../resources/cluster.md) policy, which limits the ability to create clusters based on a set of rules. * [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. diff --git a/docs/data-sources/sql_warehouse.md b/docs/data-sources/sql_warehouse.md index 897d228e25..e601a183cd 100644 --- a/docs/data-sources/sql_warehouse.md +++ b/docs/data-sources/sql_warehouse.md @@ -49,9 +49,9 @@ This data source exports the following attributes: * `enable_photon` - Whether [Photon](https://databricks.com/product/delta-engine) is enabled. * `enable_serverless_compute` - Whether this SQL warehouse is a serverless SQL warehouse. 
- - **For AWS**: If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). + * **For AWS**: If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - - **For Azure**, you must [enable your workspace for serverless SQL warehouse](https://learn.microsoft.com/azure/databricks/sql/admin/serverless). + * **For Azure**, you must [enable your workspace for serverless SQL warehouse](https://learn.microsoft.com/azure/databricks/sql/admin/serverless). * `warehouse_type` - SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). * `channel` block, consisting of following fields: diff --git a/docs/data-sources/user.md b/docs/data-sources/user.md index cea0e38709..7a93272ede 100644 --- a/docs/data-sources/user.md +++ b/docs/data-sources/user.md @@ -32,7 +32,7 @@ resource "databricks_group_member" "my_member_a" { Data source allows you to pick groups by the following attributes - `user_name` - (Optional) User name of the user. The user must exist before this resource can be planned. -- `user_id` - (Optional) ID of the user. +- `user_id` - (Optional) ID of the user. ## Attribute Reference @@ -45,18 +45,19 @@ Data source exposes the following attributes: - `home` - Home folder of the [user](../resources/user.md), e.g. `/Users/mr.foo@example.com`. - `repos` - Personal Repos location of the [user](../resources/user.md), e.g. `/Repos/mr.foo@example.com`. - `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. + * `acl_principal_id` - identifier for use in [databricks_access_control_rule_set](../resources/access_control_rule_set.md), e.g. `users/mr.foo@example.com`. ## Related Resources The following resources are used in the same context: -* [End to end workspace management](../guides/passthrough-cluster-per-user.md) guide. -* [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. -* [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). -* [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. -* [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). 
-* [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. -* [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. -* [databricks_user](../resources/user.md) to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to [databricks_group](../resources/group.md) within the workspace. -* [databricks_user_instance_profile](../resources/user_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_user](../resources/user.md). +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_current_user](current_user.md) data to retrieve information about [databricks_user](../resources/user.md) or [databricks_service_principal](../resources/service_principal.md), that is calling Databricks REST API. +- [databricks_group](../resources/group.md) to manage [groups in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/groups.html) or [Account Console](https://accounts.cloud.databricks.com/) (for AWS deployments). +- [databricks_group](group.md) data to retrieve information about [databricks_group](../resources/group.md) members, entitlements and instance profiles. +- [databricks_group_instance_profile](../resources/group_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_group](../resources/group.md). +- [databricks_group_member](../resources/group_member.md) to attach [users](../resources/user.md) and [groups](../resources/group.md) as group members. +- [databricks_permissions](../resources/permissions.md) to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. +- [databricks_user](../resources/user.md) to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to [databricks_group](../resources/group.md) within the workspace. +- [databricks_user_instance_profile](../resources/user_instance_profile.md) to attach [databricks_instance_profile](../resources/instance_profile.md) (AWS) to [databricks_user](../resources/user.md). diff --git a/docs/data-sources/zones.md b/docs/data-sources/zones.md index 4813ecd406..d38379d902 100644 --- a/docs/data-sources/zones.md +++ b/docs/data-sources/zones.md @@ -12,6 +12,7 @@ This data source allows you to fetch all available AWS availability zones on you ```hcl data "databricks_zones" "zones" {} ``` + ## Argument Reference There are no arguments to this data source and only attributes that are computed. 
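Since the zones data source takes no arguments, a short consumption sketch may help; the `default_zone` and `zones` attribute names are assumptions based on the surrounding docs, not values introduced by this changeset.

```hcl
# Illustrative only: list AWS availability zones visible to the workspace.
data "databricks_zones" "zones" {}

output "default_zone" {
  # Assumed attribute: the default availability zone for the workspace.
  value = data.databricks_zones.zones.default_zone
}

output "all_zones" {
  # Assumed attribute: the full list of available zones.
  value = data.databricks_zones.zones.zones
}
```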
diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 76c2aab224..429dcd8834 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -349,4 +349,4 @@ Error: MALFORMED_REQUEST: Failed credentials validation checks: Spot Cancellatio ### More than one authorization method configured error -See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) \ No newline at end of file +See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) diff --git a/docs/guides/azure-private-link-workspace-simplified.md b/docs/guides/azure-private-link-workspace-simplified.md index b27131248a..483ed910f9 100644 --- a/docs/guides/azure-private-link-workspace-simplified.md +++ b/docs/guides/azure-private-link-workspace-simplified.md @@ -13,6 +13,7 @@ page_title: "Provisioning Azure Databricks with Private Link - Simple deployment You can use Terraform to deploy the underlying cloud resources and the private access settings resources automatically using a programmatic approach. This guide covers a [simple deployment](https://learn.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/private-link-simplified) to configure Azure Databricks with Private Link: + * No separate VNet separates user access from the VNet that you use for your compute resources in the Classic data plane * A transit subnet in the data plane VNet is used for user access * Only a single private endpoint is used for both front-end and back-end connectivity. @@ -23,25 +24,25 @@ This guide covers a [simple deployment](https://learn.microsoft.com/en-us/azure/ This guide uses the following variables: -- `cidr`: The CIDR for the Azure Vnet -- `rg_name`: The name of the existing resource group -- `location`: The location for Azure resources +* `cidr`: The CIDR for the Azure Vnet +* `rg_name`: The name of the existing resource group +* `location`: The location for Azure resources This guide is provided as-is, and you can use it as the basis for your custom Terraform module. 
This guide takes you through the following high-level steps to set up a workspace with Azure Private Link: -- Initialize the required providers -- Configure Azure objects: - - Deploy an Azure Vnet with the following subnets: - - Public and private subnets for Azure Databricks workspace - - Private Link subnet that will contain the following private endpoints: - - Frontend / Backend private endpoint - - Web_auth private endpoint - - Configure the private DNS zone to add: - - DNS A record to map connection for workspace access - - DNS A record(s) for web_auth -- Workspace Creation +* Initialize the required providers +* Configure Azure objects: + * Deploy an Azure Vnet with the following subnets: + * Public and private subnets for Azure Databricks workspace + * Private Link subnet that will contain the following private endpoints: + * Frontend / Backend private endpoint + * Web_auth private endpoint + * Configure the private DNS zone to add: + * DNS A record to map connection for workspace access + * DNS A record(s) for web_auth +* Workspace Creation ## Provider initialization @@ -218,7 +219,6 @@ resource "azurerm_subnet" "plsubnet" { Create a private endpoint with sub-resource **databricks_ui_api**: - ```hcl resource "azurerm_private_endpoint" "uiapi" { name = "uiapipvtendpoint" diff --git a/docs/guides/azure-private-link-workspace-standard.md b/docs/guides/azure-private-link-workspace-standard.md index 982b36f49b..6b9e3431ea 100644 --- a/docs/guides/azure-private-link-workspace-standard.md +++ b/docs/guides/azure-private-link-workspace-standard.md @@ -2,30 +2,30 @@ page_title: "Provisioning Azure Databricks with Private Link - Standard deployment." --- -# Deploying pre-requisite resources and enabling Private Link connections - Standard deployment. +# Deploying pre-requisite resources and enabling Private Link connections - Standard deployment -> **Note** - - Refer to [adb-with-private-link-standard](https://github.com/databricks/terraform-databricks-examples/tree/main/modules/adb-with-private-link-standard), a Terraform module that contains code used to deploy an Azure Databricks workspace with Azure Private Link using the Standard deployment approach. - - Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for more Terraform modules and examples to deploy Azure Databricks resources. - - This guide assumes that connectivity from the on-premises user environment is already configured using ExpressRoute or a VPN gateway connection. + +- Refer to [adb-with-private-link-standard](https://github.com/databricks/terraform-databricks-examples/tree/main/modules/adb-with-private-link-standard), a Terraform module that contains code used to deploy an Azure Databricks workspace with Azure Private Link using the Standard deployment approach. +- Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for more Terraform modules and examples to deploy Azure Databricks resources. +- This guide assumes that connectivity from the on-premises user environment is already configured using ExpressRoute or a VPN gateway connection. [Azure Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-overview) support enables private connectivity between users and their Databricks workspaces and between clusters on the data plane and core services on the control plane within the Databricks workspace infrastructure. 
You can use Terraform to deploy the underlying cloud resources and the private access settings resources automatically using a programmatic approach. - This guide covers a [standard deployment](https://learn.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/private-link-standard) to configure Azure Databricks with Private Link: -* Two separate VNets are used: - * A transit VNet - * A customer Data Plane VNet -* A private endpoint is used for back-end connectivity and deployed in the customer Data Plane VNet. -* A private endpoint is used for front-end connectivity and deployed in the transit VNet. -* A private endpoint is used for web authentication and deployed in the transit VNet. -* A dedicated Databricks workspace, called Web Auth workspace, is used for web authentication traffic. This workspace is configured with the sub-resource **browser_authentication** and deployed using subnets in the transit VNet. +- Two separate VNets are used: + - A transit VNet + - A customer Data Plane VNet +- A private endpoint is used for back-end connectivity and deployed in the customer Data Plane VNet. +- A private endpoint is used for front-end connectivity and deployed in the transit VNet. +- A private endpoint is used for web authentication and deployed in the transit VNet. +- A dedicated Databricks workspace, called Web Auth workspace, is used for web authentication traffic. This workspace is configured with the sub-resource **browser_authentication** and deployed using subnets in the transit VNet. -> **Note** -* A separate Web Auth workspace is not mandatory but recommended. -* DNS mapping for SSO login callbacks to the Azure Databricks web application can be managed by the Web Auth workspace or another workspace associated with the **browser_authentication** private endpoint. +- A separate Web Auth workspace is not mandatory but recommended. +- DNS mapping for SSO login callbacks to the Azure Databricks web application can be managed by the Web Auth workspace or another workspace associated with the **browser_authentication** private endpoint. ![Azure Databricks with Private Link - Standard deployment](https://github.com/databricks/terraform-provider-databricks/raw/master/docs/images/azure-private-link-standard.png) @@ -44,14 +44,14 @@ This guide takes you through the following high-level steps to set up a workspac - Initialize the required providers - Configure Azure objects: - Deploy two Azure VNets with the following subnets: - - Public and private subnets for each Azure Databricks workspace in the Data Plane VNet - - Private Link subnet in the Data Plane VNet that will contain the Backend private endpoint + - Public and private subnets for each Azure Databricks workspace in the Data Plane VNet + - Private Link subnet in the Data Plane VNet that will contain the Backend private endpoint - Private Link subnet in the Transit VNet that will contain the following private endpoints: - - Frontend private endpoint - - Web auth private endpoint + - Frontend private endpoint + - Web auth private endpoint - Configure the private DNS zone to add: - - DNS A record to map connection for workspace access - - DNS A record(s) for web_auth + - DNS A record to map connection for workspace access + - DNS A record(s) for web_auth - Workspace Creation ## Provider initialization @@ -124,12 +124,12 @@ locals { ## Summary -* In the Transit resource group: +- In the Transit resource group: 1. Create a Transit VNet 2. Create a private DNS zone 3. 
Create Web Auth Databricks workspace with the sub-resource **browser_authentication** 4. Create a Frontend private endpoint with the sub-resource **databricks_ui_api** -* In the Data Plane resource group: +- In the Data Plane resource group: 1. Create a Data Plane VNet 2. Create a private DNS zone 3. Create a new Azure Databricks workspace @@ -518,5 +518,6 @@ resource "azurerm_private_endpoint" "app_dpcp" { ``` -> **Note** + - The public network access to the workspace is disabled. You can access the workspace only through private connectivity to the on-premises user environment. For testing purposes, you can deploy an Azure VM in the Transit VNet to test the frontend connectivity. - If you wish to deploy a test VM in the Data Plane VNet, you should configure a peering connection between the two VNets diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index f8371928bd..6c58361da4 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -21,10 +21,10 @@ Exporter can also be used in a non-interactive mode: export DATABRICKS_HOST=... export DATABRICKS_TOKEN=... ./terraform-provider-databricks exporter -skip-interactive \ - -services=groups,secrets,access,compute,users,jobs,storage \ - -listing=jobs,compute \ - -last-active-days=90 \ - -debug + -services=groups,secrets,access,compute,users,jobs,storage \ + -listing=jobs,compute \ + -last-active-days=90 \ + -debug ``` ## Argument Reference @@ -108,7 +108,6 @@ To speed up export, Terraform Exporter performs many operations, such as listing * `EXPORTER_PARALLELISM_NNN` - number of Goroutines used to process resources of a specific type (replace `NNN` with the exact resource name, for example, `EXPORTER_PARALLELISM_databricks_notebook=10` sets the number of Goroutines for `databricks_notebook` resource to `10`). There is a shared channel (with name `default`) for handling of resources for which there are no dedicated channels - use `EXPORTER_PARALLELISM_default` to increase it's size (default size is `15`). Defaults for some resources are defined by the `goroutinesNumber` map in `exporter/context.go` or equal to `2` if there is no value. *Don't increase default values too much to avoid REST API throttling!* * `EXPORTER_DEFAULT_HANDLER_CHANNEL_SIZE` - the size of the shared channel (default: `200000`) - you may need to increase it if you have a huge workspace. - ## Support Matrix Exporter aims to generate HCL code for most of the resources within the Databricks workspace: @@ -174,4 +173,4 @@ Exporter aims to generate HCL code for most of the resources within the Databric Notes: -- \* - libraries are exported as blocks inside the cluster definition instead of generating `databricks_library` resources. This is done to decrease the number of generated resources. +* \* - libraries are exported as blocks inside the cluster definition instead of generating `databricks_library` resources. This is done to decrease the number of generated resources. 
diff --git a/docs/guides/gcp-workspace.md b/docs/guides/gcp-workspace.md index 02dedc63b5..87aa056687 100644 --- a/docs/guides/gcp-workspace.md +++ b/docs/guides/gcp-workspace.md @@ -271,4 +271,4 @@ We assume that you have a terraform module in your project that creates a worksp ### More than one authorization method configured error -See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) \ No newline at end of file +See the [troubleshooting guide](https://registry.terraform.io/providers/databricks/databricks/latest/docs/guides/troubleshooting#more-than-one-authorization-method-configured) diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index dc0e980de3..e6113f98a9 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -58,16 +58,16 @@ terraform { │ │ ├── main.tf │ │ └── versions.tf │ └── production -│ ├── README.md -│ ├── main.tf -│ └── versions.tf +│ ├── README.md +│ ├── main.tf +│ └── versions.tf └── modules - ├── first-module - │ ├── ... - │ └── versions.tf - └── second-module - ├── ... - └── versions.tf + ├── first-module + │ ├── ... + │ └── versions.tf + └── second-module + ├── ... + └── versions.tf ``` ### Error: Failed to install provider @@ -135,7 +135,6 @@ then it means that you're trying to access a workspace that uses private link wi ### Error: ....: Unauthorized access to Org: NNNNNNNNNN - There are a few possible reasons for this error: * You’re trying to access a Databricks workspace with a private link enabled and public network access set to disabled. Typically this happens when a computer from which you’re running terraform apply or terraform plan doesn’t have domain name resolution configured correctly, and Terraform is reaching the workspace via a public IP address. Also, this may happen when you’re accessing the internet via a proxy, so all traffic from Terraform is forwarded to the proxy, and routed via the public internet. @@ -203,4 +202,4 @@ provider "databricks" { } ``` -The above would enforce the use of PAT authorization. \ No newline at end of file +The above would enforce the use of PAT authorization. diff --git a/docs/guides/unity-catalog.md b/docs/guides/unity-catalog.md index 798591bf0e..8d5b632b09 100644 --- a/docs/guides/unity-catalog.md +++ b/docs/guides/unity-catalog.md @@ -250,13 +250,13 @@ data "aws_iam_policy_document" "passrole_for_uc" { effect = "Allow" actions = ["sts:AssumeRole"] principals { - identifiers = [databricks_storage_credential.external.aws_iam_role.unity_catalog_iam_arn] + identifiers = [databricks_storage_credential.external.aws_iam_role[0].unity_catalog_iam_arn] type = "AWS" } condition { test = "StringEquals" variable = "sts:ExternalId" - values = [databricks_storage_credential.external.aws_iam_role.external_id] + values = [databricks_storage_credential.external.aws_iam_role[0].external_id] } } statement { diff --git a/docs/resources/access_control_rule_set.md b/docs/resources/access_control_rule_set.md index b1123cea72..44a6479651 100644 --- a/docs/resources/access_control_rule_set.md +++ b/docs/resources/access_control_rule_set.md @@ -238,12 +238,12 @@ grant_rules { Arguments of the `grant_rules` block are: -- `role` - (Required) Role to be granted. The supported roles are listed below. 
For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page) or [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role). +* `role` - (Required) Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page) or [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. * `roles/group.manager` - Manager of a group. * `roles/marketplace.admin` - Admin of marketplace. -- `principals` - (Required) a list of principals who are granted a role. The following format is supported: +* `principals` - (Required) a list of principals who are granted a role. The following format is supported: * `users/{username}` (also exposed as `acl_principal_id` attribute of `databricks_user` resource). * `groups/{groupname}` (also exposed as `acl_principal_id` attribute of `databricks_group` resource). * `servicePrincipals/{applicationId}` (also exposed as `acl_principal_id` attribute of `databricks_service_principal` resource). diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index d101978f3a..5a74322065 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -436,7 +436,7 @@ The following options are available: * `google_service_account` - (Optional, string) Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources. * `availability` - (Optional) Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. * `boot_disk_size` (optional, int) Boot disk size in GB -* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. +* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. * `zone_id` (optional) Identifier for the availability zone in which the cluster resides. This can be one of the following: * `HA` (default): High availability, spread nodes across availability zones for a Databricks deployment region. * `AUTO`: Databricks picks an availability zone to schedule the cluster on. @@ -553,7 +553,7 @@ terraform import databricks_cluster.this The following resources are often used in the same context: -* [Dynamic Passthrough Clusters for a Group](../guides/passthrough-cluster-per-user.md) guide. +* [Dynamic Passthrough Clusters for a Group](../guides/workspace-management.md) guide. * [End to end workspace management](../guides/workspace-management.md) guide. 
* [databricks_clusters](../data-sources/clusters.md) data to retrieve a list of [databricks_cluster](cluster.md) ids. * [databricks_cluster_policy](cluster_policy.md) to create a [databricks_cluster](cluster.md) policy, which limits the ability to create clusters based on a set of rules. diff --git a/docs/resources/cluster_policy.md b/docs/resources/cluster_policy.md index 856905869e..39b7b1dd31 100644 --- a/docs/resources/cluster_policy.md +++ b/docs/resources/cluster_policy.md @@ -163,7 +163,7 @@ terraform import databricks_cluster_policy.this The following resources are often used in the same context: -* [Dynamic Passthrough Clusters for a Group](../guides/passthrough-cluster-per-user.md) guide. +* [Dynamic Passthrough Clusters for a Group](../guides/workspace-management.md) guide. * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_clusters](../data-sources/clusters.md) data to retrieve a list of [databricks_cluster](cluster.md) ids. * [databricks_cluster](cluster.md) to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). diff --git a/docs/resources/dbfs_file.md b/docs/resources/dbfs_file.md index 0823da3d46..e3b9c73eea 100644 --- a/docs/resources/dbfs_file.md +++ b/docs/resources/dbfs_file.md @@ -49,7 +49,7 @@ resource "databricks_library" "app" { ## Argument Reference --> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. +-> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. The following arguments are supported: @@ -65,13 +65,12 @@ In addition to all arguments above, the following attributes are exported: * `file_size` - The file size of the file that is being tracked by this resource in bytes. * `dbfs_path` - Path, but with `dbfs:` prefix. - ## Import The resource dbfs file can be imported using the path of the file: ```bash -$ terraform import databricks_dbfs_file.this +terraform import databricks_dbfs_file.this ``` ## Related Resources diff --git a/docs/resources/default_namespace_settings.md b/docs/resources/default_namespace_settings.md index ff4a5f992e..78b766d63e 100644 --- a/docs/resources/default_namespace_settings.md +++ b/docs/resources/default_namespace_settings.md @@ -12,6 +12,7 @@ a fully qualified 3 level name. For example, if the default catalog is set to 'r 'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable' (the schema 'default' is always assumed). This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. + ## Example Usage ```hcl @@ -28,5 +29,3 @@ The resource supports the following arguments: * `namespace` - (Required) The configuration details. * `value` - (Required) The value for the setting. 
- - diff --git a/docs/resources/directory.md b/docs/resources/directory.md index 390334265e..ed53546ce0 100644 --- a/docs/resources/directory.md +++ b/docs/resources/directory.md @@ -40,17 +40,17 @@ In addition to all arguments above, the following attributes are exported: The resource directory can be imported using directory path: ```bash -$ terraform import databricks_directory.this /path/to/directory +terraform import databricks_directory.this /path/to/directory ``` ## Related Resources The following resources are often used in the same context: -* [End to end workspace management](../guides/workspace-management.md) guide. -* [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). -* [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. -* [databricks_notebook_paths](../data-sources/notebook_paths.md) data to list notebooks in Databricks Workspace. -* [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). -* [databricks_spark_version](../data-sources/spark_version.md) data to get [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in [databricks_cluster](cluster.md) and other resources. -* [databricks_workspace_conf](workspace_conf.md) to manage workspace configuration for expert usage. +- [End to end workspace management](../guides/workspace-management.md) guide. +- [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). +- [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. +- [databricks_notebook_paths](../data-sources/notebook_paths.md) data to list notebooks in Databricks Workspace. +- [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). +- [databricks_spark_version](../data-sources/spark_version.md) data to get [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that could be used for `spark_version` parameter in [databricks_cluster](cluster.md) and other resources. +- [databricks_workspace_conf](workspace_conf.md) to manage workspace configuration for expert usage. diff --git a/docs/resources/file.md b/docs/resources/file.md index 2a6bf99240..9738e15c02 100644 --- a/docs/resources/file.md +++ b/docs/resources/file.md @@ -3,9 +3,10 @@ subcategory: "Storage" --- # databricks_file Resource -This resource allows uploading and downloading files in [databricks_volume](volume.md). +This resource allows uploading and downloading files in [databricks_volume](volume.md). + +Notes: -Notes: * Currently the limit is 5GiB in octet-stream. * Currently, only UC volumes are supported. The list of destinations may change. @@ -75,13 +76,12 @@ In addition to all arguments above, the following attributes are exported: * `file_size` - The file size of the file that is being tracked by this resource in bytes. 
* `modification_time` - The last time stamp when the file was modified - ## Import The resource `databricks_file` can be imported using the path of the file: ```bash -$ terraform import databricks_file.this +terraform import databricks_file.this ``` ## Related Resources diff --git a/docs/resources/git_credential.md b/docs/resources/git_credential.md index f46b90450e..93c728585b 100644 --- a/docs/resources/git_credential.md +++ b/docs/resources/git_credential.md @@ -5,7 +5,6 @@ subcategory: "Workspace" This resource allows you to manage credentials for [Databricks Repos](https://docs.databricks.com/repos.html) using [Git Credentials API](https://docs.databricks.com/dev-tools/api/latest/gitcredentials.html). - ## Example Usage You can declare Terraform-managed Git credential using following code: @@ -20,7 +19,6 @@ resource "databricks_git_credential" "ado" { ## Argument Reference - The following arguments are supported: * `personal_access_token` - (Required) The personal access token used to authenticate to the corresponding Git provider. If value is not provided, it's sourced from the first environment variable of [`GITHUB_TOKEN`](https://registry.terraform.io/providers/integrations/github/latest/docs#oauth--personal-access-token), [`GITLAB_TOKEN`](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs#required), or [`AZDO_PERSONAL_ACCESS_TOKEN`](https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs#argument-reference), that has a non-empty value. @@ -39,10 +37,9 @@ In addition to all arguments above, the following attributes are exported: The resource cluster can be imported using ID of Git credential that could be obtained via REST API: ```bash -$ terraform import databricks_git_credential.this +terraform import databricks_git_credential.this ``` - ## Related Resources The following resources are often used in the same context: diff --git a/docs/resources/global_init_script.md b/docs/resources/global_init_script.md index 4fdd921a2d..bb8e50d98b 100644 --- a/docs/resources/global_init_script.md +++ b/docs/resources/global_init_script.md @@ -28,7 +28,7 @@ resource "databricks_global_init_script" "init2" { name = "hello script" } ``` - + ## Argument Reference -> **Note** Global init script in the Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed global init script won't be overwritten by Terraform, if there's no local change to source. 
@@ -56,7 +56,7 @@ Global init scripts are available only for administrators, so you can't change p The resource global init script can be imported using script ID: ```bash -$ terraform import databricks_global_init_script.this script_id +terraform import databricks_global_init_script.this script_id ``` ## Related Resources diff --git a/docs/resources/grant.md b/docs/resources/grant.md index 13da1d0e0d..2adfdb7d67 100644 --- a/docs/resources/grant.md +++ b/docs/resources/grant.md @@ -353,4 +353,3 @@ The resource can be imported using combination of securable type (`table`, `cata ```bash terraform import databricks_grant.this catalog/abc/user_name ``` - diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 7551e4bdcb..597ca1c8fa 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -349,4 +349,3 @@ The resource can be imported using combination of securable type (`table`, `cata ```bash terraform import databricks_grants.this catalog/abc ``` - diff --git a/docs/resources/group_member.md b/docs/resources/group_member.md index 5f8df90a3d..ce87a9c7e0 100644 --- a/docs/resources/group_member.md +++ b/docs/resources/group_member.md @@ -53,7 +53,7 @@ In addition to all arguments above, the following attributes are exported: You can import a `databricks_group_member` resource with name `my_group_member` like the following: ```bash -$ terraform import databricks_group_member.my_group_member "|" +terraform import databricks_group_member.my_group_member "|" ``` ## Related Resources diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md index c3d574bdb4..2912fe9658 100644 --- a/docs/resources/instance_pool.md +++ b/docs/resources/instance_pool.md @@ -71,26 +71,26 @@ The following options are [available](https://docs.microsoft.com/en-us/azure/dat The following options are [available](https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#gcpavailability): * `gcp_availability` - (Optional) Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. -* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. - +* `local_ssd_count` (optional, int) Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. ### disk_spec Configuration Block For disk_spec make sure to use **ebs_volume_type** only on AWS deployment of Databricks and **azure_disk_volume_type** only on a Azure deployment of Databricks. * `disk_count` - (Optional) (Integer) The number of disks to attach to each instance. This feature is only enabled for supported node types. Users can choose up to the limit of the disks supported by the node type. For node types with no local disk, at least one disk needs to be specified. -* `disk_size` - (Optional) (Integer) The size of each disk (in GiB) to attach. +* `disk_size` - (Optional) (Integer) The size of each disk (in GiB) to attach. #### disk_type sub-block + `ebs_volume_type` - (Optional) (String) The EBS volume type to use. 
Options are: `GENERAL_PURPOSE_SSD` (Provision extra storage using AWS gp2 EBS volumes) or `THROUGHPUT_OPTIMIZED_HDD` (Provision extra storage using AWS st1 volumes) - * General Purpose SSD: `100 - 4096` GiB - * Throughput Optimized HDD: `500 - 4096` GiB +* General Purpose SSD: `100 - 4096` GiB +* Throughput Optimized HDD: `500 - 4096` GiB `azure_disk_volume_type` - (Optional) (String) The type of Azure disk to use. Options are: `PREMIUM_LRS` (Premium storage tier, backed by SSDs) or `"STANDARD_LRS"` (Standard storage tier, backed by HDDs) - * Premium LRS (SSD): `1 - 1023` GiB - * Standard LRS (HDD): `1- 1023` GiB +* Premium LRS (SSD): `1 - 1023` GiB +* Standard LRS (HDD): `1- 1023` GiB ### preloaded_docker_image sub_block @@ -139,5 +139,5 @@ In addition to all arguments above, the following attributes are exported: The resource instance pool can be imported using it's id: ```bash -$ terraform import databricks_instance_pool.this +terraform import databricks_instance_pool.this ``` diff --git a/docs/resources/ip_access_list.md b/docs/resources/ip_access_list.md index c0644246b3..fa0aca22c6 100644 --- a/docs/resources/ip_access_list.md +++ b/docs/resources/ip_access_list.md @@ -27,6 +27,7 @@ resource "databricks_ip_access_list" "allowed-list" { depends_on = [databricks_workspace_conf.this] } ``` + ## Argument Reference The following arguments are supported: @@ -48,7 +49,7 @@ In addition to all arguments above, the following attributes are exported: The databricks_ip_access_list can be imported using id: ```bash -$ terraform import databricks_ip_access_list.this +terraform import databricks_ip_access_list.this ``` ## Related Resources diff --git a/docs/resources/job.md b/docs/resources/job.md index 34e5e18486..fd8e8282b7 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -325,7 +325,7 @@ You can invoke Spark submit tasks only on new clusters. **In the `new_cluster` s * `commands` - (Required) (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt". * `source` - (Optional) The source of the project. Possible values are `WORKSPACE` and `GIT`. Defaults to `GIT` if a `git_source` block is present in the job definition. -* `project_directory` - (Required when `source` is `WORKSPACE`) The path where dbt should look for `dbt_project.yml`. Equivalent to passing `--project-dir` to the dbt CLI. +* `project_directory` - (Required when `source` is `WORKSPACE`) The path where dbt should look for `dbt_project.yml`. Equivalent to passing `--project-dir` to the dbt CLI. * If `source` is `GIT`: Relative path to the directory in the repository specified in the `git_source` block. Defaults to the repository's root directory when not specified. * If `source` is `WORKSPACE`: Absolute path to the folder in the workspace. * `profiles_directory` - (Optional) The relative path to the directory in the repository specified by `git_source` where dbt should look in for the `profiles.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--profile-dir` to a dbt command. @@ -354,7 +354,7 @@ This task does not require a cluster to execute and does not support retries or * `concurrency` - (Optional) Controls the number of active iteration task runs. Default is 20, maximum allowed is 100. * `inputs` - (Required) (String) Array for task to iterate on. This can be a JSON string or a reference to an array parameter. -* `task` - (Required) Task to run against the `inputs` list. +* `task` - (Required) Task to run against the `inputs` list. 
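Taken together, these iteration fields can be sketched roughly as follows. This is a minimal, hypothetical illustration only: it assumes the iteration block is exposed as `for_each_task` nested inside a `task` block of `databricks_job`, and the job name, task keys, and notebook path are placeholders; cluster settings for the nested task are omitted for brevity.

```hcl
resource "databricks_job" "fan_out" {
  name = "fan-out-example" # hypothetical job name

  task {
    task_key = "process_each_input"

    for_each_task {
      # JSON-encoded array that the nested task iterates over
      inputs      = jsonencode(["us", "ca", "de"])
      concurrency = 2 # at most two iterations run at the same time

      task {
        task_key = "process_one_input"
        notebook_task {
          notebook_path = "/Shared/process_input" # placeholder notebook
        }
        # cluster configuration for the nested task is omitted for brevity
      }
    }
  }
}
```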
### sql_task Configuration Block @@ -372,7 +372,7 @@ One of the `query`, `dashboard` or `alert` needs to be provided. * `alert_id` - (Required) (String) identifier of the Databricks SQL Alert. * `subscriptions` - (Required) a list of subscription blocks consisting out of one of the required fields: `user_name` for user emails or `destination_id` - for Alert destination's identifier. * `pause_subscriptions` - (Optional) flag that specifies if subscriptions are paused or not. -* `file` - (Optional) block consisting of single string fields: +* `file` - (Optional) block consisting of single string fields: * `source` - (Optional) The source of the project. Possible values are `WORKSPACE` and `GIT`. * `path` - If `source` is `GIT`: Relative path to the file in the repository specified in the `git_source` block with SQL commands to execute. If `source` is `WORKSPACE`: Absolute path to the file in the workspace with SQL commands to execute. @@ -433,7 +433,7 @@ By default, all users can create and modify jobs unless an administrator [enable ## Single-task syntax (deprecated) --> **Deprecated** Please define tasks in a `task` block rather than using single-task syntax. +-> **Deprecated** Please define tasks in a `task` block rather than using single-task syntax. This syntax uses Jobs API 2.0 to create a job with a single task. Only a subset of arguments above is supported (`name`, `libraries`, `email_notifications`, `webhook_notifications`, `timeout_seconds`, `max_retries`, `min_retry_interval_millis`, `retry_on_timeout`, `schedule`, `max_concurrent_runs`), and only a single block of `notebook_task`, `spark_jar_task`, `spark_python_task`, `spark_submit_task` and `pipeline_task` can be specified. diff --git a/docs/resources/library.md b/docs/resources/library.md index 329edd8d99..c3f8e9822b 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -3,7 +3,7 @@ subcategory: "Compute" --- # databricks_library resource -Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error. +Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error. -> **Note** `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart. @@ -128,5 +128,5 @@ The following resources are often used in the same context: * [databricks_global_init_script](global_init_script.md) to manage [global init scripts](https://docs.databricks.com/clusters/init-scripts.html#global-init-scripts), which are run on all [databricks_cluster](cluster.md#init_scripts) and [databricks_job](job.md#new_cluster). * [databricks_job](job.md) to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a [databricks_cluster](cluster.md). * [databricks_mount](mount.md) to [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. 
-* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/resources/metastore.md b/docs/resources/metastore.md index efed83c3c0..d159f53037 100644 --- a/docs/resources/metastore.md +++ b/docs/resources/metastore.md @@ -74,7 +74,7 @@ The following arguments are required: * `storage_root` - (Optional) Path on cloud storage account, where managed `databricks_table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined. * `region` - (Mandatory for account-level) The region of the metastore * `owner` - (Optional) Username/groupname/sp application_id of the metastore owner. -* `delta_sharing_scope` - (Optional) Required along with `delta_sharing_recipient_token_lifetime_in_seconds`. Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. +* `delta_sharing_scope` - (Optional) Required along with `delta_sharing_recipient_token_lifetime_in_seconds`. Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. INTERNAL only allows sharing within the same account, and INTERNAL_AND_EXTERNAL allows cross account sharing and token based sharing. * `delta_sharing_recipient_token_lifetime_in_seconds` - (Optional) Required along with `delta_sharing_scope`. Used to set expiration duration in seconds on recipient data access tokens. Set to 0 for unlimited duration. * `delta_sharing_organization_name` - (Optional) The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. Once this is set it cannot be removed and can only be modified to another valid value. To delete this value please taint and recreate the resource. * `force_destroy` - (Optional) Destroy metastore regardless of its contents. 
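As a rough sketch of how the Delta Sharing arguments above fit together, the following example enables sharing on a metastore. The metastore `name`, storage bucket, region, owner, and organization name are placeholder values, and `name` itself is assumed here since it does not appear in the excerpt above.

```hcl
resource "databricks_metastore" "this" {
  name         = "primary"                                 # placeholder metastore name
  storage_root = "s3://my-metastore-root-bucket/metastore" # placeholder bucket
  region       = "us-east-1"
  owner        = "uc-admins"

  # Enable Delta Sharing for recipients inside and outside the account.
  delta_sharing_scope                               = "INTERNAL_AND_EXTERNAL"
  delta_sharing_recipient_token_lifetime_in_seconds = 0 # 0 = recipient tokens never expire
  delta_sharing_organization_name                   = "example-org"

  force_destroy = true
}
```

Setting the token lifetime to `0` keeps recipient data access tokens valid indefinitely, which is convenient for testing but usually not what you want in production.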
diff --git a/docs/resources/mlflow_experiment.md b/docs/resources/mlflow_experiment.md index edb66ac25a..8da86061ab 100644 --- a/docs/resources/mlflow_experiment.md +++ b/docs/resources/mlflow_experiment.md @@ -40,7 +40,7 @@ In addition to all arguments above, the following attributes are exported: The experiment resource can be imported using the id of the experiment ```bash -$ terraform import databricks_mlflow_experiment.this +terraform import databricks_mlflow_experiment.this ``` ## Related Resources diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md index 33968b9a4f..60c310d295 100644 --- a/docs/resources/mlflow_model.md +++ b/docs/resources/mlflow_model.md @@ -45,7 +45,7 @@ In addition to all arguments above, the following attributes are exported: The model resource can be imported using the name ```bash -$ terraform import databricks_mlflow_model.this +terraform import databricks_mlflow_model.this ``` ## Access Control diff --git a/docs/resources/mws_private_access_settings.md b/docs/resources/mws_private_access_settings.md index c5a510ecc0..e5bde84479 100644 --- a/docs/resources/mws_private_access_settings.md +++ b/docs/resources/mws_private_access_settings.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_private_access_settings Resource -Allows you to create a [Private Access Setting]that can be used as part of a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) +Allows you to create a Private Access Setting resource that can be used as part of a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) It is strongly recommended that customers read the [Enable AWS Private Link](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) [Enable GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) documentation before trying to leverage this resource. diff --git a/docs/resources/notebook.md b/docs/resources/notebook.md index 64c4123469..a88b9db484 100644 --- a/docs/resources/notebook.md +++ b/docs/resources/notebook.md @@ -70,7 +70,7 @@ In addition to all arguments above, the following attributes are exported: The resource notebook can be imported using notebook path ```bash -$ terraform import databricks_notebook.this /path/to/notebook +terraform import databricks_notebook.this /path/to/notebook ``` ## Related Resources diff --git a/docs/resources/pipeline.md b/docs/resources/pipeline.md index e9ffb7cbb0..7fa7a90e76 100644 --- a/docs/resources/pipeline.md +++ b/docs/resources/pipeline.md @@ -3,7 +3,7 @@ subcategory: "Compute" --- # databricks_pipeline Resource -Use `databricks_pipeline` to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +Use `databricks_pipeline` to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). 
## Example Usage @@ -106,7 +106,7 @@ In addition to all arguments above, the following attributes are exported: The resource job can be imported using the id of the pipeline ```bash -$ terraform import databricks_pipeline.this +terraform import databricks_pipeline.this ``` ## Related Resources diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md index e0b584cfcc..0aaf7404c1 100644 --- a/docs/resources/registered_model.md +++ b/docs/resources/registered_model.md @@ -41,7 +41,7 @@ In addition to all arguments above, the following attributes are exported: The registered model resource can be imported using the full (3-level) name of the model. ```bash -$ terraform import databricks_registered_model.this +terraform import databricks_registered_model.this ``` ## Related Resources diff --git a/docs/resources/restrict_workspace_admins_setting.md b/docs/resources/restrict_workspace_admins_setting.md index bb2c018d7d..5e54487d6f 100644 --- a/docs/resources/restrict_workspace_admins_setting.md +++ b/docs/resources/restrict_workspace_admins_setting.md @@ -6,14 +6,16 @@ subcategory: "Settings" -> **Note** This resource could be only used with workspace-level provider! -The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins. +The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins. With the status set to `ALLOW_ALL`, workspace admins can: -1. Create service principal personal access tokens on behalf of any service principal in their workspace. + +1. Create service principal personal access tokens on behalf of any service principal in their workspace. 2. Change a job owner to any user in the workspace. 3. Change the job run_as setting to any user in their workspace or a service principal on which they have the Service Principal User role. With the status set to `RESTRICT_TOKENS_AND_JOB_RUN_AS`, workspace admins can: + 1. Only create personal access tokens on behalf of service principals on which they have the Service Principal User role. 2. Only change a job owner to themselves. 3. Only change the job run_as setting to themselves a service principal on which they have the Service Principal User role. diff --git a/docs/resources/secret.md b/docs/resources/secret.md index 3f155ebfef..a79aef3e83 100644 --- a/docs/resources/secret.md +++ b/docs/resources/secret.md @@ -35,7 +35,6 @@ The following arguments are required: * `scope` - (Required) (String) name of databricks secret scope. Must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. * `key` - (Required) (String) key within secret scope. Must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. - ## Attribute Reference In addition to all arguments above, the following attributes are exported: @@ -44,13 +43,12 @@ In addition to all arguments above, the following attributes are exported: * `last_updated_timestamp` - (Integer) time secret was updated * `config_reference` - (String) value to use as a secret reference in [Spark configuration and environment variables](https://docs.databricks.com/security/secrets/secrets.html#use-a-secret-in-a-spark-configuration-property-or-environment-variable): `{{secrets/scope/key}}`. - ## Import The resource secret can be imported using `scopeName|||secretKey` combination. 
**This may change in future versions.** ```bash -$ terraform import databricks_secret.app `scopeName|||secretKey` +terraform import databricks_secret.app `scopeName|||secretKey` ``` ## Related Resources @@ -59,7 +57,7 @@ The following resources are often used in the same context: * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). -* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). +* [databricks_pipeline](pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html). * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). * [databricks_secret_acl](secret_acl.md) to manage access to [secrets](https://docs.databricks.com/security/secrets/index.html#secrets-user-guide) in Databricks workspace. * [databricks_secret_scope](secret_scope.md) to create [secret scopes](https://docs.databricks.com/security/secrets/index.html#secrets-user-guide) in Databricks workspace. diff --git a/docs/resources/secret_acl.md b/docs/resources/secret_acl.md index ddc3df797f..ef63f1181c 100644 --- a/docs/resources/secret_acl.md +++ b/docs/resources/secret_acl.md @@ -48,7 +48,7 @@ The following arguments are required: The resource secret acl can be imported using `scopeName|||principalName` combination. ```bash -$ terraform import databricks_secret_acl.object `scopeName|||principalName` +terraform import databricks_secret_acl.object `scopeName|||principalName` ``` ## Related Resources diff --git a/docs/resources/secret_scope.md b/docs/resources/secret_scope.md index d73d588e5b..25218d5c7c 100644 --- a/docs/resources/secret_scope.md +++ b/docs/resources/secret_scope.md @@ -22,13 +22,12 @@ The following arguments are supported: ### keyvault_metadata -On Azure, it is possible to create Azure Databricks secret scopes backed by Azure Key Vault. Secrets are stored in Azure Key Vault and can be accessed through the Azure Databricks secrets utilities, making use of Azure Databricks access control and secret redaction. A secret scope may be configured with at most one Key Vault. +On Azure, it is possible to create Azure Databricks secret scopes backed by Azure Key Vault. Secrets are stored in Azure Key Vault and can be accessed through the Azure Databricks secrets utilities, making use of Azure Databricks access control and secret redaction. A secret scope may be configured with at most one Key Vault. -> **Warning** To create a secret scope from Azure Key Vault, you must use one of the [Azure-specific authentication methods](../index.md#special-configurations-for-azure). Secret scopes backed by Azure Key Vault cannot be created using personal access tokens (PAT). To define AKV access policies, you must use [azurerm_key_vault_access_policy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/key_vault_access_policy) instead of [access_policy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/key_vault#access_policy) blocks on `azurerm_key_vault`, otherwise Terraform will remove access policies needed to access the Key Vault and the secret scope won't be in a usable state anymore. 
- ```hcl data "azurerm_client_config" "current" { } @@ -73,7 +72,7 @@ In addition to all arguments above, the following attributes are exported: The secret resource scope can be imported using the scope name. `initial_manage_principal` state won't be imported, because the underlying API doesn't include it in the response. ```bash -$ terraform import databricks_secret_scope.object +terraform import databricks_secret_scope.object ``` ## Related Resources diff --git a/docs/resources/service_principal_role.md b/docs/resources/service_principal_role.md index 369f70c6e4..511089d7b0 100644 --- a/docs/resources/service_principal_role.md +++ b/docs/resources/service_principal_role.md @@ -23,6 +23,7 @@ resource "databricks_service_principal_role" "my_service_principal_instance_prof role = databricks_instance_profile.instance_profile.id } ``` + ## Argument Reference The following arguments are supported: @@ -34,7 +35,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The id in the format `|`. +* `id` - The id in the format `|`. ## Import diff --git a/docs/resources/service_principal_secret.md b/docs/resources/service_principal_secret.md index 03162a5930..f98abd9b3e 100644 --- a/docs/resources/service_principal_secret.md +++ b/docs/resources/service_principal_secret.md @@ -11,7 +11,6 @@ This secret can be used to configure the Databricks Terraform Provider to authen Additionally, the secret can be used to request OAuth tokens for the service principal, which can be used to authenticate to Databricks REST APIs. See [Authentication using OAuth tokens for service principals](https://docs.databricks.com/dev-tools/authentication-oauth.html). - ## Example Usage Create service principal secret @@ -28,14 +27,12 @@ The following arguments are available: * `service_principal_id` - ID of the [databricks_service_principal](service_principal.md) (not application ID). - ## Attribute Reference In addition to all arguments above, the following attributes are exported: -- `id` - ID of the secret -- `secret` - Generated secret for the service principal - +* `id` - ID of the secret +* `secret` - Generated secret for the service principal ## Related Resources diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md index 5360db8139..f523c2acc7 100644 --- a/docs/resources/sql_alert.md +++ b/docs/resources/sql_alert.md @@ -50,7 +50,7 @@ The following arguments are available: * `custom_body` - (Optional, String) Custom body of alert notification, if it exists. See [Alerts API reference](https://docs.databricks.com/sql/user/alerts/index.html) for custom templating instructions. * `empty_result_state` - (Optional, String) State that alert evaluates to when query result is empty. Currently supported values are `unknown`, `triggered`, `ok` - check [API documentation](https://docs.databricks.com/api/workspace/alerts/create) for full list of supported values. * `parent` - (Optional, String) The identifier of the workspace folder containing the alert. The default is ther user's home folder. The folder identifier is formatted as `folder/`. -* `rearm` - (Optional, Integer) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. +* `rearm` - (Optional, Integer) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. 
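For orientation, a minimal sketch of an alert using the arguments above might look as follows. The `query_id`, `name`, and the `options` block (with `column`, `op`, `value`) are assumptions not covered by the excerpt above, and all values are placeholders; check the resource schema before relying on this exact shape.

```hcl
resource "databricks_sql_alert" "this" {
  # Assumes a databricks_sql_query.this defined elsewhere in the configuration.
  query_id = databricks_sql_query.this.id
  name     = "Too many errors" # placeholder alert name

  options {
    column = "error_count" # column of the query result to evaluate
    op     = ">"
    value  = "100"
  }

  parent = "folders/1234567890" # placeholder workspace folder identifier
  rearm  = 3600                 # re-trigger at most once per hour
}
```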
## Attribute Reference diff --git a/docs/resources/sql_dashboard.md b/docs/resources/sql_dashboard.md index 97b3d583da..3fc87cf4e1 100644 --- a/docs/resources/sql_dashboard.md +++ b/docs/resources/sql_dashboard.md @@ -51,10 +51,9 @@ In addition to all arguments above, the following attributes are exported: You can import a `databricks_sql_dashboard` resource with ID like the following: ```bash -$ terraform import databricks_sql_dashboard.this +terraform import databricks_sql_dashboard.this ``` - ## Related Resources The following resources are often used in the same context: diff --git a/docs/resources/sql_endpoint.md b/docs/resources/sql_endpoint.md index 5deabc8410..c77cd3cf54 100644 --- a/docs/resources/sql_endpoint.md +++ b/docs/resources/sql_endpoint.md @@ -39,9 +39,9 @@ The following arguments are supported: * `enable_photon` - Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. * `enable_serverless_compute` - Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). + * **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). + * **For Azure**, If omitted, the default is `false` for most workspaces. 
However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). * `channel` block, consisting of following fields: * `name` - Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. @@ -82,7 +82,7 @@ timeouts { You can import a `databricks_sql_endpoint` resource with ID like the following: ```bash -$ terraform import databricks_sql_endpoint.this +terraform import databricks_sql_endpoint.this ``` ## Related resources diff --git a/docs/resources/sql_global_config.md b/docs/resources/sql_global_config.md index 3fbd7738b6..013dd55500 100644 --- a/docs/resources/sql_global_config.md +++ b/docs/resources/sql_global_config.md @@ -39,15 +39,14 @@ resource "databricks_sql_global_config" "this" { } ``` - ## Argument Reference The following arguments are supported (see [documentation](https://docs.databricks.com/sql/api/sql-endpoints.html#global-edit) for more details): * `security_policy` (Optional, String) - The policy for controlling access to datasets. Default value: `DATA_ACCESS_CONTROL`, consult documentation for list of possible values * `data_access_config` (Optional, Map) - Data access configuration for [databricks_sql_endpoint](sql_endpoint.md), such as configuration for an external Hive metastore, Hadoop Filesystem configuration, etc. Please note that the list of supported configuration properties is limited, so refer to the [documentation](https://docs.databricks.com/sql/admin/data-access-configuration.html#supported-properties) for a full list. Apply will fail if you're specifying not permitted configuration. -* `instance_profile_arn` (Optional, String) - [databricks_instance_profile](instance_profile.md) used to access storage from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for AWS, and will generate an error if used on other clouds. -* `google_service_account` (Optional, String) - used to access GCP services, such as Cloud Storage, from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for GCP, and will generate an error if used on other clouds. +* `instance_profile_arn` (Optional, String) - [databricks_instance_profile](instance_profile.md) used to access storage from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for AWS, and will generate an error if used on other clouds. +* `google_service_account` (Optional, String) - used to access GCP services, such as Cloud Storage, from [databricks_sql_endpoint](sql_endpoint.md). Please note that this parameter is only for GCP, and will generate an error if used on other clouds. * `sql_config_params` (Optional, Map) - SQL Configuration Parameters let you override the default behavior for all sessions with all endpoints. 
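The arguments above can be combined roughly as in the following sketch. The instance profile ARN and the Hive metastore properties are placeholders (this shape assumes an AWS workspace), and only the argument names listed above are used.

```hcl
resource "databricks_sql_global_config" "this" {
  security_policy = "DATA_ACCESS_CONTROL"

  # AWS-only: instance profile used by SQL warehouses to access storage (placeholder ARN).
  instance_profile_arn = "arn:aws:iam::123456789012:instance-profile/sql-access"

  # Placeholder external Hive metastore settings; check the supported-properties list.
  data_access_config = {
    "spark.sql.hive.metastore.jars"    = "maven"
    "spark.sql.hive.metastore.version" = "2.3.9"
  }

  sql_config_params = {
    "ANSI_MODE" = "true"
  }
}
```

Because this configuration is global, changes here apply to every SQL warehouse in the workspace, so it is usually managed from a single place in your Terraform code.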
## Import @@ -55,7 +54,7 @@ The following arguments are supported (see [documentation](https://docs.databric You can import a `databricks_sql_global_config` resource with command like the following (you need to use `global` as ID): ```bash -$ terraform import databricks_sql_global_config.this global +terraform import databricks_sql_global_config.this global ``` ## Related Resources diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md index 0cd8433ce4..27e12e03ea 100644 --- a/docs/resources/sql_query.md +++ b/docs/resources/sql_query.md @@ -122,7 +122,7 @@ In addition to all arguments above, the following attributes are exported: You can import a `databricks_sql_query` resource with ID like the following: ```bash -$ terraform import databricks_sql_query.this +terraform import databricks_sql_query.this ``` ## Troubleshooting diff --git a/docs/resources/sql_visualization.md b/docs/resources/sql_visualization.md index 86e1c296a3..b9ea7d6c99 100644 --- a/docs/resources/sql_visualization.md +++ b/docs/resources/sql_visualization.md @@ -42,16 +42,15 @@ resource "databricks_sql_visualization" "q1v1" { } ``` - ## Separating `visualization definition` from IAC configuration Since `options` field contains the full JSON encoded string definition of how to render a visualization for the backend API - `sql/api/visualizations`, they can get quite verbose. If you have lots of visualizations to declare, it might be cleaner to separate the `options` field and store them as separate `.json` files to be referenced. -### Example Usage +### Example -- directory tree +- directory tree ```bash . @@ -72,7 +71,6 @@ If you have lots of visualizations to declare, it might be cleaner to separate t name = "My Table" description = "Some Description" options = file("${path.module}/visualizations/q1v1.json") - ) } resource "databricks_sql_visualization" "q1v2" { @@ -81,7 +79,6 @@ If you have lots of visualizations to declare, it might be cleaner to separate t name = "My Chart" description = "Some Description" options = file("${path.module}/visualizations/q1v2.json") - ) } ``` @@ -97,15 +94,15 @@ In preparation for this operational scenario; you should be familiar with, and h You can import a `databricks_sql_visualization` resource with ID like the following: ```bash -$ terraform import databricks_sql_visualization.this / +terraform import databricks_sql_visualization.this / ``` ## Related Resources The following resources are often used in the same context: -* [End to end workspace management](../guides/workspace-management.md) guide. -* [databricks_sql_dashboard](sql_dashboard.md) to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). -* [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). -* [databricks_sql_global_config](sql_global_config.md) to configure the security policy, [databricks_instance_profile](instance_profile.md), and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all [databricks_sql_endpoint](sql_endpoint.md) of workspace. -* [databricks_sql_permissions](sql_permissions.md) to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). +- [End to end workspace management](../guides/workspace-management.md) guide. 
+- [databricks_sql_dashboard](sql_dashboard.md) to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). +- [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). +- [databricks_sql_global_config](sql_global_config.md) to configure the security policy, [databricks_instance_profile](instance_profile.md), and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all [databricks_sql_endpoint](sql_endpoint.md) of workspace. +- [databricks_sql_permissions](sql_permissions.md) to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). diff --git a/docs/resources/sql_widget.md b/docs/resources/sql_widget.md index 1c38f09652..ec862e85c8 100644 --- a/docs/resources/sql_widget.md +++ b/docs/resources/sql_widget.md @@ -11,7 +11,7 @@ A widget is always tied to a [dashboard](sql_dashboard.md). Every dashboard may ## Example Usage -``` +```hcl resource "databricks_sql_widget" "d1w1" { dashboard_id = databricks_sql_dashboard.d1.id text = "Hello! I'm a **text widget**!" @@ -42,7 +42,7 @@ resource "databricks_sql_widget" "d1w2" { You can import a `databricks_sql_widget` resource with ID like the following: ```bash -$ terraform import databricks_sql_widget.this / +terraform import databricks_sql_widget.this / ``` ## Related Resources diff --git a/docs/resources/user_instance_profile.md b/docs/resources/user_instance_profile.md index d655c76cca..88e6016c8e 100644 --- a/docs/resources/user_instance_profile.md +++ b/docs/resources/user_instance_profile.md @@ -23,6 +23,7 @@ resource "databricks_user_instance_profile" "my_user_instance_profile" { instance_profile_id = databricks_instance_profile.instance_profile.id } ``` + ## Argument Reference The following arguments are supported: @@ -34,7 +35,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The id in the format `|`. +* `id` - The id in the format `|`. ## Import diff --git a/docs/resources/vector_search_endpoint.md b/docs/resources/vector_search_endpoint.md index 3553c8c7c2..51f2addd06 100644 --- a/docs/resources/vector_search_endpoint.md +++ b/docs/resources/vector_search_endpoint.md @@ -5,11 +5,10 @@ subcategory: "Vector Search" -> **Note** This resource could be only used on Unity Catalog-enabled workspace! -This resource allows you to create [Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Vector Search Endpoint is used to create and access vector search indexes. +This resource allows you to create [Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Vector Search Endpoint is used to create and access vector search indexes. 
## Example Usage - ```hcl resource "databricks_vector_search_endpoint" "this" { name = "vector-search-test" diff --git a/docs/resources/workspace_conf.md b/docs/resources/workspace_conf.md index 0c45405b23..0c433c9b79 100644 --- a/docs/resources/workspace_conf.md +++ b/docs/resources/workspace_conf.md @@ -11,11 +11,11 @@ Manages workspace configuration for expert usage. Currently, more than one insta Allows specification of custom configuration properties for expert usage: - * `enableIpAccessLists` - enables the use of [databricks_ip_access_list](ip_access_list.md) resources - * `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set. - * `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace. - * `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace. - * `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace. +* `enableIpAccessLists` - enables the use of [databricks_ip_access_list](ip_access_list.md) resources +* `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set. +* `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace. +* `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace. +* `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace. 
```hcl resource "databricks_workspace_conf" "this" { diff --git a/docs/resources/workspace_file.md b/docs/resources/workspace_file.md index 50c46ef061..f7cbc8e1de 100644 --- a/docs/resources/workspace_file.md +++ b/docs/resources/workspace_file.md @@ -60,7 +60,7 @@ In addition to all arguments above, the following attributes are exported: The workspace file resource can be imported using workspace file path ```bash -$ terraform import databricks_workspace_file.this /path/to/file +terraform import databricks_workspace_file.this /path/to/file ``` ## Related Resources diff --git a/internal/acceptance/grant_test.go b/internal/acceptance/grant_test.go index 08d328db38..19898e477d 100644 --- a/internal/acceptance/grant_test.go +++ b/internal/acceptance/grant_test.go @@ -1,6 +1,8 @@ package acceptance import ( + "fmt" + "regexp" "strings" "testing" ) @@ -99,8 +101,36 @@ resource "databricks_grant" "some" { func TestUcAccGrant(t *testing.T) { unityWorkspaceLevel(t, step{ Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, - step{ - Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }) + }, step{ + Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), + }) +} + +func grantTemplateForNamePermissionChange(suffix string, permission string) string { + return fmt.Sprintf(` + resource "databricks_storage_credential" "external" { + name = "cred-{var.STICKY_RANDOM}%s" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + comment = "Managed by TF" + } + + resource "databricks_grant" "cred" { + storage_credential = databricks_storage_credential.external.id + principal = "{env.TEST_DATA_ENG_GROUP}" + privileges = ["%s"] + } + `, suffix, permission) +} + +func TestUcAccGrantForIdChange(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: grantTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), + }, step{ + Template: grantTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), + }, step{ + Template: grantTemplateForNamePermissionChange("-fail", "abc"), + ExpectError: regexp.MustCompile(`cannot create grant: Privilege abc is not applicable to this entity`), + }) } diff --git a/internal/acceptance/grants_test.go b/internal/acceptance/grants_test.go index 303e325f2d..23beb2fe3b 100644 --- a/internal/acceptance/grants_test.go +++ b/internal/acceptance/grants_test.go @@ -1,6 +1,8 @@ package acceptance import ( + "fmt" + "regexp" "strings" "testing" ) @@ -105,8 +107,38 @@ resource "databricks_grants" "some" { func TestUcAccGrants(t *testing.T) { unityWorkspaceLevel(t, step{ Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, - step{ - Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }) + }, step{ + Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), + }) +} + +func grantsTemplateForNamePermissionChange(suffix string, permission string) string { + return fmt.Sprintf(` + resource "databricks_storage_credential" "external" { + name = "cred-{var.STICKY_RANDOM}%s" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + comment = "Managed by TF" + } + + resource "databricks_grants" "cred" { + storage_credential = databricks_storage_credential.external.id + grant { + principal = "{env.TEST_DATA_ENG_GROUP}" + privileges = ["%s"] + } + } + `, suffix, permission) +} + +func TestUcAccGrantsForIdChange(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: 
grantsTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), + }, step{ + Template: grantsTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), + }, step{ + Template: grantsTemplateForNamePermissionChange("-fail", "abc"), + ExpectError: regexp.MustCompile(`Error: cannot create grants: Privilege abc is not applicable to this entity`), + }) } diff --git a/scripts/gcp-integration/README.md b/scripts/gcp-integration/README.md index d76239a607..3c2ae934cf 100644 --- a/scripts/gcp-integration/README.md +++ b/scripts/gcp-integration/README.md @@ -1,4 +1,5 @@ -make test-gcp +# make test-gcp + --- Used for running integration tests on GCP.