Skip to content

Commit

Permalink
[Fix] Fix drift in databricks_cluster when is_single_node is set …
Browse files Browse the repository at this point in the history
…to `true`
  • Loading branch information
ashenm committed Jan 30, 2025
1 parent 991ec12 commit 77067f2
Show file tree
Hide file tree
Showing 3 changed files with 162 additions and 6 deletions.
1 change: 1 addition & 0 deletions NEXT_CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

### Bug Fixes

* Fix `databricks_cluster` drift when `is_single_node` is `true` ([#4360](https://github.com/databricks/terraform-provider-databricks/issues/4360)).
* Correctly handle PAT and OBO tokens without expiration ([#4444](https://github.com/databricks/terraform-provider-databricks/pull/4444)).
* Mark `task.spark_jar_task.run_as_repl` as `computed` to fix configuration drift ([#4452](https://github.com/databricks/terraform-provider-databricks/pull/4452)).

Expand Down
61 changes: 55 additions & 6 deletions clusters/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,17 @@ const (

func ResourceCluster() common.Resource {
return common.Resource{
Create: resourceClusterCreate,
Read: resourceClusterRead,
Update: resourceClusterUpdate,
Delete: resourceClusterDelete,
Schema: clusterSchema,
Create: resourceClusterCreate,
Read: resourceClusterRead,
Update: resourceClusterUpdate,
Delete: resourceClusterDelete,
Schema: clusterSchema,
CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error {
if isSingleNode, ok := d.GetOk("is_single_node"); ok && isSingleNode.(bool) {
return singleNodeClusterChangesCustomizeDiff(d)
}
return nil
},
SchemaVersion: clusterSchemaVersion,
Timeouts: resourceClusterTimeouts(),
StateUpgraders: []schema.StateUpgrader{
Expand All @@ -48,6 +54,48 @@ func ResourceCluster() common.Resource {
}
}

// singleNodeClusterChangesCustomizeDiff backfills API-managed keys of
// `custom_tags` and `spark_conf` from state into the plan for single-node
// clusters, so they are not spuriously marked for removal.
//
// The API automatically sets the `ResourceClass` key in `custom_tags` and two
// other keys in the `spark_conf`. If the user hasn't set these explicitly in
// their config, the plan marks these keys for removal. This method copies the
// values for these keys from state to the plan. This needs to be done in
// addition to setting these attributes as computed; otherwise, this
// customization won't take effect for users who have set additional
// `spark_conf` or `custom_tags`.
func singleNodeClusterChangesCustomizeDiff(d *schema.ResourceDiff) error {
	// Top-level attributes mapped to the keys within them that the backend
	// manages automatically.
	autoConfigAttributes := map[string][]string{
		"custom_tags": {"ResourceClass"},
		"spark_conf":  {"spark.databricks.cluster.profile", "spark.master"},
	}

	for key, attributes := range autoConfigAttributes {
		if !d.HasChange(key) {
			continue
		}

		o, n := d.GetChange(key)
		// NOTE: avoid naming these `old`/`new` — `new` shadows the builtin.
		oldMap, okOld := o.(map[string]any)
		newMap, okNew := n.(map[string]any)
		if !okNew || !okOld {
			return fmt.Errorf("internal type casting error n: %T, o: %T", n, o)
		}

		log.Printf("[DEBUG] values for key %s, old: %v, new: %v", key, oldMap, newMap)

		for _, attribute := range attributes {
			// Single lookup: only backfill when the plan is missing the key
			// or plans to nil it out.
			if v, exists := newMap[attribute]; exists && v != nil {
				continue
			}
			newMap[attribute] = oldMap[attribute]
		}

		if err := d.SetNew(key, newMap); err != nil {
			return err
		}
	}

	return nil
}

func clusterSchemaV0() cty.Type {
return (&schema.Resource{
Schema: clusterSchema}).CoreConfigSchema().ImpliedType()
Expand Down Expand Up @@ -346,7 +394,8 @@ func (ClusterSpec) CustomizeSchema(s *common.CustomizableSchema) *common.Customi
s.SchemaPath("docker_image", "url").SetRequired()
s.SchemaPath("docker_image", "basic_auth", "password").SetRequired().SetSensitive()
s.SchemaPath("docker_image", "basic_auth", "username").SetRequired()
s.SchemaPath("spark_conf").SetCustomSuppressDiff(SparkConfDiffSuppressFunc)
s.SchemaPath("spark_conf").SetCustomSuppressDiff(SparkConfDiffSuppressFunc).SetComputed().SetOptional()
s.SchemaPath("custom_tags").SetComputed().SetOptional()
s.SchemaPath("aws_attributes").SetSuppressDiff().SetConflictsWith([]string{"azure_attributes", "gcp_attributes"})
s.SchemaPath("aws_attributes", "zone_id").SetCustomSuppressDiff(ZoneDiffSuppress)
s.SchemaPath("azure_attributes").SetSuppressDiff().SetConflictsWith([]string{"aws_attributes", "gcp_attributes"})
Expand Down
106 changes: 106 additions & 0 deletions clusters/resource_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/terraform-provider-databricks/common"
"github.com/databricks/terraform-provider-databricks/qa"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
Expand Down Expand Up @@ -1630,6 +1631,111 @@ func TestResourceClusterCreate_SingleNode(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 0, d.Get("num_workers"))
}

// TestResourceClusterCreate_SingleNodeAutoPropertiesCustomizeDiff verifies
// that the CustomizeDiff for single-node clusters keeps API-managed
// `custom_tags` / `spark_conf` keys in the plan instead of marking them for
// removal, while user-driven changes still produce a diff.
func TestResourceClusterCreate_SingleNodeAutoPropertiesCustomizeDiff(t *testing.T) {
	// Attributes that are computed on every plan and therefore always appear
	// in the expected diff, regardless of the test case.
	alwaysComputed := []string{
		"default_tags.%",
		"driver_instance_pool_id",
		"driver_node_type_id",
		"enable_elastic_disk",
		"enable_local_disk_encryption",
		"state",
		"url",
	}

	// Prior state shared by every test case; mirrors the common HCL below.
	commonState := map[string]string{
		"cluster_id":              "abc",
		"autotermination_minutes": "60",
		"cluster_name":            "Single Node Cluster",
		"data_security_mode":      "SINGLE_USER",
		"spark_version":           "15.4.x-scala2.12",
		"single_user_name":        "testuser",
		"runtime_engine":          "STANDARD",
		"num_workers":             "0",
		"node_type_id":            "Standard_F4s",
		"kind":                    "CLASSIC_PREVIEW",
		"is_single_node":          "true",
	}

	tests := []struct {
		name          string
		hcl           string
		instanceState map[string]string
		expectedDiff  map[string]*terraform.ResourceAttrDiff
	}{
		{
			name: "resource with no custom_tags or spark_conf",
			hcl:  "",
			instanceState: map[string]string{
				"custom_tags.ResourceClass": "SingleNode",
				"spark_conf.spark.master":   "local[*]",
				"spark_conf.spark.databricks.cluster.profile": "singleNode",
			},
			expectedDiff: nil,
		},
		{
			name: "resource with custom_tags and spark_conf",
			hcl: `custom_tags = {
				"ClusterName" = "SingleNode"
			}
			spark_conf = {
				"spark.databricks.delta.preview.enabled" = "true"
			}`,
			instanceState: map[string]string{
				"custom_tags.ClusterName":   "SingleNode",
				"custom_tags.ResourceClass": "SingleNode",
				"spark_conf.spark.master":   "local[*]",
				"spark_conf.spark.databricks.cluster.profile":       "singleNode",
				"spark_conf.spark.databricks.delta.preview.enabled": "true",
			},
			expectedDiff: nil,
		},
		{
			name: "resource with custom_tags and spark_conf changes",
			hcl: `custom_tags = {
				"ClusterName" = "MonoNode"
			}
			spark_conf = {
				"spark.databricks.delta.preview.enabled" = "false"
			}`,
			instanceState: map[string]string{
				"custom_tags.ClusterName":   "SingleNode",
				"custom_tags.ResourceClass": "SingleNode",
				"spark_conf.spark.master":   "local[*]",
				"spark_conf.spark.databricks.cluster.profile":       "singleNode",
				"spark_conf.spark.databricks.delta.preview.enabled": "true",
			},
			expectedDiff: map[string]*terraform.ResourceAttrDiff{
				"custom_tags.ClusterName":                           {New: "MonoNode", Old: "SingleNode"},
				"spark_conf.spark.databricks.delta.preview.enabled": {New: "false", Old: "true"},
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			wantDiff := tc.expectedDiff
			if wantDiff == nil {
				wantDiff = map[string]*terraform.ResourceAttrDiff{}
			}
			// Fold in the computed attributes common to every case.
			for _, attr := range alwaysComputed {
				wantDiff[attr] = &terraform.ResourceAttrDiff{Old: "", New: "", NewComputed: true, NewRemoved: false, RequiresNew: false, Sensitive: false}
			}

			state := tc.instanceState
			for k, v := range commonState {
				state[k] = v
			}

			qa.ResourceFixture{
				HCL: `
				spark_version = "15.4.x-scala2.12"
				runtime_engine = "STANDARD"
				node_type_id = "Standard_F4s"
				kind = "CLASSIC_PREVIEW"
				cluster_name = "Single Node Cluster"
				data_security_mode = "SINGLE_USER"
				autotermination_minutes = 60
				is_single_node = true
				single_user_name = "testuser"
				` + tc.hcl,
				InstanceState: state,
				ExpectedDiff:  wantDiff,
				Resource:      ResourceCluster(),
			}.ApplyNoError(t)
		})
	}
}

func TestResourceClusterCreate_NegativeNumWorkers(t *testing.T) {
_, err := qa.ResourceFixture{
Create: true,
Expand Down

0 comments on commit 77067f2

Please sign in to comment.