CDPCP-9784 - Terminate DH cluster deletion polling upon termination failure (#24)
gregito authored and balazsgaspar committed Jul 26, 2023
1 parent 601dd44 commit 2cce7f8
Showing 12 changed files with 399 additions and 221 deletions.
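The polling change named in the commit title lives in files that are not expanded on this page. The sketch below illustrates the general idea, namely stopping the deletion poller as soon as the cluster reports a failed termination instead of polling until a timeout. The function name `waitForClusterDeletion`, the status constants, and the fake `describe` callback are illustrative assumptions, not the provider's actual code.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Hypothetical status values; the real provider reads the status from the CDP Datahub API.
const (
	statusDeleteInProgress = "DELETE_IN_PROGRESS"
	statusDeleteFailed     = "DELETE_FAILED"
	statusDeleted          = "DELETED"
)

var errTerminationFailed = errors.New("cluster termination failed")

// waitForClusterDeletion polls the cluster status and returns early when
// termination fails, instead of polling until the context deadline expires.
func waitForClusterDeletion(ctx context.Context, interval time.Duration,
	describe func(context.Context) (string, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			status, err := describe(ctx)
			if err != nil {
				return err
			}
			switch status {
			case statusDeleted:
				return nil // the cluster is gone, stop polling
			case statusDeleteFailed:
				return errTerminationFailed // give up immediately instead of polling further
			}
			// any other status: deletion still in progress, keep polling
		}
	}
}

func main() {
	// Fake describe callback standing in for the CDP API call.
	calls := 0
	describe := func(context.Context) (string, error) {
		calls++
		if calls < 3 {
			return statusDeleteInProgress, nil
		}
		return statusDeleteFailed, nil
	}
	err := waitForClusterDeletion(context.Background(), 100*time.Millisecond, describe)
	fmt.Println("result:", err) // result: cluster termination failed
}
```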
22 changes: 22 additions & 0 deletions docs/resources/datahub_aws_cluster.md
@@ -35,6 +35,11 @@ resource "cdp_datahub_aws_cluster" "aws-cluster" {
environment = "<value>"
cluster_template = "7.2.15 - Data Engineering: Apache Spark, Apache Hive, Apache Oozie"
cluster_definition = "7.2.15 - Data Engineering for AWS"
destroy_options = {
force_delete_cluster = false
}
/* The section below is kept here as a working example of creating the cluster without using a cluster definition
/*instance_group = [
@@ -133,6 +138,14 @@ output "cluster_definition" {
value = cdp_datahub_aws_cluster.aws-cluster.cluster_definition
}
output "destroy_options" {
value = cdp_datahub_aws_cluster.aws-cluster.destroy_options
}
output "force_delete_cluster" {
value = cdp_datahub_aws_cluster.aws-cluster.destroy_options.force_delete_cluster
}
/*
output "recipes" {
value = cdp_datahub_aws_cluster.aws-cluster.instance_group[*].recipes
@@ -203,6 +216,7 @@ output "encryption" {

### Optional

- `destroy_options` (Attributes) Cluster deletion options. (see [below for nested schema](#nestedatt--destroy_options))
- `instance_group` (Attributes List) (see [below for nested schema](#nestedatt--instance_group))

### Read-Only
@@ -211,6 +225,14 @@ output "encryption" {
- `id` (String) The ID of this resource.
- `status` (String) The last known state of the cluster

<a id="nestedatt--destroy_options"></a>
### Nested Schema for `destroy_options`

Optional:

- `force_delete_cluster` (Boolean) Indicates whether the cluster termination should be forced. If set to true, the termination proceeds even if failures occur during the process; no notification is emitted about such failures, so it is advisable to check with the cloud provider that no leftover resources remain once the destroy has finished.


<a id="nestedatt--instance_group"></a>
### Nested Schema for `instance_group`

9 changes: 9 additions & 0 deletions docs/resources/datahub_azure_cluster.md
@@ -204,6 +204,7 @@ output "encryption" {

### Optional

- `destroy_options` (Attributes) Cluster deletion options. (see [below for nested schema](#nestedatt--destroy_options))
- `instance_group` (Attributes List) (see [below for nested schema](#nestedatt--instance_group))

### Read-Only
@@ -212,6 +213,14 @@ output "encryption" {
- `id` (String) The ID of this resource.
- `status` (String) The last known state of the cluster

<a id="nestedatt--destroy_options"></a>
### Nested Schema for `destroy_options`

Optional:

- `force_delete_cluster` (Boolean) Indicates whether the cluster termination should be forced. If set to true, the termination proceeds even if failures occur during the process; no notification is emitted about such failures, so it is advisable to check with the cloud provider that no leftover resources remain once the destroy has finished.


<a id="nestedatt--instance_group"></a>
### Nested Schema for `instance_group`

13 changes: 13 additions & 0 deletions examples/resources/cdp_datahub_aws_cluster/resource.tf
@@ -21,6 +21,11 @@ resource "cdp_datahub_aws_cluster" "aws-cluster" {
environment = "<value>"
cluster_template = "7.2.15 - Data Engineering: Apache Spark, Apache Hive, Apache Oozie"
cluster_definition = "7.2.15 - Data Engineering for AWS"

destroy_options = {
force_delete_cluster = false
}

/* The section below is kept here as a working example of creating the cluster without using a cluster definition
/*instance_group = [
@@ -119,6 +124,14 @@ output "cluster_definition" {
value = cdp_datahub_aws_cluster.aws-cluster.cluster_definition
}

output "destroy_options" {
value = cdp_datahub_aws_cluster.aws-cluster.destroy_options
}

output "force_delete_cluster" {
value = cdp_datahub_aws_cluster.aws-cluster.destroy_options.force_delete_cluster
}

/*
output "recipes" {
value = cdp_datahub_aws_cluster.aws-cluster.instance_group[*].recipes
147 changes: 147 additions & 0 deletions resources/datahub/common_scheme.go
@@ -0,0 +1,147 @@
// Copyright 2023 Cloudera. All Rights Reserved.
//
// This file is licensed under the Apache License Version 2.0 (the "License").
// You may not use this file except in compliance with the License.
// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
//
// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. Refer to the License for the specific
// permissions and limitations governing your use of the file.

package datahub

import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)

// generalAttributes contains the schema attributes shared by the Data Hub cluster resources.
var generalAttributes = map[string]schema.Attribute{
"id": schema.StringAttribute{
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"crn": schema.StringAttribute{
MarkdownDescription: "The CRN of the cluster.",
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"status": schema.StringAttribute{
MarkdownDescription: "The last known state of the cluster",
Description: "The last known state of the cluster",
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"name": schema.StringAttribute{
MarkdownDescription: "The name of the cluster.",
Required: true,
},
"cluster_template": schema.StringAttribute{
MarkdownDescription: "The name of the cluster template.",
Required: true,
},
"cluster_definition": schema.StringAttribute{
MarkdownDescription: "The name of the cluster definition.",
Required: true,
},
"environment": schema.StringAttribute{
MarkdownDescription: "The name of the environment where the cluster will belong to.",
Required: true,
},
"destroy_options": schema.SingleNestedAttribute{
Optional: true,
Description: "Cluster deletion options.",
MarkdownDescription: "Cluster deletion options.",
Attributes: map[string]schema.Attribute{
"force_delete_cluster": schema.BoolAttribute{
MarkdownDescription: "An indicator that will take place once the cluster termination will be performed. " +
"If it is true, that means if something would go sideways during termination, the operation will proceed, " +
"however in such a case no notification would come thus it is advisable to check the cloud provider if " +
"there are no leftover resources once the destroy is finished.",
Description: "An indicator that will take place once the cluster termination will be performed. " +
"If it is true, that means if something would go sideways during termination, the operation will proceed, " +
"however in such a case no notification would come thus it is advisable to check the cloud provider if " +
"there are no leftover resources once the destroy is finished.",
Default: booldefault.StaticBool(false),
Computed: true,
Optional: true,
},
},
},
}

// instanceGroupSchemaAttributes contains the shared schema of the instance_group attribute.
var instanceGroupSchemaAttributes = map[string]schema.Attribute{
"instance_group": schema.ListNestedAttribute{
Optional: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"node_count": schema.Int64Attribute{
MarkdownDescription: "The cluster node count. Has to be greater or equal than 0 and less than 100,000.",
Required: true,
},
"instance_group_name": schema.StringAttribute{
MarkdownDescription: "The name of the instance group.",
Required: true,
},
"instance_group_type": schema.StringAttribute{
MarkdownDescription: "The type of the instance group.",
Required: true,
},
"instance_type": schema.StringAttribute{
MarkdownDescription: "The cloud provider-side instance type.",
Required: true,
},
"root_volume_size": schema.Int64Attribute{
MarkdownDescription: "The size of the root volume in GB",
Required: true,
},
"recipes": schema.SetAttribute{
MarkdownDescription: "The set of recipe names that are going to be applied on the given instance group.",
ElementType: types.StringType,
Optional: true,
},
"attached_volume_configuration": schema.ListNestedAttribute{
Required: true,
MarkdownDescription: "Configuration regarding the attached volume to the specific instance group.",
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"volume_size": schema.Int64Attribute{
MarkdownDescription: "The size of the volume in GB.",
Required: true,
},
"volume_count": schema.Int64Attribute{
MarkdownDescription: "The number of volumes to be attached.",
Required: true,
},
"volume_type": schema.StringAttribute{
MarkdownDescription: "The - cloud provider - type of the volume.",
Required: true,
},
},
},
},
"recovery_mode": schema.StringAttribute{
MarkdownDescription: "The type of the recovery mode.",
Required: true,
},
"volume_encryption": schema.SingleNestedAttribute{
MarkdownDescription: "The volume encryption related configuration.",
Required: true,
Attributes: map[string]schema.Attribute{
"encryption": schema.BoolAttribute{
Required: true,
},
},
},
},
},
},
}
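This new file only declares the shared attribute maps; the resource schemas that consume them are among the files not expanded on this page. A rough sketch of how such shared maps are typically merged into a single resource schema follows; the `mergeAttributes` helper and the `exampleSchema` function are assumptions for illustration, not code from this commit.

```go
// Sketch only: combines shared attribute maps into one resource schema.
package datahub

import (
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
)

// mergeAttributes copies every entry of the given maps into a new map;
// later maps override earlier ones on key collisions.
func mergeAttributes(maps ...map[string]schema.Attribute) map[string]schema.Attribute {
	merged := map[string]schema.Attribute{}
	for _, m := range maps {
		for name, attr := range m {
			merged[name] = attr
		}
	}
	return merged
}

// exampleSchema shows how a resource's Schema method could reuse the shared maps.
func exampleSchema(resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		MarkdownDescription: "Example Data Hub cluster resource built from the shared attribute maps.",
		Attributes:          mergeAttributes(generalAttributes, instanceGroupSchemaAttributes),
	}
}
```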
11 changes: 10 additions & 1 deletion resources/datahub/model_datahub.go
@@ -20,9 +20,10 @@ type datahubResourceModel struct {
Name types.String `tfsdk:"name"`
Status types.String `tfsdk:"status"`
Environment types.String `tfsdk:"environment"`
InstanceGroup []InstanceGroup `tfsdk:"instance_group"`
DestroyOptions *DestroyOptions `tfsdk:"destroy_options"`
ClusterTemplate types.String `tfsdk:"cluster_template"`
ClusterDefinition types.String `tfsdk:"cluster_definition"`
InstanceGroup []InstanceGroup `tfsdk:"instance_group"`
}

type InstanceGroup struct {
@@ -46,3 +47,11 @@ type AttachedVolumeConfiguration struct {
type VolumeEncryption struct {
Encryption types.Bool `tfsdk:"encryption"`
}

type DestroyOptions struct {
ForceDeleteCluster types.Bool `tfsdk:"force_delete_cluster"`
}

// forceDeleteRequested reports whether destroy_options.force_delete_cluster was explicitly set to true.
func (d *datahubResourceModel) forceDeleteRequested() bool {
return d.DestroyOptions != nil && !d.DestroyOptions.ForceDeleteCluster.IsNull() && d.DestroyOptions.ForceDeleteCluster.ValueBool()
}
52 changes: 52 additions & 0 deletions resources/datahub/model_datahub_test.go
@@ -0,0 +1,52 @@
// Copyright 2023 Cloudera. All Rights Reserved.
//
// This file is licensed under the Apache License Version 2.0 (the "License").
// You may not use this file except in compliance with the License.
// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
//
// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. Refer to the License for the specific
// permissions and limitations governing your use of the file.

package datahub

import (
"github.com/hashicorp/terraform-plugin-framework/types"
"testing"
)

func TestForceDeleteRequested(t *testing.T) {
tests := []struct {
name string
model *datahubResourceModel
expectedResult bool
}{
{
name: "when DestroyOptions nil",
model: &datahubResourceModel{DestroyOptions: nil},
expectedResult: false,
},
{
name: "when DestroyOptions not nil but ForceDeleteCluster is",
model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolNull()}},
expectedResult: false,
},
{
name: "when neither DestroyOptions or ForceDeleteCluster are nil but ForceDeleteCluster is false",
model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolValue(false)}},
expectedResult: false,
},
{
name: "when ForceDeleteCluster is true",
model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolValue(true)}},
expectedResult: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.model.forceDeleteRequested() != test.expectedResult {
t.Errorf("Did not get the expected output! Expected: %t, got: %t", test.expectedResult, test.model.forceDeleteRequested())
}
})
}
}
6 changes: 6 additions & 0 deletions resources/datahub/resource_aws_datahub.go
@@ -140,7 +140,13 @@ func (r *awsDatahubResource) Delete(ctx context.Context, req resource.DeleteRequ

params := operations.NewDeleteClusterParamsWithContext(ctx).WithInput(&datahubmodels.DeleteClusterRequest{
ClusterName: state.ID.ValueStringPointer(),
Force: state.forceDeleteRequested(),
})
if state.forceDeleteRequested() {
tflog.Debug(ctx, fmt.Sprintf("Sending force delete request for cluster: %s", *params.Input.ClusterName))
} else {
tflog.Debug(ctx, fmt.Sprintf("Sending delete request for cluster: %s", *params.Input.ClusterName))
}
_, err := r.client.Datahub.Operations.DeleteCluster(params)
if err != nil {
if !isNotFoundError(err) {
6 changes: 6 additions & 0 deletions resources/datahub/resource_azure_datahub.go
@@ -140,7 +140,13 @@ func (r *azureDatahubResource) Delete(ctx context.Context, req resource.DeleteRe

params := operations.NewDeleteClusterParamsWithContext(ctx).WithInput(&datahubmodels.DeleteClusterRequest{
ClusterName: state.ID.ValueStringPointer(),
Force: state.forceDeleteRequested(),
})
if state.forceDeleteRequested() {
tflog.Debug(ctx, fmt.Sprintf("Sending force delete request for cluster: %s", *params.Input.ClusterName))
} else {
tflog.Debug(ctx, fmt.Sprintf("Sending delete request for cluster: %s", *params.Input.ClusterName))
}
_, err := r.client.Datahub.Operations.DeleteCluster(params)
if err != nil {
if !isNotFoundError(err) {
(4 more changed files not shown on this page)
