diff --git a/docs/resources/datahub_aws_cluster.md b/docs/resources/datahub_aws_cluster.md index d3c26ac5..b2fb9d2a 100644 --- a/docs/resources/datahub_aws_cluster.md +++ b/docs/resources/datahub_aws_cluster.md @@ -35,6 +35,11 @@ resource "cdp_datahub_aws_cluster" "aws-cluster" { environment = "" cluster_template = "7.2.15 - Data Engineering: Apache Spark, Apache Hive, Apache Oozie" cluster_definition = "7.2.15 - Data Engineering for AWS" + + destroy_options = { + force_delete_cluster = false + } + /* The below section kept here as a working example if one would like to use the cluster creation w/o the usage of the cluster definition /*instance_group = [ @@ -133,6 +138,14 @@ output "cluster_definition" { value = cdp_datahub_aws_cluster.aws-cluster.cluster_definition } +output "destroy_options" { + value = cdp_datahub_aws_cluster.aws-cluster.destroy_options +} + +output "force_delete_cluster" { + value = cdp_datahub_aws_cluster.aws-cluster.destroy_options.force_delete_cluster +} + /* output "recipes" { value = cdp_datahub_aws_cluster.aws-cluster.instance_group[*].recipes @@ -203,6 +216,7 @@ output "encryption" { ### Optional +- `destroy_options` (Attributes) Cluster deletion options. (see [below for nested schema](#nestedatt--destroy_options)) - `instance_group` (Attributes List) (see [below for nested schema](#nestedatt--instance_group)) ### Read-Only @@ -211,6 +225,14 @@ output "encryption" { - `id` (String) The ID of this resource. - `status` (String) The last known state of the cluster + +### Nested Schema for `destroy_options` + +Optional: + +- `force_delete_cluster` (Boolean) An indicator that will take place once the cluster termination will be performed. If it is true, that means if something would go sideways during termination, the operation will proceed, however in such a case no notification would come thus it is advisable to check the cloud provider if there are no leftover resources once the destroy is finished. + + ### Nested Schema for `instance_group` diff --git a/docs/resources/datahub_azure_cluster.md b/docs/resources/datahub_azure_cluster.md index 9ad5c4f1..0a5f064c 100644 --- a/docs/resources/datahub_azure_cluster.md +++ b/docs/resources/datahub_azure_cluster.md @@ -204,6 +204,7 @@ output "encryption" { ### Optional +- `destroy_options` (Attributes) Cluster deletion options. (see [below for nested schema](#nestedatt--destroy_options)) - `instance_group` (Attributes List) (see [below for nested schema](#nestedatt--instance_group)) ### Read-Only @@ -212,6 +213,14 @@ output "encryption" { - `id` (String) The ID of this resource. - `status` (String) The last known state of the cluster + +### Nested Schema for `destroy_options` + +Optional: + +- `force_delete_cluster` (Boolean) An indicator that will take place once the cluster termination will be performed. If it is true, that means if something would go sideways during termination, the operation will proceed, however in such a case no notification would come thus it is advisable to check the cloud provider if there are no leftover resources once the destroy is finished. 
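
As a minimal sketch of how the new nested attribute might be used (the resource body is abbreviated and only the attribute added in this change is shown; the `cdp_datahub_aws_cluster.aws-cluster` address comes from the example earlier in this document, and the Azure cluster resource documented here accepts the same block):

```terraform
resource "cdp_datahub_aws_cluster" "aws-cluster" {
  # name, environment, cluster_template and cluster_definition as in the full example above

  destroy_options = {
    # Defaults to false. When set to true, `terraform destroy` proceeds even if
    # errors occur during cluster termination; since no failure notification is
    # returned in that case, it is advisable to check the cloud provider account
    # for leftover resources once the destroy has finished.
    force_delete_cluster = true
  }
}
```
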
+ + ### Nested Schema for `instance_group` diff --git a/docs/resources/datalake_azure_datalake.md b/docs/resources/datalake_azure_datalake.md index abc76caa..a2a192df 100644 --- a/docs/resources/datalake_azure_datalake.md +++ b/docs/resources/datalake_azure_datalake.md @@ -35,7 +35,6 @@ A Data Lake is a service which provides a protective ring around the data stored - `certificate_expiration_state` (String) - `cloud_storage_base_location` (String) -- `cloudbreak_version` (String) - `cloudera_manager` (Attributes) (see [below for nested schema](#nestedatt--cloudera_manager)) - `creation_date` (String) - `credential_crn` (String) @@ -106,31 +105,17 @@ Read-Only: Read-Only: -- `ambari_server` (Boolean) - `discovery_fqdn` (String) - `id` (String) - `instance_group` (String) - `instance_status` (String) - `instance_type_val` (String) -- `life_cycle` (String) -- `mounted_volumes` (Attributes Set) (see [below for nested schema](#nestedatt--instance_groups--instances--mounted_volumes)) - `private_ip` (String) - `public_ip` (String) - `ssh_port` (Number) - `state` (String) - `status_reason` (String) - -### Nested Schema for `instance_groups.instances.mounted_volumes` - -Read-Only: - -- `device` (String) -- `volume_id` (String) -- `volume_size` (String) -- `volume_type` (String) - - diff --git a/docs/resources/environments_azure_environment.md b/docs/resources/environments_azure_environment.md index ae5b61bf..4bef0db7 100644 --- a/docs/resources/environments_azure_environment.md +++ b/docs/resources/environments_azure_environment.md @@ -92,6 +92,7 @@ output "crn" { - `enable_tunnel` (Boolean) - `encryption_key_resource_group_name` (String) - `encryption_key_url` (String) +- `endpoint_access_gateway_scheme` (String) The scheme for the endpoint gateway. PUBLIC creates an external endpoint that can be accessed over the Internet. Defaults to PRIVATE which restricts the traffic to be internal to the VPC. 
- `existing_network_params` (Attributes) (see [below for nested schema](#nestedatt--existing_network_params)) - `freeipa` (Attributes) (see [below for nested schema](#nestedatt--freeipa)) - `new_network_params` (Attributes) (see [below for nested schema](#nestedatt--new_network_params)) diff --git a/examples/resources/cdp_datahub_aws_cluster/resource.tf b/examples/resources/cdp_datahub_aws_cluster/resource.tf index 997ac46d..bbc5d752 100644 --- a/examples/resources/cdp_datahub_aws_cluster/resource.tf +++ b/examples/resources/cdp_datahub_aws_cluster/resource.tf @@ -21,6 +21,11 @@ resource "cdp_datahub_aws_cluster" "aws-cluster" { environment = "" cluster_template = "7.2.15 - Data Engineering: Apache Spark, Apache Hive, Apache Oozie" cluster_definition = "7.2.15 - Data Engineering for AWS" + + destroy_options = { + force_delete_cluster = false + } + /* The below section kept here as a working example if one would like to use the cluster creation w/o the usage of the cluster definition /*instance_group = [ @@ -119,6 +124,14 @@ output "cluster_definition" { value = cdp_datahub_aws_cluster.aws-cluster.cluster_definition } +output "destroy_options" { + value = cdp_datahub_aws_cluster.aws-cluster.destroy_options +} + +output "force_delete_cluster" { + value = cdp_datahub_aws_cluster.aws-cluster.destroy_options.force_delete_cluster +} + /* output "recipes" { value = cdp_datahub_aws_cluster.aws-cluster.instance_group[*].recipes diff --git a/resources/datahub/common_scheme.go b/resources/datahub/common_scheme.go new file mode 100644 index 00000000..c34a3a17 --- /dev/null +++ b/resources/datahub/common_scheme.go @@ -0,0 +1,147 @@ +// Copyright 2023 Cloudera. All Rights Reserved. +// +// This file is licensed under the Apache License Version 2.0 (the "License"). +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. +// +// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +// OF ANY KIND, either express or implied. Refer to the License for the specific +// permissions and limitations governing your use of the file. 
+ +package datahub + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +var generalAttributes = map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "crn": schema.StringAttribute{ + MarkdownDescription: "The CRN of the cluster.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "status": schema.StringAttribute{ + MarkdownDescription: "The last known state of the cluster", + Description: "The last known state of the cluster", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster.", + Required: true, + }, + "cluster_template": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster template.", + Required: true, + }, + "cluster_definition": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster definition.", + Required: true, + }, + "environment": schema.StringAttribute{ + MarkdownDescription: "The name of the environment where the cluster will belong to.", + Required: true, + }, + "destroy_options": schema.SingleNestedAttribute{ + Optional: true, + Description: "Cluster deletion options.", + MarkdownDescription: "Cluster deletion options.", + Attributes: map[string]schema.Attribute{ + "force_delete_cluster": schema.BoolAttribute{ + MarkdownDescription: "An indicator that will take place once the cluster termination will be performed. " + + "If it is true, that means if something would go sideways during termination, the operation will proceed, " + + "however in such a case no notification would come thus it is advisable to check the cloud provider if " + + "there are no leftover resources once the destroy is finished.", + Description: "An indicator that will take place once the cluster termination will be performed. " + + "If it is true, that means if something would go sideways during termination, the operation will proceed, " + + "however in such a case no notification would come thus it is advisable to check the cloud provider if " + + "there are no leftover resources once the destroy is finished.", + Default: booldefault.StaticBool(false), + Computed: true, + Optional: true, + }, + }, + }, +} + +var instanceGroupSchemaAttributes = map[string]schema.Attribute{ + "instance_group": schema.ListNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_count": schema.Int64Attribute{ + MarkdownDescription: "The cluster node count. 
Has to be greater or equal than 0 and less than 100,000.", + Required: true, + }, + "instance_group_name": schema.StringAttribute{ + MarkdownDescription: "The name of the instance group.", + Required: true, + }, + "instance_group_type": schema.StringAttribute{ + MarkdownDescription: "The type of the instance group.", + Required: true, + }, + "instance_type": schema.StringAttribute{ + MarkdownDescription: "The cloud provider-side instance type.", + Required: true, + }, + "root_volume_size": schema.Int64Attribute{ + MarkdownDescription: "The size of the root volume in GB", + Required: true, + }, + "recipes": schema.SetAttribute{ + MarkdownDescription: "The set of recipe names that are going to be applied on the given instance group.", + ElementType: types.StringType, + Optional: true, + }, + "attached_volume_configuration": schema.ListNestedAttribute{ + Required: true, + MarkdownDescription: "Configuration regarding the attached volume to the specific instance group.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "volume_size": schema.Int64Attribute{ + MarkdownDescription: "The size of the volume in GB.", + Required: true, + }, + "volume_count": schema.Int64Attribute{ + MarkdownDescription: "The number of volumes to be attached.", + Required: true, + }, + "volume_type": schema.StringAttribute{ + MarkdownDescription: "The - cloud provider - type of the volume.", + Required: true, + }, + }, + }, + }, + "recovery_mode": schema.StringAttribute{ + MarkdownDescription: "The type of the recovery mode.", + Required: true, + }, + "volume_encryption": schema.SingleNestedAttribute{ + MarkdownDescription: "The volume encryption related configuration.", + Required: true, + Attributes: map[string]schema.Attribute{ + "encryption": schema.BoolAttribute{ + Required: true, + }, + }, + }, + }, + }, + }, +} diff --git a/resources/datahub/model_datahub.go b/resources/datahub/model_datahub.go index 7cc064b5..b6fd938d 100644 --- a/resources/datahub/model_datahub.go +++ b/resources/datahub/model_datahub.go @@ -20,9 +20,10 @@ type datahubResourceModel struct { Name types.String `tfsdk:"name"` Status types.String `tfsdk:"status"` Environment types.String `tfsdk:"environment"` + InstanceGroup []InstanceGroup `tfsdk:"instance_group"` + DestroyOptions *DestroyOptions `tfsdk:"destroy_options"` ClusterTemplate types.String `tfsdk:"cluster_template"` ClusterDefinition types.String `tfsdk:"cluster_definition"` - InstanceGroup []InstanceGroup `tfsdk:"instance_group"` } type InstanceGroup struct { @@ -46,3 +47,11 @@ type AttachedVolumeConfiguration struct { type VolumeEncryption struct { Encryption types.Bool `tfsdk:"encryption"` } + +type DestroyOptions struct { + ForceDeleteCluster types.Bool `tfsdk:"force_delete_cluster"` +} + +func (d *datahubResourceModel) forceDeleteRequested() bool { + return d.DestroyOptions != nil && !d.DestroyOptions.ForceDeleteCluster.IsNull() && d.DestroyOptions.ForceDeleteCluster.ValueBool() +} diff --git a/resources/datahub/model_datahub_test.go b/resources/datahub/model_datahub_test.go new file mode 100644 index 00000000..ca042a8b --- /dev/null +++ b/resources/datahub/model_datahub_test.go @@ -0,0 +1,52 @@ +// Copyright 2023 Cloudera. All Rights Reserved. +// +// This file is licensed under the Apache License Version 2.0 (the "License"). +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 
+// +// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +// OF ANY KIND, either express or implied. Refer to the License for the specific +// permissions and limitations governing your use of the file. + +package datahub + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" + "testing" +) + +func TestForceDeleteRequested(t *testing.T) { + tests := []struct { + name string + model *datahubResourceModel + expectedResult bool + }{ + { + name: "when DestroyOptions nil", + model: &datahubResourceModel{DestroyOptions: nil}, + expectedResult: false, + }, + { + name: "when DestroyOptions not nil but ForceDeleteCluster is", + model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolNull()}}, + expectedResult: false, + }, + { + name: "when neither DestroyOptions or ForceDeleteCluster are nil but ForceDeleteCluster is false", + model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolValue(false)}}, + expectedResult: false, + }, + { + name: "when ForceDeleteCluster is true", + model: &datahubResourceModel{DestroyOptions: &DestroyOptions{ForceDeleteCluster: types.BoolValue(true)}}, + expectedResult: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.model.forceDeleteRequested() != test.expectedResult { + t.Errorf("Did not get the expected output! Expected: %t, got: %t", test.expectedResult, test.model.forceDeleteRequested()) + } + }) + } +} diff --git a/resources/datahub/resource_aws_datahub.go b/resources/datahub/resource_aws_datahub.go index 2153812e..e196e084 100644 --- a/resources/datahub/resource_aws_datahub.go +++ b/resources/datahub/resource_aws_datahub.go @@ -140,7 +140,13 @@ func (r *awsDatahubResource) Delete(ctx context.Context, req resource.DeleteRequ params := operations.NewDeleteClusterParamsWithContext(ctx).WithInput(&datahubmodels.DeleteClusterRequest{ ClusterName: state.ID.ValueStringPointer(), + Force: state.forceDeleteRequested(), }) + if state.forceDeleteRequested() { + tflog.Debug(ctx, fmt.Sprintf("Sending force delete request for cluster: %s", *params.Input.ClusterName)) + } else { + tflog.Debug(ctx, fmt.Sprintf("Sending delete request for cluster: %s", *params.Input.ClusterName)) + } _, err := r.client.Datahub.Operations.DeleteCluster(params) if err != nil { if !isNotFoundError(err) { diff --git a/resources/datahub/resource_azure_datahub.go b/resources/datahub/resource_azure_datahub.go index 8ab8b0e9..b2157520 100644 --- a/resources/datahub/resource_azure_datahub.go +++ b/resources/datahub/resource_azure_datahub.go @@ -140,7 +140,13 @@ func (r *azureDatahubResource) Delete(ctx context.Context, req resource.DeleteRe params := operations.NewDeleteClusterParamsWithContext(ctx).WithInput(&datahubmodels.DeleteClusterRequest{ ClusterName: state.ID.ValueStringPointer(), + Force: state.forceDeleteRequested(), }) + if state.forceDeleteRequested() { + tflog.Debug(ctx, fmt.Sprintf("Sending force delete request for cluster: %s", *params.Input.ClusterName)) + } else { + tflog.Debug(ctx, fmt.Sprintf("Sending delete request for cluster: %s", *params.Input.ClusterName)) + } _, err := r.client.Datahub.Operations.DeleteCluster(params) if err != nil { if !isNotFoundError(err) { diff --git a/resources/datahub/schema_aws_datahub.go b/resources/datahub/schema_aws_datahub.go index 9bd236fb..b7ffc239 100644 --- a/resources/datahub/schema_aws_datahub.go +++ b/resources/datahub/schema_aws_datahub.go @@ -14,119 +14,16 @@ import ( "context" 
"github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/cloudera/terraform-provider-cdp/utils" ) -func (r *awsDatahubResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { - tflog.Info(ctx, "Creating awsDatahubResource.") +func (r *awsDatahubResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + attr := map[string]schema.Attribute{} + utils.Append(attr, generalAttributes) + utils.Append(attr, instanceGroupSchemaAttributes) resp.Schema = schema.Schema{ MarkdownDescription: "Creates an AWS Data hub cluster.", - Attributes: map[string]schema.Attribute{ - "id": schema.StringAttribute{ - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "crn": schema.StringAttribute{ - MarkdownDescription: "The CRN of the cluster.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "status": schema.StringAttribute{ - MarkdownDescription: "The last known state of the cluster", - Description: "The last known state of the cluster", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "name": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster.", - Required: true, - }, - "cluster_template": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster template.", - Required: true, - }, - "cluster_definition": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster definition.", - Required: true, - }, - "environment": schema.StringAttribute{ - MarkdownDescription: "The name of the environment where the cluster will belong to.", - Required: true, - }, - "instance_group": schema.ListNestedAttribute{ - Optional: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "node_count": schema.Int64Attribute{ - MarkdownDescription: "The cluster node count. 
Has to be greater or equal than 0 and less than 100,000.", - Required: true, - }, - "instance_group_name": schema.StringAttribute{ - MarkdownDescription: "The name of the instance group.", - Required: true, - }, - "instance_group_type": schema.StringAttribute{ - MarkdownDescription: "The type of the instance group.", - Required: true, - }, - "instance_type": schema.StringAttribute{ - MarkdownDescription: "The cloud provider-side instance type.", - Required: true, - }, - "root_volume_size": schema.Int64Attribute{ - MarkdownDescription: "The size of the root volume in GB", - Required: true, - }, - "recipes": schema.SetAttribute{ - MarkdownDescription: "The set of recipe names that are going to be applied on the given instance group.", - ElementType: types.StringType, - Optional: true, - }, - "attached_volume_configuration": schema.ListNestedAttribute{ - Required: true, - MarkdownDescription: "Configuration regarding the attached volume to the specific instance group.", - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "volume_size": schema.Int64Attribute{ - MarkdownDescription: "The size of the volume in GB.", - Required: true, - }, - "volume_count": schema.Int64Attribute{ - MarkdownDescription: "The number of volumes to be attached.", - Required: true, - }, - "volume_type": schema.StringAttribute{ - MarkdownDescription: "The - cloud provider - type of the volume.", - Required: true, - }, - }, - }, - }, - "recovery_mode": schema.StringAttribute{ - MarkdownDescription: "The type of the recovery mode.", - Required: true, - }, - "volume_encryption": schema.SingleNestedAttribute{ - MarkdownDescription: "The volume encryption related configuration.", - Required: true, - Attributes: map[string]schema.Attribute{ - "encryption": schema.BoolAttribute{ - Required: true, - }, - }, - }, - }, - }, - }, - }, + Attributes: attr, } } diff --git a/resources/datahub/schema_azure_datahub.go b/resources/datahub/schema_azure_datahub.go index 0199b7f6..cc897936 100644 --- a/resources/datahub/schema_azure_datahub.go +++ b/resources/datahub/schema_azure_datahub.go @@ -14,119 +14,16 @@ import ( "context" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/cloudera/terraform-provider-cdp/utils" ) -func (r *azureDatahubResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { - tflog.Info(ctx, "Creating azureDatahubResource.") +func (r *azureDatahubResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + attr := map[string]schema.Attribute{} + utils.Append(attr, generalAttributes) + utils.Append(attr, instanceGroupSchemaAttributes) resp.Schema = schema.Schema{ MarkdownDescription: "Creates an Azure Data hub cluster.", - Attributes: map[string]schema.Attribute{ - "id": schema.StringAttribute{ - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "crn": schema.StringAttribute{ - MarkdownDescription: "The CRN of the cluster.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "status": schema.StringAttribute{ - 
MarkdownDescription: "The last known state of the cluster", - Description: "The last known state of the cluster", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "name": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster.", - Required: true, - }, - "cluster_template": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster template.", - Required: true, - }, - "cluster_definition": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster definition.", - Required: true, - }, - "environment": schema.StringAttribute{ - MarkdownDescription: "The name of the environment where the cluster will belong to.", - Required: true, - }, - "instance_group": schema.ListNestedAttribute{ - Optional: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "node_count": schema.Int64Attribute{ - MarkdownDescription: "The cluster node count. Has to be greater or equal than 0 and less than 100,000.", - Required: true, - }, - "instance_group_name": schema.StringAttribute{ - MarkdownDescription: "The name of the instance group.", - Required: true, - }, - "instance_group_type": schema.StringAttribute{ - MarkdownDescription: "The type of the instance group.", - Required: true, - }, - "instance_type": schema.StringAttribute{ - MarkdownDescription: "The cloud provider-side instance type.", - Required: true, - }, - "root_volume_size": schema.Int64Attribute{ - MarkdownDescription: "The size of the root volume in GB", - Required: true, - }, - "recipes": schema.SetAttribute{ - MarkdownDescription: "The set of recipe names that are going to be applied on the given instance group.", - ElementType: types.StringType, - Optional: true, - }, - "attached_volume_configuration": schema.ListNestedAttribute{ - Required: true, - MarkdownDescription: "Configuration regarding the attached volume to the specific instance group.", - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "volume_size": schema.Int64Attribute{ - MarkdownDescription: "The size of the volume in GB.", - Required: true, - }, - "volume_count": schema.Int64Attribute{ - MarkdownDescription: "The number of volumes to be attached.", - Required: true, - }, - "volume_type": schema.StringAttribute{ - MarkdownDescription: "The - cloud provider - type of the volume.", - Required: true, - }, - }, - }, - }, - "recovery_mode": schema.StringAttribute{ - MarkdownDescription: "The type of the recovery mode.", - Required: true, - }, - "volume_encryption": schema.SingleNestedAttribute{ - MarkdownDescription: "The volume encryption related configuration.", - Required: true, - Attributes: map[string]schema.Attribute{ - "encryption": schema.BoolAttribute{ - Required: true, - }, - }, - }, - }, - }, - }, - }, + Attributes: attr, } } diff --git a/utils/schema_utils.go b/utils/schema_utils.go new file mode 100644 index 00000000..d374ce77 --- /dev/null +++ b/utils/schema_utils.go @@ -0,0 +1,21 @@ +// Copyright 2023 Cloudera. All Rights Reserved. +// +// This file is licensed under the Apache License Version 2.0 (the "License"). +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. +// +// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +// OF ANY KIND, either express or implied. 
Refer to the License for the specific +// permissions and limitations governing your use of the file. + +package utils + +import "github.com/hashicorp/terraform-plugin-framework/resource/schema" + +func Append(to map[string]schema.Attribute, from map[string]schema.Attribute) { + if from != nil && to != nil { + for key, attribute := range from { + to[key] = attribute + } + } +} diff --git a/utils/schema_utils_test.go b/utils/schema_utils_test.go new file mode 100644 index 00000000..ba7703e9 --- /dev/null +++ b/utils/schema_utils_test.go @@ -0,0 +1,99 @@ +// Copyright 2023 Cloudera. All Rights Reserved. +// +// This file is licensed under the Apache License Version 2.0 (the "License"). +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. +// +// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS +// OF ANY KIND, either express or implied. Refer to the License for the specific +// permissions and limitations governing your use of the file. + +package utils + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "testing" +) + +func TestWithNoErrorAndAction(t *testing.T) { + type testCase struct { + name string + target map[string]schema.Attribute + source map[string]schema.Attribute + } + for _, test := range []testCase{ + { + name: "Both inputs are nil", + target: nil, + source: nil, + }, + { + name: "Target map is nil.", + target: nil, + source: map[string]schema.Attribute{}, + }, + { + name: "Source is nil.", + target: map[string]schema.Attribute{}, + source: nil, + }, + { + name: "Both are empty.", + target: map[string]schema.Attribute{}, + source: map[string]schema.Attribute{}, + }, + } { + t.Run(test.name, func(t *testing.T) { + Append(test.target, test.source) + + if test.target != nil && len(test.target) > 0 { + t.Errorf("Target got extended eventhough it should've left untouched.") + } + }) + } +} + +func TestAppendSchemaWhenNoOverlapThenSimpleCopyHappens(t *testing.T) { + sourceKey := "some_other_key" + target := map[string]schema.Attribute{ + "id": schema.StringAttribute{}, + } + initialLength := len(target) + source := map[string]schema.Attribute{ + sourceKey: schema.StringAttribute{}, + } + + Append(target, source) + + if len(target) != initialLength+len(source) { + t.Errorf("Map did not get updated!") + } + if target["some_other_key"] == nil { + t.Errorf("the new value with the key of '%s' did not get into the target map.", sourceKey) + } +} + +func TestAppendSchemaWhenOverlapThenOverwriteHappens(t *testing.T) { + key := "keyValue" + originalDescr := "original description" + target := map[string]schema.Attribute{ + key: schema.StringAttribute{ + Description: originalDescr, + }, + } + initialLength := len(target) + source := map[string]schema.Attribute{ + key: schema.StringAttribute{ + Description: "some other description", + }, + } + + Append(target, source) + + if len(target) != initialLength { + t.Errorf("Map got extended but should not be!") + } + if target[key].GetDescription() == originalDescr { + t.Errorf("The target map did not get updated properly.") + } +}
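
For reference, a minimal, self-contained sketch of the merge semantics these tests assert: non-overlapping keys from the source map are copied into the target, and overlapping keys are overwritten. The `utils.Append` signature and import paths are taken from this change; the `main` package wrapper, the attribute names, and the descriptions are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/resource/schema"

	"github.com/cloudera/terraform-provider-cdp/utils"
)

func main() {
	// A base attribute map, analogous to generalAttributes in common_scheme.go.
	attrs := map[string]schema.Attribute{
		"name": schema.StringAttribute{Description: "original description", Required: true},
	}

	// Merge a second map into it, analogous to how the refactored AWS/Azure Schema()
	// implementations combine generalAttributes and instanceGroupSchemaAttributes.
	utils.Append(attrs, map[string]schema.Attribute{
		"name":        schema.StringAttribute{Description: "overwritten description", Required: true},
		"environment": schema.StringAttribute{Required: true},
	})

	fmt.Println(len(attrs))                     // 2
	fmt.Println(attrs["name"].GetDescription()) // overwritten description
}
```

Because later keys win, attributes shared across the datahub resources can be declared once in the common maps and still be overridden by a resource-specific map if that ever becomes necessary.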