From e7b66cdaa3bd6bd76683ca390ec1e3a98a57a86e Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sat, 14 Sep 2024 21:33:48 +0300
Subject: [PATCH] Squashed commit of the following:

commit 0961f188952d6b5920b210359d1933ee331565ff
Author: Kimmo Forss
Date:   Sat Sep 14 14:34:48 2024 +0300

    Refactor VM name generation for scale-out databases with zonal markers
    Don't remove hana shared from fstab
    Refactor ANF mount logic for HANA shared volume based on site configuration
    Refactor inventory.tf to include scale_out variable
    Update provider configurations to use version 4.0 or higher
    Refactor ANF volume group creation in HDB node module
---
 .../tasks/2.6.1-anf-mounts.yaml            |  8 +--
 .../tasks/2.6.1.1-anf-mount.yaml           | 25 +-------
 deploy/terraform/run/sap_system/module.tf  |  1 +
 .../modules/sap_namegenerator/output.tf    |  6 +-
 .../sap_namegenerator/variables_global.tf  |  5 ++
 .../modules/sap_namegenerator/vm.tf        | 21 +++++++
 .../modules/sap_system/hdb_node/outputs.tf | 59 +++++++++++++++----
 7 files changed, 85 insertions(+), 40 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index 088d0734a6..aac1b20aa3 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -654,7 +654,7 @@
   when:
     - hana_shared_mountpoint is defined
     - hana_shared_mountpoint | length > 0
-    - ansible_hostname == db_hosts[0]
+    - not database_scale_out

 - name: "ANF Mount: HANA data (secondary)"
   ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -722,8 +722,7 @@
   when:
     - hana_shared_mountpoint is defined
     - hana_shared_mountpoint | length > 1
-    - db_hosts | length == 2
-    - ansible_hostname == db_hosts[1]
+    - not database_scale_out

 - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})"
   ansible.builtin.file:
@@ -802,7 +801,8 @@
       # change folder to match the mount folder within the share
       'folder': 'shared',
       # Logic : hana_shared_mountpoint[0] goes on odd numbered HANA hosts and hana_shared_mountpoint[1] goes on even numbered HANA hosts.
-      'mount': "{% if ansible_hostname in query('inventory_hostnames', '{{ sap_sid | upper }}_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}",
+#      'mount': "{% if ansible_hostname in query('inventory_hostnames', '{{ sap_sid | upper }}_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}",
+      'mount': "{% if site | default('SITE1') == 'SITE1' %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}",
       'opts': '{{ mnt_options }}',
       'path': '{{ hana_shared_basepath }}',
       'permissions': '0775',
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
index 4ee385f983..676049cff4 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
@@ -142,28 +142,7 @@
   when:
     - node_tier in item.target_nodes or item.target_nodes == ['all']

-- name: "Backward Compatibility - Check required Database HA variables"
-  ansible.builtin.set_fact:
-    database_high_availability: "{{ db_high_availability | default(false) }}"
-  when:
-    - db_high_availability is defined
-    - database_high_availability is not defined
-
-- name: "Backward Compatibility - Check required Database HA variables"
-  ansible.builtin.set_fact:
-    database_high_availability: "{{ db_high_availability | default(false) }}"
-  when:
-    - db_high_availability is defined
-    - database_high_availability is not defined
-
-- name: "Backward Compatibility - Check required Database HA variables"
-  ansible.builtin.set_fact:
-    database_high_availability: "{{ db_high_availability | default(false) }}"
-  when:
-    - db_high_availability is defined
-    - database_high_availability is not defined
-
-# absent_from_fstab
+# Remove entries from fstab
 - name: "ANF Mount: RHEL DB high availability configuration"
   when: ansible_os_family | upper == "REDHAT" and database_high_availability
   block:
@@ -178,6 +157,7 @@
       - item.target_nodes == ['hana']
       - item.type in ['data','log','shared']
       - database_high_availability
+      - not database_scale_out

     - name: "ANF Mount: make mount for {{ item.path }} ephemeral when DB high availability"
       ansible.posix.mount:
@@ -190,6 +170,7 @@
       - item.target_nodes == ['hana']
       - item.type in ['data','log','shared']
       - database_high_availability
+      - not database_scale_out

 # https://www.suse.com/support/kb/doc/?id=000019904
 # - name: "ANF Mount: SLES DB high availability configuration"
diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf
index 2dc0df308c..c12541a31a 100644
--- a/deploy/terraform/run/sap_system/module.tf
+++ b/deploy/terraform/run/sap_system/module.tf
@@ -39,6 +39,7 @@ module "sap_namegenerator" {
   scs_high_availability              = local.application_tier.scs_high_availability
   scs_cluster_type                   = local.application_tier.scs_cluster_type
   use_zonal_markers                  = var.use_zonal_markers
+  scale_out                          = var.database_HANA_use_ANF_scaleout_scenario
 }

 #########################################################################################
diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf
index 1f7c95456d..552b1a617d 100644
--- a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf
+++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf
@@ -65,9 +65,9 @@ output "naming" {
     ANYDB_SECONDARY_DNSNAME = var.database_high_availability ? local.anydb_secondary_dnsnames_ha : local.anydb_secondary_dnsnames
     ANYDB_VMNAME            = var.database_high_availability ? local.anydb_vm_names_ha : local.anydb_vm_names
     DEPLOYER                = local.deployer_vm_names
-    HANA_COMPUTERNAME       = var.database_high_availability ? local.hana_computer_names_ha : local.hana_computer_names
-    HANA_SECONDARY_DNSNAME  = var.database_high_availability ? local.hana_secondary_dnsnames_ha : local.hana_secondary_dnsnames
-    HANA_VMNAME             = var.database_high_availability ? local.hana_server_vm_names_ha : local.hana_server_vm_names
+    HANA_COMPUTERNAME       = var.database_high_availability ? var.scale_out ? local.hana_computer_names_scaleout : concat(local.hana_computer_names, local.hana_computer_names_ha) : local.hana_computer_names
+    HANA_SECONDARY_DNSNAME  = var.database_high_availability ? var.scale_out ? local.hana_secondary_dnsnames_scaleout : concat(local.hana_secondary_dnsnames, local.hana_secondary_dnsnames_ha) : local.hana_secondary_dnsnames
+    HANA_VMNAME             = var.database_high_availability ? var.scale_out ? local.hana_server_vm_names_scaleout : concat(local.hana_server_vm_names, local.hana_server_vm_names_ha) : local.hana_server_vm_names
     ISCSI_COMPUTERNAME      = local.iscsi_server_names
     OBSERVER_COMPUTERNAME   = local.observer_computer_names
     OBSERVER_VMNAME         = local.observer_vm_names
diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf
index d113ad9d07..073ce697e8 100644
--- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf
+++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf
@@ -559,3 +559,8 @@ variable "utility_vm_count" {
   type    = number
   default = 0
 }
+
+variable "scale_out" {
+  type    = bool
+  default = false
+}
diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf
index d3b9fd170f..c79b25ce89 100644
--- a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf
+++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf
@@ -40,6 +40,13 @@ locals {
     )
   ]

+  anydb_vm_names_scaleout = [for idx in range(var.db_server_count * 2) :
+    length(var.db_zones) > 0 && var.use_zonal_markers ? (
+      format("%sdb%sz%s%s%02d%s%d%s", lower(var.sap_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified)) : (
+      format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified)
+    )
+  ]
+
   app_computer_names = [for idx in range(var.app_server_count) :
     format("%sapp%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_vm_verified)
   ]
@@ -63,6 +70,10 @@
     format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2))
   ]

+  hana_computer_names_scaleout = [for idx in range(var.db_server_count * 2) :
+    format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx + 1 ) % 2)), substr(local.random_id_vm_verified, 0, 2))
+  ]
+
   hana_server_vm_names = [for idx in range(var.db_server_count) :
     length(var.db_zones) > 0 && var.use_zonal_markers ? (
       format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : (
@@ -77,6 +88,13 @@
     )
   ]

+  hana_server_vm_names_scaleout = [for idx in range(var.db_server_count * 2) :
+    length(var.db_zones) > 0 && var.use_zonal_markers ? (
+      format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, tonumber(( idx % 2)), local.random_id_vm_verified)) : (
+      format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber(( idx % 2)), local.random_id_vm_verified)
+    )
+  ]
+
   scs_computer_names = [for idx in range(var.scs_server_count) :
     format("%sscs%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_vm_verified)
   ]
@@ -135,6 +153,9 @@
     format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_virt_vm_verified)
   ]

+  hana_secondary_dnsnames_scaleout = [for idx in range(var.db_server_count * 2) :
+    format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx + 1 ) % 2)), local.random_id_vm_verified)
+  ]
   scs_secondary_dnsnames = [for idx in range(var.scs_server_count) :
     format("v%ss%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_virt_vm_verified)
   ]
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
index d28830b0bf..5d8f9da3d3 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
@@ -168,24 +168,61 @@ output "hana_log_ANF_volumes" {
   ]) : []
 }

+# Order the list so that the zonal information is in the correct order
+
 output "hana_shared" {
-  description = "HANA Shared primary volume"
-  value       = local.shared_volume_count > 0 ? flatten([
-                  for idx in range(local.shared_volume_count) : [
+  description = "HANA Shared volumes"
+  value       = local.shared_volume_count == 0 ? (
+                  []
+                ) : ((
+                  var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? data.azurerm_netapp_volume.hanashared[0].zone : azurerm_netapp_volume.hanashared[0].zone)) == var.database.zones[0] ? (
+                  [
                     format("%s:/%s",
                       var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
-                        data.azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0]) : (
-                        azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0]
+                        data.azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]) : (
+                        azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]
                       ),
                       var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
-                        data.azurerm_netapp_volume.hanashared[idx].volume_path) : (
-                        azurerm_netapp_volume.hanashared[idx].volume_path
+                        data.azurerm_netapp_volume.hanashared[0].volume_path) : (
+                        azurerm_netapp_volume.hanashared[0].volume_path
                       )
+                    ),
+                    format("%s:/%s",
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]) : (
+                        azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]
+                      ),
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[1].volume_path) : (
+                        azurerm_netapp_volume.hanashared[1].volume_path
                       )
-
-                    ]
-                  ]) : []
- }
+                    )
+                  ]
+                ) : (
+                  [
+                    format("%s:/%s",
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]) : (
+                        azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]
+                      ),
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[1].volume_path) : (
+                        azurerm_netapp_volume.hanashared[1].volume_path
+                      )
+                    ),
+                    format("%s:/%s",
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]) : (
+                        azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]
+                      ),
+                      var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? (
+                        data.azurerm_netapp_volume.hanashared[0].volume_path) : (
+                        azurerm_netapp_volume.hanashared[0].volume_path
+                      )
+                    )
+                  ]
+                )
+ }

 output "application_volume_group" {
   description = "Application volume group"