From 72aefbcb66b6b0f7d1704a069ac0fcceffbe9e9c Mon Sep 17 00:00:00 2001
From: Steffen Bo Thomsen
Date: Fri, 8 Dec 2023 13:09:09 +0100
Subject: [PATCH 001/607] Fixes for HANA VM ppg and /etc/hosts file generation
 (#511)

* resolve proximity_placement_group_id for the HANA db

* ensure hosts.j2 always places at least a single whitespace when the FQDN is
 longer than 50 characters, and widen the hostname alignment to 80 characters.
---
 .../ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 | 6 +++---
 .../terraform-units/modules/sap_system/hdb_node/vm-hdb.tf  | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2
index 4218163092..cc8c2d2cc3 100644
--- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2
+++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2
@@ -80,15 +80,15 @@ ansible_facts.
 {# Check if there are IPs available for the current host #}
 {% if host_ips %}
 {# Print the primary host entry #}
-{{ '%-19s' | format(host_ips[0]) }}{{ '%-50s' | format(host + '.' + sap_fqdn) }}{{ '%-21s' | format(host) }}
+{{ '%-19s' | format(host_ips[0]) }}{{ '%-80s ' | format(host + '.' + sap_fqdn) }}{{ '%-21s' | format(host) }}
 {# If there's only one IP, also use it for the virtual_host #}
 {% if host_ips|length == 1 %}
-{{ '%-19s' | format(host_ips[0]) }}{{ '%-50s' | format(virtual_host_name + '.' + sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
+{{ '%-19s' | format(host_ips[0]) }}{{ '%-80s ' | format(virtual_host_name + '.' + sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
 {% else %}
 {# Loop through remaining IPs for the virtual host #}
 {% for ip in host_ips[1:] %}
-{{ '%-19s' | format(ip) }}{{ '%-50s' | format(virtual_host_name + '.' + sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
+{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(virtual_host_name + '.' + sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
 {% endfor %}
 {% endif %}
 {% endif %}

diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
index 7849af3cd6..ac359c9f85 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
@@ -167,7 +167,7 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" {
   location = var.resource_group[0].location
   proximity_placement_group_id = var.database.use_ppg ?
( - var.ppg[count.index]) : ( + var.ppg[count.index % max(local.db_zone_count, 1)]) : ( null ) From 1390e4e78740f159435e2a8ca18c4bfaac0dbf18 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 8 Dec 2023 18:17:55 +0530 Subject: [PATCH 002/607] Final changes before release v3.10.0.0 --- .ansible-lint | 1 - .../SDAF/ParameterDetails/custom_naming.json | 5 + .../SDAF/ParameterDetails/custom_sizes.json | 12 +- Webapp/SDAF/SDAFWebApp.csproj | 24 +-- deploy/ansible/configuration_menu.sh | 5 +- .../playbook_06_00_acss_registration.yaml | 113 +++++++++++ .../0.5-ACSS-registration/defaults/main.yaml | 2 +- .../tasks/0.5.1-set-runtime-parameters.yaml | 14 +- .../0.5-ACSS-registration/tasks/main.yaml | 160 +++++++++++++--- .../pipelines/05-DB-and-SAP-installation.yaml | 30 +-- .../templates/acss-registration.yaml | 177 ++++++++++++++++-- 11 files changed, 452 insertions(+), 91 deletions(-) create mode 100644 deploy/ansible/playbook_06_00_acss_registration.yaml diff --git a/.ansible-lint b/.ansible-lint index d286943e8b..e04e512877 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -22,7 +22,6 @@ skip_list: - yaml[indentation] - yaml[line-length] - yaml[octal-values] - - yaml[indentation] - name[template] - command-instead-of-shell # csh is not supported in commands - jinja[spacing] diff --git a/Webapp/SDAF/ParameterDetails/custom_naming.json b/Webapp/SDAF/ParameterDetails/custom_naming.json index 8ee324c021..9a3734b81b 100644 --- a/Webapp/SDAF/ParameterDetails/custom_naming.json +++ b/Webapp/SDAF/ParameterDetails/custom_naming.json @@ -46,6 +46,7 @@ "bastion_host": "", "bastion_pip": "", "cluster_disk": "", + "database_cluster_disk": "", "db_alb": "", "db_alb_bepool": "", "db_alb_feip": "", @@ -106,6 +107,7 @@ "scs_avset": "", "scs_clst_feip": "", "scs_clst_hp": "", + "scs_cluster_disk": "", "scs_clst_rule": "", "scs_ers_feip": "", "scs_ers_hp": "", @@ -167,6 +169,8 @@ "bastion_host": "bastion-host", "bastion_pip": "bastion-pip", "cluster_disk": "cluster-disks", + "database_cluster_disk": "", + "db-cluster-disk": null, "db_alb": "db-alb", "db_alb_bepool": "dbAlb-bePool", "db_alb_feip": "dbAlb-feip", @@ -229,6 +233,7 @@ "scs_clst_feip": "scsClst-feip", "scs_clst_hp": "scsClst-hp", "scs_clst_rule": "scsClst-rule", + "scs_cluster_disk" : "scs-cluster-disk", "scs_ers_feip": "scsErs-feip", "scs_ers_hp": "scsErs-hp", "scs_ers_rule": "scsErs-rule", diff --git a/Webapp/SDAF/ParameterDetails/custom_sizes.json b/Webapp/SDAF/ParameterDetails/custom_sizes.json index 53d515a3e1..701c598d55 100644 --- a/Webapp/SDAF/ParameterDetails/custom_sizes.json +++ b/Webapp/SDAF/ParameterDetails/custom_sizes.json @@ -57,7 +57,7 @@ "app": { "Default": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": "Standard_D4s_v5", "accelerated_networking": true }, "storage": [ @@ -81,7 +81,7 @@ }, "Optimized": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": "Standard_D4s_v5", "accelerated_networking": true }, "storage": [ @@ -107,7 +107,7 @@ "scs": { "Default": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": "Standard_D4s_v5", "accelerated_networking": true }, "storage": [ @@ -133,7 +133,7 @@ "scsha": { "Default": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": "Standard_D4s_v5", "accelerated_networking": true }, "storage": [ @@ -157,7 +157,7 @@ }, "Optimized": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": "Standard_D4s_v5", "accelerated_networking": true }, "storage": [ @@ -183,7 +183,7 @@ "web": { "Default": { "compute": { - "vm_size": "Standard_D4s_v3", + "vm_size": 
"Standard_D4s_v5", "accelerated_networking": true }, "storage": [ diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index af26ffcd83..3d3d086e3d 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -15,19 +15,19 @@ - - - - - - - - - - + + + + + + + + + + - - + + diff --git a/deploy/ansible/configuration_menu.sh b/deploy/ansible/configuration_menu.sh index ae5982f978..348a56f0c5 100755 --- a/deploy/ansible/configuration_menu.sh +++ b/deploy/ansible/configuration_menu.sh @@ -31,7 +31,8 @@ cmd_dir="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")" # playbook_05_01_sap_dbload.yaml \ # playbook_05_02_sap_pas_install.yaml \ # playbook_05_03_sap_app_install.yaml \ -# playbook_05_04_sap_web_install.yaml +# playbook_05_04_sap_web_install.yaml \ +# playbook_06_00_acss_registration.yaml # The SAP System parameters file which should exist in the current directory sap_params_file=sap-parameters.yaml @@ -110,6 +111,7 @@ options=( "Application Server installations" "Web Dispatcher installations" "HCMT" + "ACSS Registration" # Special menu entries "BOM Download" @@ -139,6 +141,7 @@ all_playbooks=( ${cmd_dir}/playbook_05_03_sap_app_install.yaml ${cmd_dir}/playbook_05_04_sap_web_install.yaml ${cmd_dir}/playbook_04_00_02_db_hcmt.yaml + ${cmd_dir}/playbook_06_00_acss_registration.yaml ${cmd_dir}/playbook_bom_downloader.yaml ${cmd_dir}/playbook_07_00_00_post_installation.yaml ) diff --git a/deploy/ansible/playbook_06_00_acss_registration.yaml b/deploy/ansible/playbook_06_00_acss_registration.yaml new file mode 100644 index 0000000000..07bed22975 --- /dev/null +++ b/deploy/ansible/playbook_06_00_acss_registration.yaml @@ -0,0 +1,113 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for SAP Web Dispatcher Install | +# | | +# +------------------------------------4--------------------------------------*/ + +# -------------------------------------+---------------------------------------8 +# Role: 5.4 Web Dispatcher Installation +# +# Description: +# +# -------------------------------------+---------------------------------------8 + +--- + +- hosts: localhost + name: "ACSS Registration Playbook: - Initialization" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "ACSS Registration Playbook: - Create Progress folder" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress" + state: directory + mode: 0755 + + - name: "ACSS Registration Playbook: - Remove acss-registration-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/acss-registration-done" + state: absent + + - name: "ACSS Registration Playbook: - Read/Create passwords" + ansible.builtin.include_role: + name: roles-misc/0.1-passwords + public: true + tags: + - 0.1-passwords + +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for SAP Web Dispatcher Install | +# | | +# +------------------------------------4--------------------------------------*/ + +- hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_ERS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP : + {{ sap_sid | upper }}_WEB" + + name: ACSS Registration + remote_user: "{{ orchestration_ansible_user }}" + become: true + gather_facts: true # Important to collect hostvars information + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: +# 
-------------------------------------+---------------------------------------8 +# +# Build the list of tasks to be executed in order here. +# +# -------------------------------------+---------------------------------------8 + + - name: "ACSS Registration Playbook: Define this SID" + ansible.builtin.set_fact: + this_sid: + { + 'sid': '{{ sap_sid | upper }}', + 'web_sid': '{{ web_sid | upper }}', + 'dbsid_uid': '{{ hdbadm_uid }}', + 'sidadm_uid': '{{ sidadm_uid }}', + 'ascs_inst_no': '{{ scs_instance_number }}', + 'pas_inst_no': '{{ pas_instance_number }}' , + 'app_inst_no': '{{ app_instance_number }}' + } + + - name: "ACSS Registration Playbook: Define list of SIDs" + ansible.builtin.set_fact: + all_sids: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sids | default([]) + [this_sid] }}{% endif %}" + + - name: "Run the ACSS Regisration" + when: + - acssEnvironment is defined + - acssSapProduct is defined + ansible.builtin.include_role: + name: "roles-misc/0.5-ACSS-registration" + vars: + acss_environment: "{{ acssEnvironment }}" + acss_sap_product: "{{ acssSapProduct }}" + loop: "{{ all_sids }}" + loop_control: + loop_var: sid_to_be_deployed + tags: + - 6.0-acss-registration + + - name: "ACSS Registration Playbook: - Create acss-registration-done flag" + delegate_to: localhost + become: false + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/acss-registration-done" + state: touch + mode: 0755 + +... + +# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/defaults/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/defaults/main.yaml index 403ead632a..a089096602 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/defaults/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/defaults/main.yaml @@ -11,6 +11,6 @@ ers_virtual_hostname: "{{ sap_sid | lower }}ers{{ ers_instance_ sapcontrol_command: "sapcontrol -nr {{ scs_instance_number }}" -acss_cli_extension_url: "https://aka.ms/ACSSCLI" +acss_cli_extension_url: "https://files.pythonhosted.org/packages/ce/f3/91b1a5fdff7a7f0cc8bdfc9a7177f1c1dbab909f857a5ba4cc837650635e/azure_mgmt_workloads-1.0.0-py3-none-any.whl" ... 
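For orientation: the new playbook above only runs the registration role when both acssEnvironment and acssSapProduct are supplied as extra variables. A minimal stand-alone invocation might look like the sketch below; the X00_hosts.yaml inventory and sap-parameters.yaml names are illustrative stand-ins for the per-system artifacts the framework generates, and Prod/S4HANA are example values, not the only valid ones.

    # Hypothetical direct run of the new ACSS registration playbook
    ansible-playbook -i X00_hosts.yaml \
      -e @sap-parameters.yaml \
      -e "acssEnvironment=Prod" \
      -e "acssSapProduct=S4HANA" \
      deploy/ansible/playbook_06_00_acss_registration.yaml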
diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/0.5.1-set-runtime-parameters.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/0.5.1-set-runtime-parameters.yaml index b1aeddffd1..ab7439b35b 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/0.5.1-set-runtime-parameters.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/0.5.1-set-runtime-parameters.yaml @@ -8,13 +8,13 @@ # Validate that the SCS cluster is working as expected -- name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" +- name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: - python_version: "python3" + python_version: "python3" - name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: - python_version: "python2" + python_version: "python2" when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] # Get BOM bom object @@ -55,7 +55,7 @@ use_proxy: false headers: Metadata: true - register: azure_metadata + register: azure_metadata - name: "0.5.1 acss registration: Set variables from Azure IMDS" ansible.builtin.set_fact: @@ -64,10 +64,4 @@ acss_sid: "{{ sap_sid | upper }}" acss_instance_type: "{{ instance_type }}" -# --sap-virtual-instance-name "${acss_sid}" \ -# --resource-group "${acss_resource_group}" \ -# --location "${acss_location}" \ -# --environment "${acss_environment}" \ -# --sap-product "${acss_sap_product}" \ -# --configuration "${acss_configuration}" ... diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 23adccdca6..8c13d2ca56 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -1,20 +1,22 @@ --- # Validate that the SCS cluster is working as expected -- name: "0.0 Validations: - Set Python version {{ distribution_id }}" +- name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: - python_version: "python3" + python_version: "python3" -- name: "0.0 Validations: - Set Python version {{ distribution_id }}" +- name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: - python_version: "python2" + python_version: "python2" when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] -- name: "Determine if SCS is running on {{ ansible_hostname }}" +- name: "0.5.1 acss registration: - Determine if SCS is running on {{ ansible_hostname }}" become_user: "{{ sap_sid | lower }}adm" become: true + when: + - "'scs' in supported_tiers" block: - - name: "PAS Install: Get sapcontrol path" + - name: "0.5.1 acss registration: - Get sapcontrol path" ansible.builtin.find: paths: "/usr/sap/{{ sap_sid | upper }}" file_type: file @@ -23,14 +25,14 @@ follow: true register: sapcontrol_file - - name: "PAS Install: Set sapcontrol path" + - name: "0.5.1 acss registration: - Set sapcontrol path" ansible.builtin.set_fact: sapcontrol_path: "{{ sapcontrol_file.files[0].path }}" when: - sapcontrol_file | length > 0 # {{ sapcontrol_path }} -nr {{ scs_instance_number }} -function GetProcessList | grep MessageServer | awk '{split($0,result,", "); print result[1],result[3] }' - - name: "Determine if SCS is running on {{ ansible_hostname }}" + - name: "0.5.1 acss registration: - Determine if SCS is running on {{ ansible_hostname }}" 
ansible.builtin.command: "{{ sapcontrol_path }} -nr {{ scs_instance_number }} -function GetProcessList" changed_when: false failed_when: false @@ -50,46 +52,150 @@ tags: - skip_ansible_lint - - name: "Show if SCS is running on {{ ansible_hostname }}" + - name: "0.5.1 acss registration: - Show if SCS is running on {{ ansible_hostname }}" ansible.builtin.debug: var: is_running - # when the rc is 3, the SCS is running on ansible_hostname. - # if the ansible_hostname is not the primary_instance_name, then failover cluster resources for ASCS/SCS - # start the SAP instance on primary_instance_name. This is a failover scenario. - - name: "Validate that SCS is running on {{ ansible_hostname }}" + - name: "0.5.1 acss registration: - Validate that SCS is running on {{ ansible_hostname }}" ansible.builtin.set_fact: - scs_running_on: "{{ ansible_hostname }}" + scs_running_on: "{{ ansible_hostname }}" when: - is_running.rc == 3 - is_running.stdout | regex_search('MessageServer') - - name: "Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" - become: true - become_user: root +- name: "0.5.1 acss registration: - Check cluster Group and get the metadata" + become: true + become_user: root + when: + - hostvars[ansible_hostname]['scs_running_on'] is defined + block: + - name: "0.5.1 acss registration: - Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" ansible.builtin.shell: >- set -o pipefail; crm_resource --resource g-{{ sap_sid | upper }}_{{ instance_type | upper }} --locate | cut -d ':' -f 2| cut -d " " -f 2 when: - - ansible_hostname == primary_instance_name - failed_when: false + - scs_high_availability + - ansible_os_family | upper == 'SUSE' register: cluster_group_location + failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] tags: - skip_ansible_lint - - name: "Install [ACSS] cli extension" - delegate_to: localhost - ansible.builtin.command: >- - "az extension add --name workloads --yes || exit 1" + - name: "0.5.1 acss registration: - Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" + ansible.builtin.shell: >- + set -o pipefail; + pcs constraint location show resources g-{{ sap_sid | upper }}_{{ instance_type | upper }} | grep "Node" | awk '{print $2}' + when: + - scs_high_availability + - ansible_os_family | upper == 'REDHAT' + register: cluster_group_location + failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] tags: - skip_ansible_lint - - name: "Create [ACSS] virtual instance" - ansible.builtin.command: "az workloads sap-virtual-instance create --sap-virtual-instance-name {{ acss_sid }} --resource-group {{ acss_resource_group }} --location {{ acss_location }} --environment {{ acss_environment }} --sap-product {{ acss_sap_product }} --configuration {{ acss_configuration }}" + - name: "0.5.1 acss registration: - Get Azure instance metadata" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata when: - - ansible_hostname == primary_instance_name - - cluster_group_location.stdout != ansible_hostname + - hostvars[ansible_hostname]['scs_running_on'] is defined + - ansible_hostname == hostvars[ansible_hostname]['scs_running_on'] + + - name: "0.5.1 acss registration: - Set variables from Azure IMDS" + delegate_facts: true + delegate_to: localhost + ansible.builtin.set_fact: + 
acss_scs_instance_metadata: "{{ azure_metadata }}" + when: + - hostvars[ansible_hostname]['scs_running_on'] is defined + - ansible_hostname == hostvars[ansible_hostname]['scs_running_on'] + +- name: "0.5.1 acss registration: - Register SAP System to ACSS" + delegate_to: localhost + when: + - hostvars[ansible_hostname]['scs_running_on'] is defined + block: + - name: "0.5.1 acss registration: - Get Azure metadata from the VM where scs_running_on is defined" + ansible.builtin.set_fact: + az_instance_metadata: "{{ hostvars.localhost.acss_scs_instance_metadata }}" + + - name: "0.5.1 acss registration: - Print metadata" + ansible.builtin.debug: + var: az_instance_metadata + verbosity: 2 + + - name: "0.5.1 acss registration: - Set variables from Azure IMDS" + ansible.builtin.set_fact: + acss_resource_id: "{{ az_instance_metadata.json.compute.resourceId }}" + acss_subscription_id: "{{ az_instance_metadata.json.compute.subscriptionId }}" + acss_resource_group: "{{ az_instance_metadata.json.compute.resourceGroupName }}" + acss_location: "{{ az_instance_metadata.json.compute.location }}" + acss_sid: "{{ sap_sid | upper }}" + acss_instance_type: "{{ instance_type }}" + + - name: "0.5.1 acss registration: - Install [ACSS] cli extension" + ansible.builtin.shell: >- + az extension add --name workloads --yes || exit 1 tags: - skip_ansible_lint + - name: "0.5.1 acss registration: - Get Access Token" + ansible.builtin.shell: >- + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: acss_access_token + no_log: true + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - Generate a guid for the ACSS instance" + ansible.builtin.command: uuidgen + register: acss_guid + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - Create [ACSS] virtual instance" + ansible.builtin.uri: + url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "environment": "{{ acss_environment }}", + "sapProduct": "{{ acss_sap_product }}", + "configuration": { + "configurationType": "Discovery", + "centralServerVmId": "/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Compute/virtualMachines/{{ hostvars[ansible_hostname]['scs_running_on'] }}" + } + }, + "location": "{{ acss_location }}" + } + # status_code: [200, 201] + headers: + Authorization: "Bearer {{ acss_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" + register: create_vis_response + failed_when: create_vis_response.json.properties.provisioningState != 'Accepted' + no_log: true + + - name: "0.5.1 acss registration: - Check the registered [ACSS] virtual instance" + ansible.builtin.uri: + url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" + method: GET + # status_code: [200, 201] + headers: + Authorization: "Bearer {{ acss_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" + register: get_vis_response + until: get_vis_response.json.properties.provisioningState == 'Succeeded' + retries: 10 + delay: 60 + no_log: true + ... 
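Stripped of the Ansible wrapping, the registration performed by the role above amounts to two calls against the Microsoft.Workloads resource provider: a PUT that creates the sapVirtualInstances resource with a Discovery configuration, then GET polling until provisioningState reaches Succeeded. A rough bash equivalent of the same flow, as a sketch: SUBSCRIPTION_ID, RESOURCE_GROUP, SID, SCS_VM_ID and LOCATION are placeholders, and jq is assumed to be available for JSON parsing.

    # Acquire an ARM token, mirroring the "Get Access Token" task
    TOKEN=$(az account get-access-token --resource https://management.azure.com --query accessToken -o tsv)
    VIS_URL="https://management.azure.com/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${RESOURCE_GROUP}/providers/Microsoft.Workloads/sapVirtualInstances/${SID}?api-version=2023-04-01"

    # PUT creates the virtual instance from the SCS VM in Discovery mode
    curl -sS -X PUT "$VIS_URL" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -H "x-ms-rpaas-new-resource: true" \
      -d '{"properties": {"environment": "'"$ACSS_ENVIRONMENT"'", "sapProduct": "'"$ACSS_SAP_PRODUCT"'", "configuration": {"configurationType": "Discovery", "centralServerVmId": "'"$SCS_VM_ID"'"}}, "location": "'"$LOCATION"'"}'

    # Poll like the role does (retries: 10, delay: 60) until provisioning succeeds
    for attempt in $(seq 1 10); do
      state=$(curl -sS "$VIS_URL" -H "Authorization: Bearer $TOKEN" | jq -r '.properties.provisioningState')
      [ "$state" = "Succeeded" ] && break
      sleep 60
    done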
diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index b1c8329ddc..20915e2629 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -459,20 +459,22 @@ stages: - ${{ if eq(parameters.acss_registration, true) }}: - template: templates\acss-registration.yaml parameters: - displayName: "ACSS Registration" - scriptPath: ${{ parameters.sap_automation_repo_path }}/deploy/pipelines/templates - azureClientId: $(ARM_CLIENT_ID) - azureClientSecret: $(ARM_CLIENT_SECRET) - azureTenantId: $(ARM_TENANT_ID) - azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) - acssEnvironment: ${{ parameters.acss_environment }} - acssSapProduct: ${{ parameters.acss_sap_product }} - sapSystemConfigurationName: ${{ parameters.sap_system_configuration_name }} - parametersFolder: $(Preparation.FOLDER) - config_repo_path: ${{ parameters.config_repo_path }} - sap_automation_repo_path: ${{ parameters.sap_automation_repo_path }} - TF_Remote_SA_Name: $(Terraform_Remote_Storage_Account_Name) - TF_Remote_Storage_Subscription: $(Terraform_Remote_Storage_Subscription) + displayName: "ACSS Registration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_00_acss_registration.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: ${{ parameters.extra_params }} + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + acssEnvironment: ${{ parameters.acss_environment }} + acssSapProduct: ${{ parameters.acss_sap_product }} - template: templates\collect-log-files.yaml parameters: diff --git a/deploy/pipelines/templates/acss-registration.yaml b/deploy/pipelines/templates/acss-registration.yaml index ff515d2b8e..88cc4659cd 100644 --- a/deploy/pipelines/templates/acss-registration.yaml +++ b/deploy/pipelines/templates/acss-registration.yaml @@ -1,29 +1,168 @@ parameters: - displayName: '' - scriptPath: '' azureClientId: '' azureClientSecret: '' azureTenantId: '' azureSubscriptionId: '' + displayName: '' + ansibleFilePath: '' + sidHosts: '' + secretName: '' + vaultName: '' + parametersFolder: '' + extraParams: '' + sapParams: '' + passwordSecretName: '' + userNameSecretName: '' acssEnvironment: '' acssSapProduct: '' - sapSystemConfigurationName: '' - sap_automation_repo_path: '' - config_repo_path: '' - TF_Remote_SA_Name: '' - TF_Remote_Storage_Subscription: '' + steps: -- bash: ${{ parameters.scriptPath }}/acss-registration.sh +- script: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + # Treat unset variables as an error when substituting. + + #Stage could be executed on a different machine by default, need to login again for ansible + #If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one + + # deployer_file=/etc/profile.d/deploy_server.sh + # if [ -f "$deployer_file" ]; then + # echo "##[section]Running on a deployer..." 
+ # az login --identity --allow-no-subscriptions --output none + # noAccess=$( az account show --query name | grep "N/A(tenant level account)") + + # if [ -z "$noAccess" ]; then + # az account set --subscription $AZURE_SUBSCRIPTION_ID --output none + # fi + # else + # echo "##[section]Running on an Azure DevOps agent..." + # az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none + # az account set --subscription $AZURE_SUBSCRIPTION_ID --output none + + # fi + + az login --service-principal -u "${AZURE_CLIENT_ID}" -p="$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID" --output none + az account set --subscription "$AZURE_SUBSCRIPTION_ID" --output none + set -eu + + if [ ! -f $PARAMETERS_FOLDER/sshkey ]; then + echo "##[section]Retrieving sshkey..." + az keyvault secret show --name "$SSH_KEY_NAME" --vault-name "$VAULT_NAME" --query value -o tsv > "$PARAMETERS_FOLDER"/sshkey + sudo chmod 600 "$PARAMETERS_FOLDER"/sshkey + fi + + password_secret=$(az keyvault secret show --name "$PASSWORD_KEY_NAME" --vault-name "$VAULT_NAME" --query value -o tsv) + + echo "Extra parameters passed: " "$EXTRA_PARAMS" + + + base=$(basename "$ANSIBLE_FILE_PATH") + + filename_without_prefix=$(echo "$base" | awk -F'.' '{print $1}') + filename=./config/Ansible/"${filename_without_prefix}"_pre.yml + return_code=0 + + echo "Extra parameters passed: " $EXTRA_PARAMS + echo "Check for file: ${filename}" + + command="ansible --version" + eval $command + + EXTRA_PARAM_FILE="" + + if [ -f $PARAMETERS_FOLDER/extra-params.yaml ]; then + echo "Extra parameter file passed: " $PARAMETERS_FOLDER/extra-params.yaml + + EXTRA_PARAM_FILE="-e @$PARAMETERS_FOLDER/extra-params.yaml" + fi + + + ############################################################################################ + # # + # Run Pre tasks if Ansible playbook with the correct naming exists # + # # + ############################################################################################ + + if [ -f "${filename}" ]; then + echo "##[group]- preconfiguration" + + redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS $EXTRA_PARAMS $EXTRA_PARAM_FILE ${filename}" + echo "##[section]Executing [$redacted_command]..." + + command="ansible-playbook -i $INVENTORY --private-key $PARAMETERS_FOLDER/sshkey \ + -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' $EXTRA_PARAMS \ + -e ansible_ssh_pass='${password_secret}' $EXTRA_PARAM_FILE ${filename}" + + eval $command + return_code=$? + echo "##[section]Ansible playbook ${filename} execution completed with exit code [$return_code]" + echo "##[endgroup]" + + fi + + command="ansible-playbook -i $INVENTORY --private-key $PARAMETERS_FOLDER/sshkey \ + -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' \ + -e ansible_ssh_pass='${password_secret}' -e 'acssEnvironment=$ACSS_ENVIRONMENT' -e 'acssSapProduct=$ACSS_SAP_PRODUCT' \ + $EXTRA_PARAMS $EXTRA_PARAM_FILE $ANSIBLE_FILE_PATH" + + + redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS $EXTRA_PARAMS $EXTRA_PARAM_FILE $ANSIBLE_FILE_PATH" + + echo "##[section]Executing [$redacted_command]..." + echo "##[group]- output" + eval $command + return_code=$? 
+ echo "##[section]Ansible playbook execution completed with exit code [$return_code]" + echo "##[endgroup]" + + + filename=./config/Ansible/"${filename_without_prefix}"_post.yml + echo "Check for file: ${filename}" + + + ############################################################################################ + # # + # Run Post tasks if Ansible playbook with the correct naming exists # + # # + ############################################################################################ + + if [ -f ${filename} ]; then + + echo "##[group]- postconfiguration" + redacted_command="ansible-playbook -i "$INVENTORY" -e @"$SAP_PARAMS" "$EXTRA_PARAMS" $EXTRA_PARAM_FILE "${filename}"" + echo "##[section]Executing [$redacted_command]..." + + command="ansible-playbook -i "$INVENTORY" --private-key $PARAMETERS_FOLDER/sshkey \ + -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' \ + -e ansible_ssh_pass='${password_secret}' ${filename} $EXTRA_PARAMS $EXTRA_PARAM_FILE" + + eval $command + return_code=$? + echo "##[section]Ansible playbook ${filename} execution completed with exit code [$return_code]" + echo "##[endgroup]" + + fi + + exit $return_code + displayName: "ACSS: ${{ parameters.displayName }}" env: - ARM_SUBSCRIPTION_ID: ${{ parameters.azureSubscriptionId }} - ARM_CLIENT_ID: ${{ parameters.azureClientId }} - ARM_CLIENT_SECRET: ${{ parameters.azureClientSecret }} - ARM_TENANT_ID: ${{ parameters.azureTenantId }} - ACSS_ENVIRONMENT: ${{ parameters.acssEnvironment }} - ACSS_SAP_PRODUCT: ${{ parameters.acssSapProduct }} - SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sapSystemConfigurationName }} - ROOT_FOLDER: ${{ parameters.config_repo_path }} - CODE_FOLDER: ${{ parameters.sap_automation_repo_path }} - TERRAFORM_REMOTE_STORAGE_ACCOUNT_NAME: ${{ parameters.TF_Remote_SA_Name }} - TERRAFORM_REMOTE_STORAGE_SUBSCRIPTION: ${{ parameters.TF_Remote_Storage_Subscription }} + ACSS_ENVIRONMENT: ${{ parameters.acssEnvironment }} + ACSS_SAP_PRODUCT: ${{ parameters.acssSapProduct }} + AZURE_CLIENT_ID: ${{ parameters.azureClientId }} + AZURE_CLIENT_SECRET: ${{ parameters.azureClientSecret }} + AZURE_SUBSCRIPTION_ID: ${{ parameters.azureSubscriptionId }} + AZURE_TENANT_ID: ${{ parameters.azureTenantId }} + ANSIBLE_COLLECTIONS_PATHS: ~/.ansible/collections:/opt/ansible/collections + ANSIBLE_PYTHON_INTERPRETER: auto_silent + ANSIBLE_DISPLAY_SKIPPED_HOSTS: false + ANSIBLE_HOST_KEY_CHECKING: false + ANSIBLE_FILE_PATH: ${{ parameters.ansibleFilePath }} + PARAMETERS_FOLDER: ${{ parameters.parametersFolder }} + EXTRA_PARAMS: ${{ parameters.extraParams }} + SAP_PARAMS: ${{ parameters.sapParams }} + INVENTORY: ${{ parameters.parametersFolder }}/${{ parameters.sidHosts }} + SSH_KEY_NAME: ${{ parameters.secretName }} + VAULT_NAME: ${{ parameters.vaultName }} + PASSWORD_KEY_NAME: ${{ parameters.passwordSecretName }} + USERNAME_KEY_NAME: ${{ parameters.userNameSecretName }} From d03654fdf872cb582b6faac9a406850fef415bad Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Dec 2023 20:53:50 +0530 Subject: [PATCH 003/607] pass the correct operation name --- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml | 2 +- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/s_user.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index ac6426455f..85940d854d 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ 
b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -40,7 +40,7 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation == "bom" + - operation == "SoftwareAcquisition" # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/s_user.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/s_user.yaml index 6df4038ddd..1a7e8732c0 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/s_user.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/s_user.yaml @@ -7,7 +7,7 @@ - name: "0.2 Key Vault: - Retrieve Deployer Keyvault details" block: -# TODO: Move check to validation playbook (MKD) + # TODO: Move check to validation playbook (MKD) - name: "0.2 Key Vault: - Check required variables are present and not empty" ansible.builtin.assert: that: From b23f5516167fcf4008cf1ecc84f9a811a537d098 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Dec 2023 22:07:39 +0530 Subject: [PATCH 004/607] switch to using the newer parameters for high availability. --- .../playbook_00_validate_parameters.yaml | 7 ++ .../ansible/playbook_04_00_00_db_install.yaml | 42 +++++++-- deploy/ansible/playbook_04_00_01_db_ha.yaml | 90 +++++++++++++++---- .../playbook_05_00_00_sap_scs_install.yaml | 7 ++ .../playbook_05_02_sap_pas_install.yaml | 8 +- .../playbook_05_03_sap_app_install.yaml | 8 +- .../tasks/4.0.1.7-sap-profile-changes.yml | 42 +++++---- .../roles-misc/0.1-passwords/tasks/main.yaml | 9 +- .../tasks/wincluster-witness.yaml | 18 ++-- .../0.6-ARM-Deployment/tasks/main.yaml | 9 +- .../1.17-generic-wincluster/tasks/main.yaml | 11 ++- .../2.4-hosts-file/tasks/main.yaml | 13 ++- .../tasks/2.6.1-anf-mounts.yaml | 9 +- .../tasks/2.6.1.1-anf-mount.yaml | 13 ++- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 9 +- .../roles-sap/5.1-dbload/tasks/main.yaml | 74 +++++++++------ .../roles-sap/5.2-pas-install/tasks/main.yaml | 29 +++--- .../roles-sap/5.3-app-install/tasks/main.yaml | 29 +++--- .../tasks/5.5.4.0-clusterPrep-RedHat.yml | 8 +- .../tasks/5.5.4.0-clusterPrep-Suse.yml | 9 +- .../tasks/5.5.4.1-cluster-RedHat.yml | 9 +- .../tasks/5.5.4.1-cluster-Suse.yml | 9 +- .../tasks/5.6.4.1-scsersprofile.yaml | 8 +- .../windows/5.2-pas-install/tasks/main.yaml | 9 +- .../windows/5.3-app-install/tasks/main.yaml | 9 +- deploy/ansible/vars/ansible-input-api.yaml | 8 +- 26 files changed, 377 insertions(+), 119 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index de57fd0ee0..a6760e35ae 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -55,6 +55,13 @@ # Ensure cluster determining parameters are present # # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables " + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 9a19409669..efdf1fc955 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -50,6 
+50,13 @@ tags: - 0.1-win-passwords + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Database Installation Playbook: - Read/Create key vault secrets" ansible.builtin.include_role: name: roles-misc/0.2-kv-secrets @@ -450,12 +457,19 @@ # Build the list of tasks to be executed in order here. # # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Install SAP DB2" become: true become_user: root when: - platform == 'DB2' - - not db_high_availability + - not database_high_availability block: - name: "DB2 : Setting the DB facts" @@ -509,9 +523,20 @@ # Build the list of tasks to be executed in order here. # # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Install SAP DB2" become: true become_user: root + when: + - platform == 'DB2' + - database_high_availability + - (ansible_play_hosts_all | length) >= 2 block: - name: Setting the DB facts @@ -523,7 +548,6 @@ bom_processing: true primary_instance_name: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name secondary_instance_name: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name - tags: - always @@ -548,10 +572,7 @@ prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/ABAP', '').split(':')[1] }}" path: "INSTALL/DISTRIBUTED/ABAP/DB" this_sid: "{{ sap_sid }}" - when: - - platform == 'DB2' - - db_high_availability - - (ansible_play_hosts_all | length) >= 2 + # /*----------------------------------------------------------------------------8 # | | @@ -572,12 +593,19 @@ # Build the list of tasks to be executed in order here. 
# # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Install SAP ASE" become: true become_user: root when: - platform == 'SYBASE' - - not db_high_availability + - not database_high_availability block: - name: "ASE Installation Play: Setting the DB facts" diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 7046ac75a6..7c813b079a 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -12,6 +12,13 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: Initialization block: @@ -26,13 +33,13 @@ path: "{{ _workspace_directory }}/.progress/db-ha-done" state: absent -# MKD - Fencing is not a tier... it is a misuse to code for that -# To detect we should be using: -# database_high_availability -# AND -# database_cluster_type == "AFA" -# database_cluster_type == "ASD" -# database_cluster_type == "ISCSI" + # MKD - Fencing is not a tier... it is a misuse to code for that + # To detect we should be using: + # database_high_availability + # AND + # database_cluster_type == "AFA" + # database_cluster_type == "ASD" + # database_cluster_type == "ISCSI" # - name: Initialize facts # ansible.builtin.set_fact: @@ -90,8 +97,15 @@ # Build the list of tasks to be executed in order here. 
# # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "HANA HA Setup" - become: true + become: true block: - name: "Install HANA System Replication" block: @@ -159,7 +173,7 @@ - 5.5-hanadb-pacemaker when: - - db_high_availability + - database_high_availability - platform == 'HANA' # -------------------------------------+---------------------------------------8 @@ -179,10 +193,16 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "Oracle Data Guard Setup" when: - - db_high_availability + - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] become: true block: @@ -208,10 +228,16 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "Oracle Data Guard Setup" when: - - db_high_availability + - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] become: true block: @@ -244,10 +270,16 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "Oracle Data Guard Setup" when: - - db_high_availability + - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] become: true block: @@ -287,10 +319,17 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Oracle: Observer setup" become: true when: - - db_high_availability + - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] block: - name: "Observer Playbook: Setting the DB facts" @@ -365,6 +404,12 @@ # # -----------------------------------+---------------------------------------8 tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "DB2 HA - Install SAP DB2 on both nodes" become: true @@ -482,7 +527,7 @@ - 5.7-db2-pacemaker when: - - db_high_availability + - database_high_availability - platform == 'DB2' # # /*-------------------------------------------------------------------------8 @@ -504,10 +549,16 @@ # # -----------------------------------+---------------------------------------8 tasks: + - name: 
"Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "SQLServer HA - Install SQL4SAP on both nodes" when: - - db_high_availability + - database_high_availability - platform | upper == 'SQLSERVER' - node_tier == 'sqlserver' become: true @@ -627,12 +678,19 @@ vars_files: - vars/ansible-input-api.yaml # API Input template with defaults tasks: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: Ensure db-ha-done file exists ansible.builtin.file: path: "{{ _workspace_directory }}/.progress/db-ha-done" state: touch mode: 0755 - when: db_high_availability + when: database_high_availability ... # /*----------------------------------------------------------------------------8 diff --git a/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml b/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml index b66d828abe..37f275a4bb 100644 --- a/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml +++ b/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml @@ -51,6 +51,13 @@ tags: - 0.1-win-passwords + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "WinCluster-Create: Get witness account details" ansible.builtin.include_role: name: roles-misc/0.2-kv-secrets diff --git a/deploy/ansible/playbook_05_02_sap_pas_install.yaml b/deploy/ansible/playbook_05_02_sap_pas_install.yaml index c21074b578..8880826561 100644 --- a/deploy/ansible/playbook_05_02_sap_pas_install.yaml +++ b/deploy/ansible/playbook_05_02_sap_pas_install.yaml @@ -206,10 +206,16 @@ - name: Run the PAS installation Playbook block: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "PAS Installation playbook: - Determine if part of high availablity deployment" ansible.builtin.set_fact: - is_ha: "{{ db_high_availability | default(false) }}" + is_ha: "{{ database_high_availability | default(false) }}" tags: - always diff --git a/deploy/ansible/playbook_05_03_sap_app_install.yaml b/deploy/ansible/playbook_05_03_sap_app_install.yaml index 18e6d72083..b0f6cab6a5 100644 --- a/deploy/ansible/playbook_05_03_sap_app_install.yaml +++ b/deploy/ansible/playbook_05_03_sap_app_install.yaml @@ -164,10 +164,16 @@ become: true when: ansible_os_family != "Windows" block: + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "APP Installation playbook: - Determine if part of high availablity deployment" ansible.builtin.set_fact: - is_ha: "{{ db_high_availability | default(false) }}" + is_ha: "{{ database_high_availability | default(false) }}" tags: - always diff --git 
a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml index a6f8a89b7e..66d5752c1c 100644 --- a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml +++ b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml @@ -5,8 +5,17 @@ # To connect to the primary instance of the HSR configuration, the SAP application layer needs to use the # # virtual IP address that you defined and configured for the Azure Load Balancer # ############################################################################################################## +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "HSR 4.0.1.7 - SAP Profile changes - HANA Installations" + when: + - platform == 'HANA' + - database_high_availability block: - name: "HSR 4.0.1.7 - SAP Profile changes - Set the DB Server name list" @@ -15,13 +24,13 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" when: - - db_high_availability + - database_high_availability - name: "HSR 4.0.1.7 - SAP Profile changes - Set the DB virtual_host name" ansible.builtin.set_fact: - db_lb_virtual_host: "{% if db_high_availability %}{{ sap_sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host: "{% if database_high_availability %}{{ sap_sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" when: - - db_high_availability + - database_high_availability - db_server_temp is defined - db_server_temp | length > 0 @@ -34,7 +43,7 @@ check_mode: true register: check_default_pfl when: - - db_high_availability + - database_high_availability - name: "HSR: 4.0.1.7 - SAP DEFAULT.PFL changes " ansible.builtin.replace: @@ -44,7 +53,7 @@ replace: '#SAPDBHOST' when: - check_default_pfl is changed - - db_high_availability + - database_high_availability tags: - dbhostcomment @@ -55,11 +64,16 @@ insertafter: '#SAPDBHOST' when: - check_default_pfl is changed - - db_high_availability + - database_high_availability tags: - dbhostpara - name: "HSR: 4.0.1.7 - SAP DEFAULT.PFL changes for JAVA" + when: + - platform == 'HANA' + - database_high_availability + - instance_type is defined + - instance_type == 'JAVA' block: - name: "HSR: 4.0.1.7 - Check if SAP DEFAULT.PFL changes are needed for JAVA" ansible.builtin.lineinfile: @@ -70,7 +84,7 @@ check_mode: true register: check_default_pfl when: - - db_high_availability + - database_high_availability - name: "HSR: 4.0.1.7 - SAP DEFAULT.PFL changes for JAVA" ansible.builtin.replace: @@ -80,7 +94,7 @@ replace: '#j2ee/dbhost' when: - check_default_pfl is changed - - db_high_availability + - database_high_availability tags: - dbhostcomment @@ -91,17 +105,11 @@ insertafter: '#j2ee/dbhost' when: - check_default_pfl is changed - - db_high_availability + - database_high_availability tags: - dbhostpara - when: - - platform == 'HANA' - - db_high_availability - - instance_type is defined - - instance_type == 'JAVA' - when: - - platform == 'HANA' - - db_high_availability + + - name: "4.0.1.7 - Check if SAP DEFAULT.PFL changes are needed for rsdb/ssfs_connect" ansible.builtin.lineinfile: diff 
--git a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml index 7f4569e08d..79f1967c1f 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml @@ -107,8 +107,15 @@ var: sap_password verbosity: 4 +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "0.1 Password: - Get Cluster passwords" ansible.builtin.include_tasks: 0.1.1-ha_clusterpasswords.yaml - when: db_high_availability or scs_high_availability + when: database_high_availability or scs_high_availability ... diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/wincluster-witness.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/wincluster-witness.yaml index 153d341543..2af7124e8e 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/wincluster-witness.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/wincluster-witness.yaml @@ -18,6 +18,12 @@ # -------------------------------------+---------------------------------------8 # +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "0.2 Key Vault - WinCluster: - Witness Account Details" block: @@ -27,9 +33,9 @@ wincluster_witness_key: "{{ secret_prefix }}-witness-accesskey" wincluster_witness_name: "{{ secret_prefix }}-witness-name" -# -------------------------------------+---------------------------------------8 -# witness_storage_account_key -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # witness_storage_account_key + # -------------------------------------+---------------------------------------8 - name: "0.2 Key Vault - WinCluster: - Retrieve witness storage account key details" ansible.builtin.command: >- az keyvault secret show @@ -47,9 +53,9 @@ cacheable: true no_log: false -# -------------------------------------+---------------------------------------8 -# witness_storage_account_name -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # witness_storage_account_name + # -------------------------------------+---------------------------------------8 - name: "0.2 Key Vault - WinCluster: - Retrieve witness storage account details" ansible.builtin.command: >- az keyvault secret show diff --git a/deploy/ansible/roles-misc/0.6-ARM-Deployment/tasks/main.yaml b/deploy/ansible/roles-misc/0.6-ARM-Deployment/tasks/main.yaml index f982578afb..907938df0a 100644 --- a/deploy/ansible/roles-misc/0.6-ARM-Deployment/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.6-ARM-Deployment/tasks/main.yaml @@ -13,11 +13,18 @@ ansible.builtin.set_fact: deployment_type: "STANDALONE" + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "SDAF: Set Deployment Type" 
delegate_to: localhost ansible.builtin.set_fact: deployment_type: "HA" - when: db_high_availability + when: database_high_availability - name: "SDAF: Create ARM deployment name" delegate_to: localhost diff --git a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/main.yaml index 9eab784037..8254235350 100644 --- a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/main.yaml @@ -14,9 +14,16 @@ - domain_name | trim | length > 1 - domain is not defined +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "WinCluster: Prepare for cluster configuration" when: - - scs_high_availability or db_high_availability + - scs_high_availability or database_high_availability - ansible_os_family | upper == "WINDOWS" ansible.builtin.import_tasks: file: "1.17.0-wincluster-prerequisites.yaml" @@ -25,7 +32,7 @@ - name: "WinCluster: Configure cluster" when: - - scs_high_availability or db_high_availability + - scs_high_availability or database_high_availability - ansible_os_family | upper == "WINDOWS" ansible.builtin.import_tasks: file: "1.17.1-wincluster-createcluster.yaml" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index d347885251..52d80900ec 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -48,11 +48,18 @@ when: - scs_high_availability +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "2.4 Hosts: - Set the DB Virtual Instance hostname" ansible.builtin.set_fact: db_virtual_hostname: "{{ custom_db_virtual_hostname | default(sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'cl', true) }}" when: - - db_high_availability + - database_high_availability - name: "2.4 Hosts: - Get the Server name list" ansible.builtin.set_fact: @@ -84,7 +91,7 @@ {{ '%-19s' | format(db_lb_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} marker: "# {mark} DB Entries {{ db_virtual_hostname }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "2.4 Hosts: - Setup Virtual host name resolution - DB" @@ -97,7 +104,7 @@ {{ '%-19s' | format(db_lb_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' 
+ sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} marker: "# {mark} DB Entries {{ db_virtual_hostname }}" when: - - db_high_availability + - database_high_availability - platform == 'DB2' - name: "2.4 Hosts: Process pas_hostname variable and update host file when it is defined" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 2705e1fc45..3a5a8e8d8c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -488,6 +488,13 @@ when: - node_tier == 'hana' + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "ANF Mount: Create configuration file for the NetApp configuration settings" ansible.builtin.blockinfile: path: /etc/sysctl.d/91-NetApp-HANA.conf @@ -512,7 +519,7 @@ net.ipv4.tcp_sack = 1 when: - node_tier == 'hana' - - not db_high_availability + - not database_high_availability - name: "ANF Mount: Create configuration file with additional optimization settings" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index 421177cdb4..d0f5e3525e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -117,9 +117,16 @@ when: - node_tier in item.target_nodes or item.target_nodes == ['all'] +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + # absent_from_fstab - name: "ANF Mount: RHEL DB high availability configuration" - when: ansible_os_family | upper == "REDHAT" and db_high_availability + when: ansible_os_family | upper == "REDHAT" and database_high_availability block: - name: "ANF Mount: remove entry in fstab for {{ item.path }} when DB high availability" ansible.posix.mount: @@ -131,7 +138,7 @@ when: - item.target_nodes == ['hana'] - item.type in ['data','log','shared'] - - db_high_availability + - database_high_availability - name: "ANF Mount: make mount for {{ item.path }} ephemeral when DB high availability" ansible.posix.mount: @@ -143,7 +150,7 @@ when: - item.target_nodes == ['hana'] - item.type in ['data','log','shared'] - - db_high_availability + - database_high_availability # https://www.suse.com/support/kb/doc/?id=000019904 # - name: "ANF Mount: SLES DB high availability configuration" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index 0dd1ce70d5..25347b51c3 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -433,6 +433,13 @@ when: - node_tier == 'hana' + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - 
db_high_availability is defined + - database_high_availability is not defined + - name: "ANF Mount: Create configuration file for the NetApp configuration settings" ansible.builtin.blockinfile: path: /etc/sysctl.d/91-NetApp-HANA.conf @@ -457,7 +464,7 @@ net.ipv4.tcp_sack = 1 when: - node_tier == 'hana' - - not db_high_availability + - not database_high_availability - name: "ANF Mount: Create configuration file with additional optimization settings" diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 4e620db186..d7ada6e639 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -335,9 +335,19 @@ when: - platform == 'HANA' + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "DBLoad: Get hdbuserstore path" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + when: + - database_high_availability + - platform == 'HANA' ansible.builtin.find: paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" file_type: file @@ -346,31 +356,32 @@ register: hdbuserstore_file environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - when: - - db_high_availability - - platform == 'HANA' - name: "DBLoad: Set hdbuserstore path" - ansible.builtin.set_fact: - hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" - db_lb_virtual_host: "{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file is defined - hdbuserstore_file | length > 0 + ansible.builtin.set_fact: + hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" + db_lb_virtual_host: "{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}" - name: "DBLoad: show hdbuserstore path" + when: + - database_high_availability + - platform == 'HANA' ansible.builtin.debug: var: hdbuserstore_path verbosity: 2 - when: - - db_high_availability - - platform == 'HANA' - name: "DBLoad: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + when: + - database_high_availability + - platform == 'HANA' + - hdbuserstore_path is defined ansible.builtin.shell: | {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} environment: @@ -380,10 +391,6 @@ register: hdbuserstore vars: allow_world_readable_tmpfiles: true - when: - - db_high_availability - - platform == 'HANA' - - hdbuserstore_path is defined when: - not dbload_installed.stat.exists @@ -405,9 +412,19 @@ ansible.builtin.set_fact: dbload_already_performed: true + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "DBLoad: Get hdbuserstore path" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + 
when: + - database_high_availability + - platform == 'HANA' ansible.builtin.find: paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" file_type: file @@ -416,31 +433,35 @@ register: hdbuserstore_file environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - when: - - db_high_availability - - platform == 'HANA' + - name: "DBLoad: Set hdbuserstore path" - ansible.builtin.set_fact: - hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" - db_lb_virtual_host: "{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file is defined - hdbuserstore_file | length > 0 + ansible.builtin.set_fact: + hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" + db_lb_virtual_host: "{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}" + - name: "DBLoad: show hdbuserstore path" + when: + - database_high_availability + - platform == 'HANA' ansible.builtin.debug: var: hdbuserstore_path verbosity: 2 - when: - - db_high_availability - - platform == 'HANA' + - name: "DBLoad: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + when: + - database_high_availability + - platform == 'HANA' + - hdbuserstore_path is defined ansible.builtin.shell: | {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ hana_schema }} {{ main_password }} environment: @@ -450,11 +471,6 @@ register: hdbuserstore vars: allow_world_readable_tmpfiles: true - when: - - db_high_availability - - platform == 'HANA' - - hdbuserstore_path is defined - ... 
# /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 6d96984e6d..c7fd7c5ec7 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -21,13 +21,20 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "PAS Install: Set BOM facts" ansible.builtin.set_fact: sap_inifile: "{{ bom_base_name }}-pas-{{ sid_to_be_deployed.sid | lower }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-pas-inifile-param.j2" dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | lower }}-params" - db_lb_virtual_host_HANA: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - db_lb_virtual_host_AnyDB: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" - name: "PAS Install: Set BOM facts db host" ansible.builtin.set_fact: @@ -178,7 +185,7 @@ vars: instance_type: "{{ pas_bom_instance_type }}" when: - - db_high_availability + - database_high_availability - platform == "HANA" - name: "PAS Install: Create temp directory for sid" @@ -284,7 +291,7 @@ recurse: true register: hdbuserstore_file when: - - db_high_availability + - database_high_availability - platform == 'HANA' @@ -292,7 +299,7 @@ ansible.builtin.set_fact: hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file | length > 0 @@ -300,7 +307,7 @@ ansible.builtin.debug: var: hdbuserstore_path when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" @@ -316,7 +323,7 @@ vars: allow_world_readable_tmpfiles: true when: - - db_high_availability + - database_high_availability - platform == 'HANA' - pas_installed_according_to_sapinst is defined - pas_installed_according_to_sapinst | length > 0 @@ -365,14 +372,14 @@ recurse: true register: hdbuserstore_file when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "PAS Install: Set hdbuserstore path" ansible.builtin.set_fact: hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file | length > 0 @@ -380,7 +387,7 @@ ansible.builtin.debug: var: hdbuserstore_path when: - - db_high_availability + - 
database_high_availability - platform == 'HANA' - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" @@ -395,7 +402,7 @@ vars: allow_world_readable_tmpfiles: true when: - - db_high_availability + - database_high_availability - platform == 'HANA' diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index c9109bd148..6b82e2f747 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -19,13 +19,20 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "APP Install: Set BOM facts" ansible.builtin.set_fact: sap_inifile: "{{ bom_base_name }}-app-{{ sid_to_be_deployed.sid }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-app-inifile-param.j2" dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | upper }}-params" - db_lb_virtual_host_HANA: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - db_lb_virtual_host_AnyDB: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" - name: "APP Install: Set BOM facts db host" ansible.builtin.set_fact: @@ -153,7 +160,7 @@ - name: "APP Install: HANA HSR - Update Profile" ansible.builtin.import_tasks: ../../../roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml when: - - db_high_availability + - database_high_availability - platform == "HANA" - name: "APP Install: Create temp directory for sid" @@ -230,20 +237,20 @@ recurse: true register: hdbuserstore_file when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "APP Install: Set hdbuserstore path" ansible.builtin.set_fact: hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file | length > 0 - name: "APP Install: show hdbuserstore path" ansible.builtin.debug: var: hdbuserstore_path when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "APP Install: Set DB Virtual Host name" @@ -257,7 +264,7 @@ vars: allow_world_readable_tmpfiles: true when: - - db_high_availability + - database_high_availability - platform == 'HANA' - pas_installed_according_to_sapinst is defined - pas_installed_according_to_sapinst | length > 0 @@ -281,14 +288,14 @@ recurse: true register: hdbuserstore_file when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "APP Install: Set hdbuserstore path" 
ansible.builtin.set_fact: hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" when: - - db_high_availability + - database_high_availability - platform == 'HANA' - hdbuserstore_file | length > 0 @@ -296,7 +303,7 @@ ansible.builtin.debug: var: hdbuserstore_path when: - - db_high_availability + - database_high_availability - platform == 'HANA' - name: "APP Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" @@ -310,7 +317,7 @@ vars: allow_world_readable_tmpfiles: true when: - - db_high_availability + - database_high_availability - platform == 'HANA' when: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index b15a512727..51a1b93169 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -11,10 +11,16 @@ # +------------------------------------4--------------------------------------*/ +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "Configure the ANF file system resources" when: - - db_high_availability + - database_high_availability - NFS_provider == "ANF" - hana_data_mountpoint is defined - hana_data_mountpoint | length > 1 diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-Suse.yml index f5077f0de8..51d7846681 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-Suse.yml @@ -5,9 +5,16 @@ # Ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-netapp-files-suse#create-file-system-resources +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "Configure the ANF specific resources when relevant" when: - - db_high_availability + - database_high_availability - NFS_provider == "ANF" - hana_data_mountpoint is defined - hana_data_mountpoint | length > 1 diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index b64a6c21a5..de5df63a15 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -120,9 +120,16 @@ register: resource_defaults failed_when: resource_defaults.rc > 1 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "5.5.4.1 HANA Cluster configuration - Configure cluster constraints for SAP HANA and ANF" 
when: - - db_high_availability + - database_high_availability - NFS_provider == "ANF" - ansible_hostname == primary_instance_name block: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index d3c1e9a322..5b19674f47 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -143,9 +143,16 @@ when: inventory_hostname == primary_instance_name +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "5.5.4.1 HANA Pacemaker configuration - Configure the ANF specific resources when relevant" when: - - db_high_availability + - database_high_availability - NFS_provider == "ANF" - hana_data_mountpoint is defined - hana_data_mountpoint | length > 1 diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml index 657fe69ad0..911ebd9b86 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml @@ -136,6 +136,12 @@ # To connect to the primary instance of the HADR configuration, the SAP application layer needs to use the # # virtual IP address that you defined and configured for the Azure Load Balancer # ################################################################################################################# +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "5.6 - SCS / ASCS profile changes - DB2 Installations" become: true @@ -183,7 +189,7 @@ when: - inventory_hostname == primary_instance_name - platform == 'DB2' - - db_high_availability + - database_high_availability - ensa1 | default(false) ... 
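The six-line backward-compatibility shim seen above recurs verbatim across the roles touched by this patch. A minimal sketch of how the same mapping could instead live in one shared task file and be pulled in with include_tasks; the path roles-misc/0.0-compat/tasks/db-ha-compat.yaml and the task names are hypothetical illustrations, not something introduced by this series:

    # db-ha-compat.yaml (hypothetical shared shim, mirrors the inline task)
    # Map the deprecated db_high_availability flag onto the new
    # database_high_availability variable, without overriding a value
    # the caller has already set explicitly.
    - name: "Backward Compatibility - Check required Database HA variables"
      ansible.builtin.set_fact:
        database_high_availability: "{{ db_high_availability | default(false) }}"
      when:
        - db_high_availability is defined
        - database_high_availability is not defined

    # A consuming role or playbook would then replace its inline copy with:
    - name: "Compatibility: map deprecated HA variables"
      ansible.builtin.include_tasks: "{{ playbook_dir }}/roles-misc/0.0-compat/tasks/db-ha-compat.yaml"

Behaviour is unchanged either way: when only the deprecated flag is defined, the new variable inherits its value; when database_high_availability is already set, the shim is skipped.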
diff --git a/deploy/ansible/roles-sap/windows/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/windows/5.2-pas-install/tasks/main.yaml index a89b6b4e98..eb63112593 100644 --- a/deploy/ansible/roles-sap/windows/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/windows/5.2-pas-install/tasks/main.yaml @@ -70,6 +70,13 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "PAS Install: Set the SCS Server name" ansible.builtin.set_fact: scs_server: "{{ hostvars[scs_server_temp | first]['virtual_host'] }}" @@ -77,7 +84,7 @@ db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" instance_number: "{% if node_tier == 'pas' %}'00'{% else %}'01'{% endif %}" file_path: "{% if scs_high_availability %}INSTALL/HA/ABAP/APP1{% else %}INSTALL/DISTRIBUTED/ABAP/APP1{% endif %}" - db_lb_virtual_host: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - name: "PAS Install: Include 3.3.1-bom-utility role" ansible.builtin.include_role: diff --git a/deploy/ansible/roles-sap/windows/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/windows/5.3-app-install/tasks/main.yaml index 9588449c13..f832e2357f 100644 --- a/deploy/ansible/roles-sap/windows/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/windows/5.3-app-install/tasks/main.yaml @@ -70,13 +70,20 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + - name: "APP Install: Set the SCS Server name" ansible.builtin.set_fact: scs_server: "{{ hostvars[scs_server_temp | first]['virtual_host'] }}" scs_sapmnt_server: "{% if scs_high_availability %}{{ sid_to_be_deployed.sid | lower }}scs{{ scs_instance_number }}cl1{% else %}{{ hostvars[scs_server_temp | first]['virtual_host'] }}{% endif %}" db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" file_path: "{% if scs_high_availability %}INSTALL/HA/ABAP/APP1{% else %}INSTALL/DISTRIBUTED/ABAP/APP1{% endif %}" - db_lb_virtual_host: "{% if db_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - name: "APP Install: Include 3.3.1-bom-utility role" ansible.builtin.include_role: diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 2484f2fc72..1316691ff4 100644 --- 
a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -4,7 +4,7 @@ become_user_name: root # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.9.3.0" +SDAF_Version: "3.10.0.0" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 @@ -206,6 +206,12 @@ NFS_provider: "NONE" NFS_version: "NFSv4.1" use_simple_mount: false +# Cluster - Defaults +database_high_availability: false +database_cluster_type: "AFA" +scs_high_availability: false +scs_cluster_type: "AFA" + # ------------------- Begin - SAP SWAP settings variables --------------------8 sap_swap: - { tier: "scs", swap_size_mb: "4096" } From 0446a4202bc5c0c4843ca8386efae012bbd49921 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Dec 2023 22:11:13 +0530 Subject: [PATCH 005/607] remove extra lines --- .../4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml index 66d5752c1c..4d54ce5cbc 100644 --- a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml +++ b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.7-sap-profile-changes.yml @@ -109,8 +109,6 @@ tags: - dbhostpara - - - name: "4.0.1.7 - Check if SAP DEFAULT.PFL changes are needed for rsdb/ssfs_connect" ansible.builtin.lineinfile: path: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" From 240c9cd5c2d123b7a9cbdc83c142af05b5e59b42 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 16:35:42 +0530 Subject: [PATCH 006/607] Remove unnecessary tasks for SUSE and REDHAT in ACSS registration playbook --- .../0.5-ACSS-registration/tasks/main.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 8c13d2ca56..9366f09bba 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -75,19 +75,6 @@ crm_resource --resource g-{{ sap_sid | upper }}_{{ instance_type | upper }} --locate | cut -d ':' -f 2| cut -d " " -f 2 when: - scs_high_availability - - ansible_os_family | upper == 'SUSE' - register: cluster_group_location - failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] - tags: - - skip_ansible_lint - - - name: "0.5.1 acss registration: - Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" - ansible.builtin.shell: >- - set -o pipefail; - pcs constraint location show resources g-{{ sap_sid | upper }}_{{ instance_type | upper }} | grep "Node" | awk '{print $2}' - when: - - scs_high_availability - - ansible_os_family | upper == 'REDHAT' register: cluster_group_location failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] tags: From 3aa7a705ddd1cfc7d8b1152b6231f01207cb25b7 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 16:44:53 +0530 Subject: [PATCH 007/607] Add az login before Get Access Token tasks for ACSS registration --- .../roles-misc/0.5-ACSS-registration/tasks/main.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml 
b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 9366f09bba..dd7db433cd 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -129,11 +129,18 @@ tags: - skip_ansible_lint + - name: "0.5.1 acss registration: - perform az login" + ansible.builtin.command: >- + az login --identity --allow-no-subscriptions --output none + no_log: true + changed_when: false + - name: "0.5.1 acss registration: - Get Access Token" ansible.builtin.shell: >- az account get-access-token --resource https://management.azure.com \ --query accessToken -o tsv register: acss_access_token + changed_when: false no_log: true tags: - skip_ansible_lint From 13a0707f58657fa45e561dd16c6171eb80f7ce57 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 16:57:04 +0530 Subject: [PATCH 008/607] Add debug task for ACSS registration parameters --- .../0.5-ACSS-registration/tasks/main.yaml | 39 +++++++++++++------ 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index dd7db433cd..00f7ab61a1 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -151,23 +151,40 @@ tags: - skip_ansible_lint + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance parameters" + ansible.builtin.debug: + msg: + - "acss_resource_id: {{ acss_resource_id }}" + - "acss_subscription_id: {{ acss_subscription_id }}" + - "acss_resource_group: {{ acss_resource_group }}" + - "acss_location: {{ acss_location }}" + - "acss_sid: {{ acss_sid }}" + - "acss_instance_type: {{ acss_instance_type }}" + - "acss_environment: {{ acss_environment }}" + - "acss_sap_product: {{ acss_sap_product }}" + - "acss_guid: {{ acss_guid.stdout }}" + - "acss_access_token: {{ acss_access_token.stdout }}" + - "acss_vm_id: {{ hostvars[ansible_hostname]['scs_running_on'] }}" + tags: + - skip_ansible_lint + - name: "0.5.1 acss registration: - Create [ACSS] virtual instance" ansible.builtin.uri: url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" method: PUT body_format: json body: | - { - "properties": { - "environment": "{{ acss_environment }}", - "sapProduct": "{{ acss_sap_product }}", - "configuration": { - "configurationType": "Discovery", - "centralServerVmId": "/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Compute/virtualMachines/{{ hostvars[ansible_hostname]['scs_running_on'] }}" - } - }, - "location": "{{ acss_location }}" - } + { + "properties": { + "environment": "{{ acss_environment }}", + "sapProduct": "{{ acss_sap_product }}", + "configuration": { + "configurationType": "Discovery", + "centralServerVmId": "/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Compute/virtualMachines/{{ hostvars[ansible_hostname]['scs_running_on'] }}" + } + }, + "location": "{{ acss_location }}" + } # status_code: [200, 201] headers: Authorization: "Bearer {{ acss_access_token.stdout }}" From 2c6028403ba8387ca10a87aa5998fcdbbfd93e4a Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 17:02:12 +0530 Subject: [PATCH 009/607] Update ACSS registration tasks in 
Ansible playbook --- .../roles-misc/0.5-ACSS-registration/tasks/main.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 00f7ab61a1..5b3df27090 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -192,7 +192,13 @@ x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" register: create_vis_response failed_when: create_vis_response.json.properties.provisioningState != 'Accepted' - no_log: true + no_log: false + + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance creation response" + ansible.builtin.debug: + msg: "{{ create_vis_response }}" + tags: + - skip_ansible_lint - name: "0.5.1 acss registration: - Check the registered [ACSS] virtual instance" ansible.builtin.uri: From 65286ae8b760be87be5fd2971d1c14006e40c1e4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 12 Dec 2023 15:12:14 +0200 Subject: [PATCH 010/607] Public network flag --- .../terraform/terraform-units/modules/sap_deployer/key_vault.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf index c189a04d70..8aacf17c4a 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf @@ -29,7 +29,7 @@ resource "azurerm_key_vault" "kv_user" { dynamic "network_acls" { - for_each = range(true ? 1 : 0) + for_each = range(!var.public_network_access_enabled ? 1 : 0) content { bypass = "AzureServices" From 2ec95efdfa95ff11bcf9ed025d2a25eb97e828bc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 12 Dec 2023 15:17:31 +0200 Subject: [PATCH 011/607] Add the Azure Center for SAP solutions Service role for management role for the MSI --- .../modules/sap_deployer/infrastructure.tf | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 6e4a4b0cc0..731655acdf 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -128,3 +128,19 @@ resource "azurerm_role_assignment" "resource_group_contributor_contributor_msi" principal_id = azurerm_user_assigned_identity.deployer.principal_id } +resource "azurerm_role_assignment" "resource_group_acsservice" { + provider = azurerm.main + count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 + scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id + role_definition_name = "Azure Center for SAP solutions Service role for management" + principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id +} + +resource "azurerm_role_assignment" "resource_group_acsservice_msi" { + provider = azurerm.main + count = var.assign_subscription_permissions ? 1 : 0 + scope = local.resource_group_exists ? 
data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id + role_definition_name = "Azure Center for SAP solutions Service role for management" + principal_id = azurerm_user_assigned_identity.deployer.principal_id +} + From 44e6c936a9cfc17bb69da69fcb1397a76798408a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 12 Dec 2023 15:23:02 +0200 Subject: [PATCH 012/607] Use Azure Center for SAP solutions administrator --- .../terraform-units/modules/sap_deployer/infrastructure.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 731655acdf..38ab790e46 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -132,7 +132,7 @@ resource "azurerm_role_assignment" "resource_group_acsservice" { provider = azurerm.main count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions Service role for management" + role_definition_name = "Azure Center for SAP solutions administrator" principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id } @@ -140,7 +140,7 @@ resource "azurerm_role_assignment" "resource_group_acsservice_msi" { provider = azurerm.main count = var.assign_subscription_permissions ? 1 : 0 scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions Service role for management" + role_definition_name = "Azure Center for SAP solutions administrator" principal_id = azurerm_user_assigned_identity.deployer.principal_id } From 3507abdd281faabfa3ee3a5dc1163ab087129af8 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 19:15:25 +0530 Subject: [PATCH 013/607] Add ACSS virtual instance write check --- .../roles-misc/0.5-ACSS-registration/tasks/main.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 5b3df27090..4abf494cd2 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -151,6 +151,13 @@ tags: - skip_ansible_lint + - name: "0.5.1 acss registration: - Check if we have [ACSS] virtual instance write" + ansible.builtin.shell: >- + az provider show --namespace Microsoft.Workloads \ + --query "resourceTypes[?resourceType=='sapVirtualInstances'].permissions[?contains(@.actions, 'Microsoft.Workloads/sapVirtualInstances/write')]" + register: acss_virtual_instance_write + changed_when: false + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance parameters" ansible.builtin.debug: msg: @@ -163,8 +170,9 @@ - "acss_environment: {{ acss_environment }}" - "acss_sap_product: {{ acss_sap_product }}" - "acss_guid: {{ acss_guid.stdout }}" - - "acss_access_token: {{ acss_access_token.stdout }}" - "acss_vm_id: {{ hostvars[ansible_hostname]['scs_running_on'] }}" + - "acss_write_auth: {{ acss_virtual_instance_write.stdout }}" + verbosity: 2 tags: - 
skip_ansible_lint From a2d637375281b981930e8d18fc44235318c11df3 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 19:46:14 +0530 Subject: [PATCH 014/607] Add DB2 keystore file handling tasks --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index e61d13a79d..48a29b6551 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -13,13 +13,27 @@ - sapdb2{{ db_sid | lower }}_db_encr.p12 - sapdb2{{ db_sid | lower }}_db_encr.sth +- name: "DB2: Stat if the keystore files exist on Primary node" + ansible.builtin.stat: + path: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" + loop: "{{ keystore_files }}" + register: keystore_files_stat + when: ansible_hostname == primary_instance_name + +- name: "DB2: Determine if the database is encrypted" + ansible.builtin.set_fact: + db_encrypted: "(keystore_files_stat.results | map(attribute='stat.exists')) is all" + when: ansible_hostname == primary_instance_name + - name: "DB2: Fetch keystore files from Primary node to Controller" ansible.builtin.fetch: src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" dest: /tmp/keystore_files/ flat: true loop: "{{ keystore_files }}" - when: ansible_hostname == primary_instance_name + when: + - ansible_hostname == primary_instance_name + - db_encrypted - name: "DB2: Copy keystore files from Controller to Secondary node" ansible.builtin.copy: @@ -29,4 +43,6 @@ owner: db2{{ db_sid | lower }} group: db{{ db_sid | lower }}adm loop: "{{ keystore_files }}" - when: ansible_hostname == secondary_instance_name + when: + - ansible_hostname == secondary_instance_name + - hostvars.primary_instance_name.db_encrypted From 9d121672b7817674f07a06a437140009f9788987 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 20:08:29 +0530 Subject: [PATCH 015/607] Update keystore file handling and ACSS registration configuration --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 ++++++++ .../roles-misc/0.5-ACSS-registration/tasks/main.yaml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 48a29b6551..5e41f16e13 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -25,6 +25,14 @@ db_encrypted: "(keystore_files_stat.results | map(attribute='stat.exists')) is all" when: ansible_hostname == primary_instance_name +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" + - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" + when: ansible_hostname == primary_instance_name + - name: "DB2: Fetch keystore files from Primary node to Controller" ansible.builtin.fetch: src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" diff --git 
a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 4abf494cd2..5889636819 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -188,7 +188,7 @@ "sapProduct": "{{ acss_sap_product }}", "configuration": { "configurationType": "Discovery", - "centralServerVmId": "/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Compute/virtualMachines/{{ hostvars[ansible_hostname]['scs_running_on'] }}" + "centralServerVmId": "{{ acss_resource_id }}" } }, "location": "{{ acss_location }}" From ec6dbbf9cec1e8907264cde4da4755a129d8e79b Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 20:17:18 +0530 Subject: [PATCH 016/607] Fix DB2 keystore file check in ansible playbook --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 5e41f16e13..fa424a7994 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -22,7 +22,7 @@ - name: "DB2: Determine if the database is encrypted" ansible.builtin.set_fact: - db_encrypted: "(keystore_files_stat.results | map(attribute='stat.exists')) is all" + db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" when: ansible_hostname == primary_instance_name - name: "DB2: Debug if the database is encrypted" From 1d632601cc47c689868f491a26b3fdf944f3dd29 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 20:30:44 +0530 Subject: [PATCH 017/607] Fix condition for copying keystore files --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index fa424a7994..85dd1a04b1 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -53,4 +53,5 @@ loop: "{{ keystore_files }}" when: - ansible_hostname == secondary_instance_name - - hostvars.primary_instance_name.db_encrypted + - hostvars[primary_instance_name]['db_encrypted'] is defined + - hostvars[primary_instance_name]['db_encrypted'] From 1750be2a0d73f21c320aba020a6c341e9f1005d1 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 12 Dec 2023 20:42:18 +0530 Subject: [PATCH 018/607] Add DB2 restore tasks for encrypted and non-encrypted databases --- .../tasks/4.2.1.3-db2_restore_secondary.yml | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml index a39aa6137f..d7bd544f59 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml @@ -67,6 +67,20 @@ ansible.builtin.debug: msg: "DB2 - 
Restoring database, please wait" + # ##################### Start of Restore without Encryption ################################## + - name: "DB2 - Restore without encryption" + ansible.builtin.shell: db2 restore database {{ db_sid }} from {{ db_sid_backup_dir }} taken at {{ backup_timestamp }} on /db2/{{ db_sid }} no encrypt without prompting + args: + executable: /bin/csh + register: db2_restore_result + environment: + PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" + failed_when: db2_restore_result.rc > 2 + when: + - db2_started.rc == 0 + - hostvars[primary_instance_name]['db_encrypted'] is defined + - not hostvars[primary_instance_name]['db_encrypted'] + # ######### ########### End of Restore without Encryption #################################### # ##################### Start of Restore with Encryption ################################## - name: "DB2 - Restore with encryption" @@ -77,7 +91,10 @@ environment: PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" failed_when: db2_restore_result.rc > 2 - when: db2_started.rc == 0 + when: + - db2_started.rc == 0 + - hostvars[primary_instance_name]['db_encrypted'] is defined + - hostvars[primary_instance_name]['db_encrypted'] # ######### ########### End of Restore with Encryption #################################### when: - ansible_hostname == secondary_instance_name From 2b9d1aa543dc43f90c85506bf6d0ade84027e08f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 12 Dec 2023 17:18:54 +0200 Subject: [PATCH 019/607] Remove the extra debug messages --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 85dd1a04b1..376cdd0f18 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -27,10 +27,8 @@ - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: - msg: + msg: - "Database is encrypted: {{ db_encrypted }}" - - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" when: ansible_hostname == primary_instance_name - name: "DB2: Fetch keystore files from Primary node to Controller" From 6adf39fd97dbf69cd1d416f18cd2832eb0471078 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 13 Dec 2023 16:36:40 +0530 Subject: [PATCH 020/607] Commented out database_high_availability and scs_high_availability settings --- deploy/ansible/vars/ansible-input-api.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 1316691ff4..c9d274a933 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -207,9 +207,9 @@ NFS_version: "NFSv4.1" use_simple_mount: false # Cluster - Defaults -database_high_availability: false +# database_high_availability: false database_cluster_type: "AFA" -scs_high_availability: false +# scs_high_availability: false scs_cluster_type: "AFA" # ------------------- Begin - SAP SWAP settings variables --------------------8 From ff18ee1836b222c51faa67e8128b5585e00887d3 Mon Sep 17 
00:00:00 2001 From: hdamecharla Date: Wed, 13 Dec 2023 18:36:49 +0530 Subject: [PATCH 021/607] Add backward compatibility for checking required Database HA variables --- deploy/ansible/playbook_04_00_00_db_install.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index efdf1fc955..acd40fb1af 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -98,6 +98,12 @@ # Build the list of tasks to be executed in order here. # # -------------------------------------+---------------------------------------8 + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined - name: "Database Installation Playbook: - Install HANA" become: true From bc3a899a05d2396610e87374049ad30d71336482 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 13 Dec 2023 19:01:25 +0530 Subject: [PATCH 022/607] Refactor playbook_04_00_00_db_install.yaml --- .../ansible/playbook_04_00_00_db_install.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index acd40fb1af..11a62d6595 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -93,11 +93,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. + # + # -------------------------------------+---------------------------------------8 - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: database_high_availability: "{{ db_high_availability | default(false) }}" @@ -260,11 +260,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. + # + # -------------------------------------+---------------------------------------8 - name: "Database Installation Playbook: - Install Oracle" become: true become_user: root @@ -298,11 +298,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. 
+ # + # -------------------------------------+---------------------------------------8 - name: "Database Installation Playbook: - Install Oracle" become: true become_user: root @@ -458,11 +458,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. + # + # -------------------------------------+---------------------------------------8 - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: database_high_availability: "{{ db_high_availability | default(false) }}" @@ -524,11 +524,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. + # + # -------------------------------------+---------------------------------------8 - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: database_high_availability: "{{ db_high_availability | default(false) }}" @@ -594,11 +594,11 @@ - vars/ansible-input-api.yaml # API Input template with defaults tasks: -# -------------------------------------+---------------------------------------8 -# -# Build the list of tasks to be executed in order here. -# -# -------------------------------------+---------------------------------------8 + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. 
+ # + # -------------------------------------+---------------------------------------8 - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: database_high_availability: "{{ db_high_availability | default(false) }}" From a1f770a8bcc44f0f4f4c195ada1d8e5877fb6b26 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 13 Dec 2023 19:19:09 +0530 Subject: [PATCH 023/607] Add DBLoad installation tasks and PAS installation tasks --- .../roles-sap/5.1-dbload/tasks/main.yaml | 17 +++ .../roles-sap/5.2-pas-install/tasks/main.yaml | 130 ++++++++++++------ 2 files changed, 103 insertions(+), 44 deletions(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index d7ada6e639..94193865cc 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -419,6 +419,23 @@ - db_high_availability is defined - database_high_availability is not defined + - name: "DBLoad Install: Get DEFAULT.PFL" + ansible.builtin.slurp: + src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" + register: profilefile + + - name: "DBLoad Install: Get schema name" + ansible.builtin.set_fact: + schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" + when: + - platform == 'HANA' + + - name: "DBLoad Install: Installation results" + ansible.builtin.debug: + msg: "Schema name {{ schema_name }}" + when: + - platform == 'HANA' + - name: "DBLoad: Get hdbuserstore path" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index c7fd7c5ec7..d2fe11a9d9 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -215,6 +215,48 @@ - name: "Starting PAS installation" block: + - name: "PAS Install: Get hdbuserstore path" + ansible.builtin.find: + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" + file_type: file + patterns: 'hdbuserstore' + recurse: true + register: hdbuserstore_file + when: + - database_high_availability + - platform == 'HANA' + + - name: "PAS Install: Set hdbuserstore path" + ansible.builtin.set_fact: + hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" + when: + - database_high_availability + - platform == 'HANA' + - hdbuserstore_file | length > 0 + + - name: "PAS Install: show hdbuserstore path" + ansible.builtin.debug: + var: hdbuserstore_path + when: + - database_high_availability + - platform == 'HANA' + + - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" + become: true + become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + ansible.builtin.shell: | + {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} + environment: + SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + TMPDIR: "{{ hdbuserstore_path }}" + ssfs_connect: "1" + register: hdbuserstore + vars: + allow_world_readable_tmpfiles: true + when: + - database_high_availability + - platform == 'HANA' + - name: "PAS Install" ansible.builtin.shell: | umask {{ custom_umask | 
default('022') }} ; @@ -283,50 +325,50 @@ - pas_installed_according_to_sapinst is defined - pas_installed_according_to_sapinst | length > 0 - - name: "PAS Install: Get hdbuserstore path" - ansible.builtin.find: - paths: "/usr/sap/{{ sap_sid | upper }}" - file_type: file - patterns: 'hdbuserstore' - recurse: true - register: hdbuserstore_file - when: - - database_high_availability - - platform == 'HANA' - - - - name: "PAS Install: Set hdbuserstore path" - ansible.builtin.set_fact: - hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" - when: - - database_high_availability - - platform == 'HANA' - - hdbuserstore_file | length > 0 - - - name: "PAS Install: show hdbuserstore path" - ansible.builtin.debug: - var: hdbuserstore_path - when: - - database_high_availability - - platform == 'HANA' - - - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" - become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" - ansible.builtin.shell: | - {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} - environment: - SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - TMPDIR: "{{ hdbuserstore_path }}" - ssfs_connect: "1" - register: hdbuserstore - vars: - allow_world_readable_tmpfiles: true - when: - - database_high_availability - - platform == 'HANA' - - pas_installed_according_to_sapinst is defined - - pas_installed_according_to_sapinst | length > 0 + # - name: "PAS Install: Get hdbuserstore path" + # ansible.builtin.find: + # paths: "/usr/sap/{{ sap_sid | upper }}" + # file_type: file + # patterns: 'hdbuserstore' + # recurse: true + # register: hdbuserstore_file + # when: + # - database_high_availability + # - platform == 'HANA' + + + # - name: "PAS Install: Set hdbuserstore path" + # ansible.builtin.set_fact: + # hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" + # when: + # - database_high_availability + # - platform == 'HANA' + # - hdbuserstore_file | length > 0 + + # - name: "PAS Install: show hdbuserstore path" + # ansible.builtin.debug: + # var: hdbuserstore_path + # when: + # - database_high_availability + # - platform == 'HANA' + + # - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" + # become: true + # become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + # ansible.builtin.shell: | + # {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} + # environment: + # SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + # TMPDIR: "{{ hdbuserstore_path }}" + # ssfs_connect: "1" + # register: hdbuserstore + # vars: + # allow_world_readable_tmpfiles: true + # when: + # - database_high_availability + # - platform == 'HANA' + # - pas_installed_according_to_sapinst is defined + # - pas_installed_according_to_sapinst | length > 0 - name: "PAS Install: Check if Standalone" ansible.builtin.set_fact: From 5ec3465858eac9f5c692086bb5cbcf10fcc7a6a0 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 13 Dec 2023 19:24:40 +0530 Subject: [PATCH 024/607] Fix condition for secret existence check in ha_clusterpasswords.yaml --- .../0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml index 3040ec4a57..07121188c5 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml @@ -83,7 +83,7 @@ --vault-name {{ kv_name }} --name {{ cluster_password_id }} --value "{{ db_cluster_password }}" - when: secret_exists is defined + when: not secret_exists - name: "0.1 HA Cluster Password: - Show SAP cluster Password" ansible.builtin.debug: From 305b1dc8df65618ef31d5e8ec5919138dc610a13 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 13 Dec 2023 22:39:48 +0200 Subject: [PATCH 025/607] If iscsi count == 0 no output --- .../terraform-units/modules/sap_landscape/outputs.tf | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index 51c7e196b8..c19eaa70d2 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -472,7 +472,9 @@ output "iSCSI_server_names" { output "iSCSI_servers" { description = "iSCSI devices" - value = distinct(flatten([for idx, vm in var.naming.virtualmachine_names.ISCSI_COMPUTERNAME : [ + value = local.iscsi_count > 0 ? distinct(flatten([for idx, vm in var.naming.virtualmachine_names.ISCSI_COMPUTERNAME : [ format("{ host: '%s', IP: %s }", vm, azurerm_network_interface.iscsi[idx].private_ip_address)] - ])) + ])) : ( + [] + ) } From 8da77dad463e2e21c815141de4e149893d47f061 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 13 Dec 2023 22:53:58 +0200 Subject: [PATCH 026/607] Add private endpoint checks --- .../terraform-units/modules/sap_landscape/storage_accounts.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index feb4090e41..b08d475e82 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -358,7 +358,7 @@ resource "azurerm_storage_account_network_rules" "transport" { resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.dnsmanagement - count = var.create_transport_storage && local.use_Azure_native_DNS && var.NFS_provider == "AFS" && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 + count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && var.NFS_provider == "AFS" && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 name = replace( lower( format("%s", local.landscape_shared_transport_storage_account_name) @@ -602,7 +602,7 @@ resource "azurerm_storage_account_network_rules" "install" { resource "azurerm_private_dns_a_record" "install" { provider = azurerm.dnsmanagement - count = local.use_Azure_native_DNS && local.use_AFS_for_install && length(var.install_private_endpoint_id) == 0 ? 1 : 0 + count = var.use_private_endpoint && local.use_Azure_native_DNS && local.use_AFS_for_install && length(var.install_private_endpoint_id) == 0 ? 
1 : 0 name = replace( lower( format("%s", local.landscape_shared_install_storage_account_name) From cf073e31d5da1e9c29c3cb1cb9a057d8ff487ff1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 10:50:57 +0200 Subject: [PATCH 027/607] respect the end point flag --- .../terraform-units/modules/sap_landscape/storage_accounts.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index b08d475e82..4bb36ca98c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -432,7 +432,7 @@ resource "azurerm_private_endpoint" "transport" { azurerm_subnet.app, azurerm_private_dns_zone_virtual_network_link.vnet_sap_file ] - count = var.create_transport_storage && var.NFS_provider == "AFS" ? ( + count = var.create_transport_storage && var.use_private_endpoint && var.NFS_provider == "AFS" ? ( length(var.transport_storage_account_id) > 0 ? ( 0) : ( 1 @@ -678,7 +678,7 @@ resource "azurerm_private_endpoint" "install" { azurerm_storage_share.install, azurerm_storage_share.install_smb ] - count = local.use_AFS_for_install ? ( + count = local.use_AFS_for_install && var.use_private_endpoint ? ( length(var.install_private_endpoint_id) > 0 ? ( 0) : ( 1 From edebb7ac5d1a32e221041ee598f814eb72351b62 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 11:11:15 +0200 Subject: [PATCH 028/607] Provide the means to control the Utility VM disk size --- deploy/terraform/run/sap_landscape/tfvar_variables.tf | 5 +++++ deploy/terraform/run/sap_landscape/transform.tf | 10 ++++++++++ deploy/terraform/run/sap_landscape/variables_local.tf | 9 --------- .../terraform-units/modules/sap_landscape/vm.tf | 2 +- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 9a0c019f00..38a1b63d14 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -653,6 +653,11 @@ variable "utility_vm_size" { description = "The size of the utility_vm Virtual Machine" default = "Standard_D4ds_v4" } +variable "utility_vm_os_disk_size" { + description = "The size of the OS disk for the Virtual Machine" + default = "128" + } + variable "utility_vm_useDHCP" { description = "value indicating if utility_vm should use DHCP" diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index e1bd481403..09ff872be2 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -453,4 +453,14 @@ locals { ) : null) ) + vm_settings = { + count = var.utility_vm_count + size = var.utility_vm_size + use_DHCP = var.utility_vm_useDHCP + image = var.utility_vm_image + private_ip_address = var.utility_vm_nic_ips + disk_size = var.utility_vm_os_disk_size + + } + } diff --git a/deploy/terraform/run/sap_landscape/variables_local.tf b/deploy/terraform/run/sap_landscape/variables_local.tf index cb95c9ab88..ee1b19ad44 100644 --- a/deploy/terraform/run/sap_landscape/variables_local.tf +++ b/deploy/terraform/run/sap_landscape/variables_local.tf @@ -86,15 +86,6 @@ locals { null ) - vm_settings = { - count = var.utility_vm_count - size = var.utility_vm_size - use_DHCP = 
var.utility_vm_useDHCP - image = var.utility_vm_image - private_ip_address = var.utility_vm_nic_ips - - } - is_DNS_info_different = ( var.management_dns_subscription_id != data.azurerm_key_vault_secret.subscription_id.value ) || ( diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 19e17bc723..0876f06f54 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -77,7 +77,7 @@ resource "azurerm_windows_virtual_machine" "utility_vm" { ) caching = "ReadWrite" storage_account_type = "Premium_LRS" - disk_size_gb = 128 + disk_size_gb = try(var.vm_settings.disk_size, 128) } source_image_reference { From a48e28e9bf0723f653bbde1f3d66117d2d11702a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 11:19:08 +0200 Subject: [PATCH 029/607] Provide the ability to deploy a Linux Utility VM and also control the OS Disk type --- .../run/sap_landscape/tfvar_variables.tf | 5 ++ .../terraform/run/sap_landscape/transform.tf | 1 + .../modules/sap_landscape/vm.tf | 55 ++++++++++++++++++- 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 38a1b63d14..fec9230204 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -658,6 +658,11 @@ variable "utility_vm_os_disk_size" { default = "128" } +variable "utility_vm_os_disk_type" { + description = "The type of the OS disk for the Virtual Machine" + default = "Premium_LRS" + } + variable "utility_vm_useDHCP" { description = "value indicating if utility_vm should use DHCP" diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 09ff872be2..8cda150d6b 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -460,6 +460,7 @@ locals { image = var.utility_vm_image private_ip_address = var.utility_vm_nic_ips disk_size = var.utility_vm_os_disk_size + disk_type = var.utility_vm_os_disk_type } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 0876f06f54..a81109d114 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -43,7 +43,7 @@ resource "azurerm_network_interface" "utility_vm" { # Create the Windows Application VM(s) resource "azurerm_windows_virtual_machine" "utility_vm" { provider = azurerm.main - count = var.vm_settings.count + count = upper(var.vm_settings.image.os_type) == "WINDOWS" ? 
var.vm_settings.count : 0 name = format("%s%s%s%s%s", var.naming.resource_prefixes.vm, local.prefix, @@ -76,7 +76,58 @@ resource "azurerm_windows_virtual_machine" "utility_vm" { local.resource_suffixes.osdisk ) caching = "ReadWrite" - storage_account_type = "Premium_LRS" + storage_account_type = try(var.vm_settings.disk_type, "Premium_LRS") + disk_size_gb = try(var.vm_settings.disk_size, 128) + } + + source_image_reference { + publisher = var.vm_settings.image.publisher + offer = var.vm_settings.image.offer + sku = var.vm_settings.image.sku + version = var.vm_settings.image.version + } + + +} + +# Create the Linux Application VM(s) +resource "azurerm_linux_virtual_machine" "utility_vm" { + provider = azurerm.main + count = upper(var.vm_settings.image.os_type) == "LINUX" ? var.vm_settings.count : 0 + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.vm, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.WORKLOAD_VMNAME[count.index], + local.resource_suffixes.vm + ) + computer_name = var.naming.virtualmachine_names.WORKLOAD_VMNAME[count.index] + location = local.resource_group_exists ? ( + data.azurerm_resource_group.resource_group[0].location) : ( + azurerm_resource_group.resource_group[0].location + ) + resource_group_name = local.resource_group_exists ? ( + data.azurerm_resource_group.resource_group[0].name) : ( + azurerm_resource_group.resource_group[0].name + ) + tags = var.tags + network_interface_ids = [azurerm_network_interface.utility_vm[count.index].id] + + size = var.vm_settings.size + admin_username = local.iscsi.authentication.username + admin_password = local.iscsi_auth_password + disable_password_authentication = local.enable_iscsi_auth_key + + os_disk { + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.osdisk, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.WORKLOAD_VMNAME[count.index], + local.resource_suffixes.osdisk + ) + caching = "ReadWrite" + storage_account_type = try(var.vm_settings.disk_type, "Premium_LRS") disk_size_gb = try(var.vm_settings.disk_size, 128) } From 0ab9acc9e8a673a833cba9f9935d1700d3d24a96 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 11:46:32 +0200 Subject: [PATCH 030/607] Authentication for Linux utility VM --- .../modules/sap_landscape/vm.tf | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index a81109d114..206cb47bb3 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -114,22 +114,31 @@ resource "azurerm_linux_virtual_machine" "utility_vm" { network_interface_ids = [azurerm_network_interface.utility_vm[count.index].id] size = var.vm_settings.size - admin_username = local.iscsi.authentication.username - admin_password = local.iscsi_auth_password - disable_password_authentication = local.enable_iscsi_auth_key - - os_disk { - name = format("%s%s%s%s%s", - var.naming.resource_prefixes.osdisk, - local.prefix, - var.naming.separator, - var.naming.virtualmachine_names.WORKLOAD_VMNAME[count.index], - local.resource_suffixes.osdisk - ) - caching = "ReadWrite" - storage_account_type = try(var.vm_settings.disk_type, "Premium_LRS") - disk_size_gb = try(var.vm_settings.disk_size, 128) - } + admin_username = local.input_sid_username + admin_password = local.input_sid_password + disable_password_authentication = true + + 
dynamic "admin_ssh_key" { + for_each = range(1) + content { + username = local.input_sid_username + public_key = local.sid_public_key + } + } + + + os_disk { + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.osdisk, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.WORKLOAD_VMNAME[count.index], + local.resource_suffixes.osdisk + ) + caching = "ReadWrite" + storage_account_type = try(var.vm_settings.disk_type, "Premium_LRS") + disk_size_gb = try(var.vm_settings.disk_size, 128) + } source_image_reference { publisher = var.vm_settings.image.publisher From 4071db789e2796dbb75234deede4333a9717b22e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 12:16:20 +0200 Subject: [PATCH 031/607] Web App updates --- Webapp/SDAF/Models/LandscapeModel.cs | 5 +++++ .../ParameterDetails/LandscapeDetails.json | 18 ++++++++++++++++++ .../ParameterDetails/LandscapeTemplate.txt | 7 +++++++ 3 files changed, 30 insertions(+) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 2cbe93f304..834803e047 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -302,6 +302,11 @@ public bool IsValid() public string utility_vm_size { get; set; } + public string utility_vm_os_disk_size { get; set; } = "128"; + + public string utility_vm_os_disk_type { get; set; } = "Premium_LRS"; + + public bool? utility_vm_useDHCP { get; set; } = true; public Image utility_vm_image { get; set; } diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 1bd961661f..01c72b0f47 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -1104,6 +1104,24 @@ "Overrules": "", "Display": 3 }, + { + "Name": "utility_vm_os_disk_size", + "Required": false, + "Description": "Defines the size of the OS disk for the Virtual Machine. Default size is 128", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_os_disk_type", + "Required": false, + "Description": "Defines the type of the OS disk for the Virtual Machine. 
Default type is Premium_LRS.", "Type": "field", "Options": [], "Overrules": "", "Display": 3 }, { "Name": "utility_vm_useDHCP", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 68f5c53ef8..f45d742c3e 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -484,6 +484,13 @@ $$utility_vm_count$$ # Defines the SKU for the workload virtual machine $$utility_vm_size$$ +# Defines the size of the OS disk for the Virtual Machine +$$utility_vm_os_disk_size$$ + +# Defines the type of the OS disk for the Virtual Machine" +$$utility_vm_os_disk_type$$ + + # Defines if the utility virtual machine uses DHCP $$utility_vm_useDHCP$$ From 41c4d7f4712963e89519f7e8fe592954dbcc3242 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:05:58 +0200 Subject: [PATCH 032/607] Add the ability to provide persisted extra parameters --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 20915e2629..f374846b50 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -231,6 +231,8 @@ stages: export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; echo 'Workload Prefix' ${workload_prefix} fi + echo $EXTRA_PARAMETERS + echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" @@ -270,6 +272,7 @@ stages: CONFIG_REPO_PATH: ${{ parameters.config_repo_path }} BOM_BASE_NAME: ${{ parameters.bom_base_name }} SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sap_system_configuration_name }} + EXTRA_PARAMETERS: $(EXTRA_PARAMETERS) - template: templates\run-ansible.yaml parameters: displayName: "Parameter validation" From e6ff5f8089df345ad2c9c3c3ccad83db38f925bf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:12:56 +0200 Subject: [PATCH 033/607] show consolidated list --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index f374846b50..dfc0d118bb 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -231,7 +231,15 @@ stages: export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; echo 'Workload Prefix' ${workload_prefix} fi - echo $EXTRA_PARAMETERS + if [ $EXTRA_PARAMETERS = '$(EXTRA_PARAMETERS)' ]; then + echo "##vso[task.logissue type=warning]No extra parameters were provided."
+ new_parameters=$(${{ parameters.extra_params }}) + else + echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" + new_parameters=$(echo $EXTRA_PARAMETERS ${{ parameters.extra_params }}) + fi + + echo $new_parameters echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" From 089678f263fbabb2bd07a8b4a6a4db4e40986245 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:23:23 +0200 Subject: [PATCH 034/607] Parameter logic --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index dfc0d118bb..1ed74616af 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -233,10 +233,10 @@ stages: if [ $EXTRA_PARAMETERS = '$(EXTRA_PARAMETERS)' ]; then echo "##vso[task.logissue type=warning]No extra parameters were provided." - new_parameters=$(${{ parameters.extra_params }}) + new_parameters=$PIPELINE_EXTRA_PARAMETERS else echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" - new_parameters=$(echo $EXTRA_PARAMETERS ${{ parameters.extra_params }}) + new_parameters=$(echo $EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS) fi echo $new_parameters @@ -281,6 +281,7 @@ stages: BOM_BASE_NAME: ${{ parameters.bom_base_name }} SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sap_system_configuration_name }} EXTRA_PARAMETERS: $(EXTRA_PARAMETERS) + PIPELINE_EXTRA_PARAMETERS: ${{ parameters.extra_params }} - template: templates\run-ansible.yaml parameters: displayName: "Parameter validation" From a42711e45f0f54016290ba132bb40a313e58ee04 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:32:21 +0200 Subject: [PATCH 035/607] Concat the variables --- .../pipelines/05-DB-and-SAP-installation.yaml | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 1ed74616af..72000dde62 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -236,7 +236,7 @@ stages: new_parameters=$PIPELINE_EXTRA_PARAMETERS else echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" - new_parameters=$(echo $EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS) + new_parameters=$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS fi echo $new_parameters @@ -245,6 +245,8 @@ stages: echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" echo "##vso[task.setvariable variable=USERNAME_KEY_NAME;isOutput=true]${workload_prefix}-sid-username" + echo "##vso[task.setvariable variable=NEW_PARAMETERS;isOutput=true]${new_parameters}" + echo -e "$green--- az login ---$reset" #If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one @@ -293,7 +295,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ 
parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -310,7 +312,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -327,7 +329,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -344,7 +346,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -361,7 +363,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -378,7 +380,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -395,7 +397,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -412,7 +414,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -429,7 +431,7 @@ stages: parametersFolder: $(Preparation.FOLDER) 
sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -446,7 +448,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -463,7 +465,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) @@ -480,7 +482,7 @@ stages: parametersFolder: $(Preparation.FOLDER) sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" sidHosts: $(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} + extraParams: "$(Preparation.NEW_PARAMETERS)" azureClientId: $(ARM_CLIENT_ID) azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) From 7d669cbce68a0f9295d028e6d45428d5676a40dd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:37:52 +0200 Subject: [PATCH 036/607] Testing --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 72000dde62..4bffb0daab 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -236,10 +236,10 @@ stages: new_parameters=$PIPELINE_EXTRA_PARAMETERS else echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" - new_parameters=$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS + new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi - echo $new_parameters + echo "${new_parameters}" echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" From 5c1dc21d5b5ac09618d1e2c1b80372e9e82c90b8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 14 Dec 2023 14:46:53 +0200 Subject: [PATCH 037/607] Add the ability to source extra variables from the variable groups --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 2 -- deploy/pipelines/templates/run-ansible.yaml | 10 +++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 4bffb0daab..a0f60e3607 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -239,8 +239,6 @@ stages: 
new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi - echo "${new_parameters}" - echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" diff --git a/deploy/pipelines/templates/run-ansible.yaml b/deploy/pipelines/templates/run-ansible.yaml index d0bca0233b..9fa299f914 100644 --- a/deploy/pipelines/templates/run-ansible.yaml +++ b/deploy/pipelines/templates/run-ansible.yaml @@ -84,11 +84,11 @@ steps: if [ -f "${filename}" ]; then echo "##[group]- preconfiguration" - redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS $EXTRA_PARAMS $EXTRA_PARAM_FILE ${filename}" + redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS "$EXTRA_PARAMS" $EXTRA_PARAM_FILE ${filename}" echo "##[section]Executing [$redacted_command]..." command="ansible-playbook -i $INVENTORY --private-key $PARAMETERS_FOLDER/sshkey \ - -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' $EXTRA_PARAMS \ + -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' "$EXTRA_PARAMS" \ -e ansible_ssh_pass='${password_secret}' $EXTRA_PARAM_FILE ${filename}" eval $command @@ -100,11 +100,11 @@ steps: command="ansible-playbook -i $INVENTORY --private-key $PARAMETERS_FOLDER/sshkey \ -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' \ - -e ansible_ssh_pass='${password_secret}' $EXTRA_PARAMS $EXTRA_PARAM_FILE \ + -e ansible_ssh_pass='${password_secret}' "$EXTRA_PARAMS" $EXTRA_PARAM_FILE \ $ANSIBLE_FILE_PATH" - redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS $EXTRA_PARAMS $EXTRA_PARAM_FILE $ANSIBLE_FILE_PATH" + redacted_command="ansible-playbook -i $INVENTORY -e @$SAP_PARAMS "$EXTRA_PARAMS" $EXTRA_PARAM_FILE $ANSIBLE_FILE_PATH" echo "##[section]Executing [$redacted_command]..." echo "##[group]- output" @@ -132,7 +132,7 @@ steps: command="ansible-playbook -i "$INVENTORY" --private-key $PARAMETERS_FOLDER/sshkey \ -e @$SAP_PARAMS -e 'download_directory=$(Agent.TempDirectory)' -e '_workspace_directory=$PARAMETERS_FOLDER' \ - -e ansible_ssh_pass='${password_secret}' ${filename} $EXTRA_PARAMS $EXTRA_PARAM_FILE" + -e ansible_ssh_pass='${password_secret}' ${filename} "$EXTRA_PARAMS" $EXTRA_PARAM_FILE" eval $command return_code=$? 
From d7efc59fe132f963d04b5ffc922857e525efe988 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 14 Dec 2023 20:04:49 +0530 Subject: [PATCH 038/607] Remove unnecessary TMPDIR environment variable --- deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index d2fe11a9d9..8a52f09193 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -248,7 +248,6 @@ {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - TMPDIR: "{{ hdbuserstore_path }}" ssfs_connect: "1" register: hdbuserstore vars: From ae87a338c7a4e74475f7b4fbbc64ad9b3053ee2f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 15 Dec 2023 10:05:05 +0200 Subject: [PATCH 039/607] Update all packages on RHEL based systems --- .../tasks/1.4.3-update-packages-RedHat.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml index 5ae129385a..005cd06c82 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml @@ -7,7 +7,15 @@ path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/packages_installed.txt" register: packages_installed - +- name: "1.4 Packages: - Update packages" + become: true + ansible.builtin.yum: + name: '*' + state: latest + skip_broken: true + register: reboot_output + when: + - tier == 'os' # Analyse the package list for this distribution selecting only those # packages assigned to the active tier or 'all'. 
# - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest From 77cebbc7bfae0426f611c94f8fb6a916ff88cba0 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 15 Dec 2023 17:40:42 +0530 Subject: [PATCH 040/607] Fix typo in LandscapeTemplate.txt --- Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index f45d742c3e..6b1035f48f 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -487,7 +487,7 @@ $$utility_vm_size$$ # Defines the size of the OS disk for the Virtual Machine $$utility_vm_os_disk_size$$ -# Defines the type of the OS disk for the Virtual Machine" +# Defines the type of the OS disk for the Virtual Machine $$utility_vm_os_disk_type$$ From f86c1dee1f0a39bbe0b988aba436d76e4a1e2a0d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 15 Dec 2023 17:55:20 +0530 Subject: [PATCH 041/607] Update SDAF version to 3.10.1.0 --- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index c9d274a933..216b62e5a7 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -4,7 +4,7 @@ become_user_name: root # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.10.0.0" +SDAF_Version: "3.10.1.0" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index 37c18af77e..e05acb17e7 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.10.0.0 +3.10.1.0 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 3acb3532d2..db45433b12 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -14,7 +14,7 @@ $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName $ARM_TENANT_ID = $Env:ARM_TENANT_ID #endregion -$versionLabel="v3.10.0.0" +$versionLabel="v3.10.1.0" az logout From aa77536595bcb90bed146ba93cec306f37cbb69d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 15 Dec 2023 17:58:24 +0530 Subject: [PATCH 042/607] Remove duplicate failed_when condition in main.yaml --- deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 33026118db..46200cbfc0 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -77,7 +77,6 @@ - scs_high_availability register: cluster_group_location failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] - failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] tags: - skip_ansible_lint From f46bbfec00f075a61137db0e5cb4da01d1e5a202 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 15 Dec 2023 20:00:44 +0200 Subject: [PATCH 043/607] 
Squashed commit of the following: commit 51afc157af112a2c886dc658ce0461f016816262 Author: Harm Jan Stam Date: Fri Dec 15 18:45:21 2023 +0100 Bugfix kv-secrets debug and import tasks (#516) Because the operation variable has no default within the role, its existence should be checked. --- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 85940d854d..54840b167b 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -32,7 +32,9 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 - when: operation == "fencing" + when: + - operation is defined + - operation == "fencing" # -------------------------------------+---------------------------------------8 # @@ -40,6 +42,7 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: + - operation is defined - operation == "SoftwareAcquisition" @@ -49,6 +52,7 @@ - name: "0.2 Key Vault: - Import Fencing secrets" ansible.builtin.import_tasks: "fencing.yaml" when: + - operation is defined - operation == "fencing" - (database_high_availability and database_cluster_type == "AFA") or (scs_high_availability and scs_cluster_type == "AFA") # AFA (Azure Fencing Agent) @@ -62,6 +66,7 @@ ansible.builtin.import_tasks: "wincluster-witness.yaml" # TODO: update when clause more appropriately when: + - operation is defined - operation == "fencing" - (scs_high_availability or database_high_availability) - not use_msi_for_clusters From d375f8c93e22a1dc79aff19e8cded494750ba155 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 18 Dec 2023 15:32:22 +0530 Subject: [PATCH 044/607] Update keystore file handling and ACSS registration configuration --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml 
b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 100aab34b5..b5dfa42fe3 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -33,7 +33,7 @@ - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: - msg: + msg: - "Database is encrypted: {{ db_encrypted }}" - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" From 57e6a389cadc80ce3eac5ff6cbca8dde6d137fa2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 18 Dec 2023 15:36:49 +0530 Subject: [PATCH 046/607] Web App updates --- Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 6b1035f48f..a656a56a02 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -489,7 +489,7 @@ $$utility_vm_os_disk_size$$ # Defines the type of the OS disk for the Virtual Machine $$utility_vm_os_disk_type$$ - + # Defines if the utility virtual machine uses DHCP $$utility_vm_useDHCP$$ From 5dc0fc258179ef2444dbb4616a6da5667241b4d6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 18 Dec 2023 22:47:28 +0200 Subject: [PATCH 047/607] Systemd-Based SAP Startup Framework --- .../tasks/5.5.4.1-cluster-RedHat.yml | 40 ++++++++++++++++++ .../tasks/5.6.4.2-sap-resources-RedHat.yml | 41 +++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index de5df63a15..24544ccef7 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,6 +177,46 @@ loop_var: item failed_when: constraint.rc > 1 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" + when: ansible_distribution_major_version in ["8", "9"] + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + when: + - ansible_distribution_major_version in ["8", "9"] + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + block: | + [Unit] + Description=Pacemaker needs the SAP HANA instance service + Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service + After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service + register: dropinfile + +- name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: yes + when: + - dropinfile.changed + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework 
- END | +# | | +# +------------------------------------4--------------------------------------*/ + + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" ansible.builtin.shell: pcs property set maintenance-mode=false diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index ecfa7b6251..8811a4ac70 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -113,6 +113,47 @@ # ansible.builtin.wait_for: # timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" + when: ansible_distribution_major_version in ["8", "9"] + +- name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" + when: + - ansible_distribution_major_version in ["8", "9"] + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: '{{ dropfile }}' + create: true + block: | + [Service] + Restart=no + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_var: dropfile + register: dropinfile + +- name: "5.6 SCSERS - systemd reload" + ansible.builtin.systemd: + daemon_reload: yes + when: + - dropinfile.changed + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ + # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | # | These are common tasks From 7c7d3dc84260bc7b6d67a103aa5f5a2a9acd5707 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 19 Dec 2023 16:00:50 +0530 Subject: [PATCH 048/607] Update HANA Cluster and SCSERS profiles --- .../tasks/5.5.4.1-cluster-RedHat.yml | 7 +- .../tasks/5.6.4.1-scsersprofile.yaml | 30 ++++-- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 100 ++++++++++++++---- 3 files changed, 101 insertions(+), 36 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 24544ccef7..a8afdbe600 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -182,15 +182,14 @@ # | Systemd-Based SAP Startup Framework - BEGIN | # | | # +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" - when: ansible_distribution_major_version in ["8", "9"] + 
is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" when: - - ansible_distribution_major_version in ["8", "9"] - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml index 911ebd9b86..476ad229ba 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml @@ -31,8 +31,14 @@ ansible.builtin.replace: backup: true path: /usr/sap/sapservices - regexp: '^LD_LIBRARY_PATH=' - replace: "#LD_LIBRARY_PATH=" + regexp: '^([^#\n].+{{ sapservice }}.+)$' + replace: '# \1' + loop: + - "{{ sap_sid | upper }}_{{ instance_type | upper }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" + - "{{ sap_sid | upper }}_ERS{{ ers_instance_number }}_{{ ers_virtual_hostname }}" + loop_control: + label: "{{ sapservice }}" + loop_var: sapservice when: - ansible_os_family | upper == "REDHAT" @@ -43,8 +49,10 @@ - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - ansible_os_family | upper == "SUSE" + - inventory_hostname == primary_instance_name block: - - name: "5.6 SCSERS - ASCS Profile - add service/halib" ansible.builtin.blockinfile: path: '{{ scs_instance_profile_path }}' @@ -60,12 +68,13 @@ service/halib = $(DIR_CT_RUN)/saphascriptco.so service/halib_cluster_connector = /usr/bin/sap_suse_cluster_connector register: ersservicehalib - when: - - ansible_os_family | upper == "SUSE" - - inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - ansible_os_family | upper == "REDHAT" + - inventory_hostname == primary_instance_name block: - name: "5.6 SCSERS - ASCS Profile - add service/halib" ansible.builtin.blockinfile: @@ -82,12 +91,12 @@ service/halib = $(DIR_CT_RUN)/saphascriptco.so service/halib_cluster_connector = /usr/bin/sap_cluster_connector register: ersservicehalib - when: - - ansible_os_family | upper == "REDHAT" - - inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - ASCS, ERS profile changes" become: true + when: + - inventory_hostname == primary_instance_name block: # SAP introduced support for enqueue server 2, including replication, as of SAP NW 7.52. 
# Starting with ABAP Platform 1809, enqueue server 2 is installed by default @@ -116,8 +125,7 @@ replace: '# Autostart' tags: - ersautostart - when: - - inventory_hostname == primary_instance_name + # Following are the changes in ASCS/ERS profiles based if ENSA1 is applicable - name: "5.6 SCSERS - Add the keep alive parameter, if using ENSA1" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 8811a4ac70..40b4b5504b 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -121,32 +121,90 @@ - name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" - when: ansible_distribution_major_version in ["8", "9"] + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" -- name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP<SID>_<InstanceNumber>.service +- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" when: - - ansible_distribution_major_version in ["8", "9"] - is_rhel_82_or_newer is defined - is_rhel_82_or_newer - become: true - ansible.builtin.blockinfile: - path: '{{ dropfile }}' - create: true - block: | - [Service] - Restart=no - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_var: dropfile - register: dropinfile - -- name: "5.6 SCSERS - systemd reload" - ansible.builtin.systemd: - daemon_reload: yes + ansible.builtin.stat: + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + register: systemd_service_file_path + loop: + - "{{ scs_instance_number }}" + - "{{ ers_instance_number }}" + loop_control: + loop_var: sap_instance_number + +- name: "5.6 SCSERS - Set fact for the systemd services existence" + ansible.builtin.set_fact: + systemd_service_names: "{{ + systemd_service_file_path.results + | selectattr('stat.exists') + | map(attribute='stat.exists') + | regex_replace('/etc/systemd/system/', '') + | list + }}" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + +- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" when: - - dropinfile.changed + - systemd_service_names is defined + - systemd_service_names | length > 0 + block: + - name: "5.6 SCSERS - Disable the services if they exist" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.systemd: + name: "{{ service_name }}" + enabled: false + loop: "{{ systemd_service_names }}" + loop_control: + loop_var: service_name + + - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + block: >- + [Service] + Restart=no + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid 
| upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + register: dropinfile + + - name: "5.6 SCSERS - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + - name: "5.6 SCSERS - validate that the drop-in file is active" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.shell: >- + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + register: dropinfile_validation + changed_when: false + failed_when: dropinfile_validation.rc > 0 + # /*---------------------------------------------------------------------------8 # | | From d2c6b0d154c03254254bc1c7feaf72f14326631d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 13:35:46 +0200 Subject: [PATCH 049/607] Debugging drop file --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 1 - .../5.6-scsers-pacemaker/tasks/main.yml | 100 ++++++++++++++++++ 2 files changed, 100 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 40b4b5504b..05dcd51470 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -145,7 +145,6 @@ | selectattr('stat.exists') | map(attribute='stat.exists') | regex_replace('/etc/systemd/system/', '') - | list }}" when: - is_rhel_82_or_newer is defined diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index e4ce2853c7..7989ed3df6 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -26,6 +26,106 @@ become: true become_user: root +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP_.service +- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.stat: + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + register: systemd_service_file_path + loop: + - "{{ scs_instance_number }}" + - "{{ ers_instance_number }}" + loop_control: + loop_var: sap_instance_number + +- name: "5.6 SCSERS - Set fact for the systemd services existance" + ansible.builtin.set_fact: + systemd_service_names: "{{ + systemd_service_file_path.results + | selectattr('stat.exists') + | map(attribute='stat.exists') + | regex_replace('/etc/systemd/system/', '') + }}" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + +- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" + when: + - systemd_service_names is defined + - systemd_service_names | length > 0 + block: + - name: "5.6 SCSERS - Disable the services if they 
exist" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.systemd: + name: "{{ service_name }}" + enabled: false + loop: "{{ systemd_service_names }}" + loop_control: + loop_var: service_name + + - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + block: >- + [Service] + Restart=no + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + register: dropinfile + + - name: "5.6 SCSERS - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + - name: "5.6 SCSERS - validate that the drop-in file is active" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.shell: >- + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + register: dropinfile_validation + changed_when: false + failed_when: dropinfile_validation.rc > 0 + + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ + + + - name: "5.6 SCSERS Pacemaker - provision" ansible.builtin.include_tasks: file: 5.6.4-provision.yml From 573ff041b5e2689efcb630132f21c6f38c2ec2fb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 13:49:54 +0200 Subject: [PATCH 050/607] debug the services retrieval --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 7989ed3df6..c4bf020175 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -55,8 +55,8 @@ ansible.builtin.set_fact: systemd_service_names: "{{ systemd_service_file_path.results - | selectattr('stat.exists') - | map(attribute='stat.exists') + | selectattr('stat.exists', 'equalto', true) + | map(attribute='stat.path') | regex_replace('/etc/systemd/system/', '') }}" when: From ffd0f985abef80c301f2524f1f5de48bae3d6be6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 14:02:54 +0200 Subject: [PATCH 051/607] don't fail if service does not exist --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index c4bf020175..bed4e83f22 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -75,6 +75,8 @@ ansible.builtin.systemd: name: "{{ service_name }}" enabled: false + failed_when: false + loop: "{{ systemd_service_names }}" loop: "{{ systemd_service_names }}" loop_control: loop_var: service_name From ab3d678ff249965653aae80efaa7a6c1ba3d671f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 14:27:48 +0200 Subject: [PATCH 052/607] Management DNS group --- deploy/terraform/run/sap_system/module.tf | 
8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 3ecce5b028..aeef2a126d 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -80,7 +80,7 @@ module "common_infrastructure" { key_vault = local.key_vault landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider @@ -142,8 +142,8 @@ module "hdb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, local.saplib_subscription_id) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options @@ -263,7 +263,7 @@ module "anydb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? 
local.custom_names : module.sap_namegenerator.naming options = local.options From a378058943b9c3d2534dd8ddc10a8dbac36dd774 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 14:34:56 +0200 Subject: [PATCH 053/607] Fix the output --- deploy/terraform/run/sap_system/output.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_system/output.tf b/deploy/terraform/run/sap_system/output.tf index d885e49689..f1ad2197e8 100644 --- a/deploy/terraform/run/sap_system/output.tf +++ b/deploy/terraform/run/sap_system/output.tf @@ -115,7 +115,7 @@ output "management_dns_subscription_id" { } output "management_dns_resourcegroup_name" { description = "Resource group name for DNS resource group" - value = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) + value = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) } From 4988f167f4b82b21eb79408f2a3445d51ca1f37e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 16:49:45 +0200 Subject: [PATCH 054/607] Add newline --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index bed4e83f22..2fe30bc6ef 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -94,7 +94,7 @@ group: root mode: '0644' block: >- - [Service] + [Service]\n Restart=no loop: - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" From 3d88e10ec4378a78dfb5f57748a6a104290e3568 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 17:04:03 +0200 Subject: [PATCH 055/607] switch to lineinfile --- .../5.6-scsers-pacemaker/tasks/main.yml | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 2fe30bc6ef..38271fcdee 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -86,16 +86,34 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true - ansible.builtin.blockinfile: + ansible.builtin.lineinfile: path: '{{ dropfile }}' create: true backup: true owner: root group: root mode: '0644' - block: >- - [Service]\n - Restart=no + line: "[Service]" + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + + - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.lineinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Service]$' + line: "Restart=no" loop: - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" From bc926ee3298b6584651dd548b66b374f7801db2b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 17:31:08 
+0200 Subject: [PATCH 056/607] Moving the task to the correct place --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 34 ++++- .../5.6-scsers-pacemaker/tasks/main.yml | 120 ------------------ 2 files changed, 27 insertions(+), 127 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 05dcd51470..cf5f995011 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -112,7 +112,6 @@ # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: # timeout: 120 - # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - BEGIN | @@ -142,8 +141,8 @@ ansible.builtin.set_fact: systemd_service_names: "{{ systemd_service_file_path.results - | selectattr('stat.exists') - | map(attribute='stat.exists') + | selectattr('stat.exists', 'equalto', true) + | map(attribute='stat.path') | regex_replace('/etc/systemd/system/', '') }}" when: @@ -162,6 +161,8 @@ ansible.builtin.systemd: name: "{{ service_name }}" enabled: false + failed_when: false + loop: "{{ systemd_service_names }}" loop: "{{ systemd_service_names }}" loop_control: loop_var: service_name @@ -171,16 +172,34 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true - ansible.builtin.blockinfile: + ansible.builtin.lineinfile: path: '{{ dropfile }}' create: true backup: true owner: root group: root mode: '0644' - block: >- - [Service] - Restart=no + line: "[Service]" + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + + - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.lineinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Service]$' + line: "Restart=no" loop: - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" @@ -211,6 +230,7 @@ # | | # +------------------------------------4--------------------------------------*/ + # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | # | These are common tasks diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 38271fcdee..e4ce2853c7 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -26,126 +26,6 @@ become: true become_user: root -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - BEGIN | -# | | -# +------------------------------------4--------------------------------------*/ - -- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | 
default(false) }}" - -# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services -# the path for the service file is /etc/systemd/system/SAP_.service -- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.stat: - path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" - register: systemd_service_file_path - loop: - - "{{ scs_instance_number }}" - - "{{ ers_instance_number }}" - loop_control: - loop_var: sap_instance_number - -- name: "5.6 SCSERS - Set fact for the systemd services existance" - ansible.builtin.set_fact: - systemd_service_names: "{{ - systemd_service_file_path.results - | selectattr('stat.exists', 'equalto', true) - | map(attribute='stat.path') - | regex_replace('/etc/systemd/system/', '') - }}" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - -- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" - when: - - systemd_service_names is defined - - systemd_service_names | length > 0 - block: - - name: "5.6 SCSERS - Disable the services if they exist" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.systemd: - name: "{{ service_name }}" - enabled: false - failed_when: false - loop: "{{ systemd_service_names }}" - loop: "{{ systemd_service_names }}" - loop_control: - loop_var: service_name - - - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Service]" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - - - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Service]$' - line: "Restart=no" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - register: dropinfile - - - name: "5.6 SCSERS - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - - name: "5.6 SCSERS - validate that the drop-in file is active" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' - register: dropinfile_validation - changed_when: false - failed_when: dropinfile_validation.rc > 0 - - -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - END | -# | | -# +------------------------------------4--------------------------------------*/ - - - - name: "5.6 SCSERS Pacemaker - provision" ansible.builtin.include_tasks: file: 5.6.4-provision.yml From 14266c0e0624af5adda90a5de16fc0ac7c6da56c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 18:05:32 +0200 Subject: 
[PATCH 057/607] don't use the .d --- .../5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index cf5f995011..4225fd4087 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -218,7 +218,7 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' register: dropinfile_validation changed_when: false failed_when: dropinfile_validation.rc > 0 From 4c5cf632834cb8fa82cfc0d6c76365b950272e99 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 18:24:55 +0200 Subject: [PATCH 058/607] Don't do the validation --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 4225fd4087..2ba31f9a3d 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -213,15 +213,15 @@ when: - dropinfile.changed - - name: "5.6 SCSERS - validate that the drop-in file is active" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - register: dropinfile_validation - changed_when: false - failed_when: dropinfile_validation.rc > 0 + # - name: "5.6 SCSERS - validate that the drop-in file is active" + # when: + # - is_rhel_82_or_newer is defined + # - is_rhel_82_or_newer + # ansible.builtin.shell: >- + # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' + # register: dropinfile_validation + # changed_when: false + # failed_when: dropinfile_validation.rc > 0 # /*---------------------------------------------------------------------------8 From 751f5f604ede5cbf8fb2482fa5915beb4dfa29fe Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 19:12:50 +0200 Subject: [PATCH 059/607] add a try statement --- deploy/terraform/run/sap_system/providers.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 0803a66ccc..7c3632ecf5 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -37,7 +37,7 @@ provider "azurerm" { provider "azurerm" { features {} alias = "dnsmanagement" - subscription_id = length(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id) > 1 ? data.terraform_remote_state.landscape.outputs.management_dns_subscription_id : length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null + subscription_id = length(try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, "")) > 1 ? 
data.terraform_remote_state.landscape.outputs.management_dns_subscription_id : length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null client_id = local.cp_spn.client_id client_secret = local.cp_spn.client_secret tenant_id = local.cp_spn.tenant_id From 3d5fcd9f7d77c34b41771604c10e1eda0ae4aba4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 20:54:59 +0200 Subject: [PATCH 060/607] move from blockinfile --- .../tasks/5.5.4.1-cluster-RedHat.yml | 66 ++++++++++++++----- 1 file changed, 51 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index a8afdbe600..cfbe4119ad 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -189,25 +189,61 @@ is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + block: when: - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true - ansible.builtin.blockinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - block: | - [Unit] - Description=Pacemaker needs the SAP HANA instance service - Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service - After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service - register: dropinfile - -- name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: yes - when: - - dropinfile.changed + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: yes + when: + - dropinfile.changed # /*---------------------------------------------------------------------------8 # | | From 791ebd0f22ebae2f590f221f485d6bc126e31a56 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 21:15:31 +0200 Subject: [PATCH 061/607] Revert "Bugfix 
kv-secrets debug and import tasks (#516)" This reverts commit 51afc157af112a2c886dc658ce0461f016816262. --- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 54840b167b..85940d854d 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -32,9 +32,7 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 - when: - - operation is defined - - operation == "fencing" + when: operation == "fencing" # -------------------------------------+---------------------------------------8 # @@ -42,7 +40,6 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation is defined - operation == "SoftwareAcquisition" @@ -52,7 +49,6 @@ - name: "0.2 Key Vault: - Import Fencing secrets" ansible.builtin.import_tasks: "fencing.yaml" when: - - operation is defined - operation == "fencing" - (database_high_availability and database_cluster_type == "AFA") or (scs_high_availability and scs_cluster_type == "AFA") # AFA (Azure Fencing Agent) @@ -66,7 +62,6 @@ ansible.builtin.import_tasks: "wincluster-witness.yaml" # TODO: update when clause more appropriately when: - - operation is defined - operation == "fencing" - (scs_high_availability or database_high_availability) - not use_msi_for_clusters From e49aab81167fb210a6de866d2c83630b635c0d7c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 21:19:55 +0200 Subject: [PATCH 062/607] lint --- .../5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index cfbe4119ad..999efcae65 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -189,12 +189,11 @@ is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - block: + become: true when: - is_rhel_82_or_newer is defined - is_rhel_82_or_newer - become: true - + block: - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf From c2ca5f759c81bb9f943b78a2bd0176c0d5c259ba Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 21:25:52 +0200 Subject: [PATCH 063/607] indentation --- .../tasks/5.5.4.1-cluster-RedHat.yml | 138 +++++++++--------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 999efcae65..73e13fbc0b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup 
Framework - BEGIN | -# | | -# +------------------------------------4--------------------------------------*/ -# Follow steps described in https://access.redhat.com/articles/6884531 - -- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: yes + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true when: - - dropinfile.changed + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + 
ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - END | -# | | -# +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" From 3ec8eb35cada0ef2e7098dd549f58b3618c35be1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 21:42:10 +0200 Subject: [PATCH 064/607] fix indentation --- .../5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 73e13fbc0b..f3542be700 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -236,7 +236,7 @@ mode: '0644' insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile + register: dropinfile - name: "5.5.4.1 HANA Cluster configuration - systemd reload" ansible.builtin.systemd: From e9b61e3455d25fb9fe5766c37a2f88ed9901e404 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 01:14:11 +0530 Subject: [PATCH 065/607] idnentation --- .../tasks/5.5.4.1-cluster-RedHat.yml | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 73e13fbc0b..ddf840a6ed 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 - - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - 
ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: 
/etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" From 512c520fced4918fc69df1d0017ca4b13da124bc Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 01:15:29 +0530 Subject: [PATCH 066/607] Revert "idnentation" This reverts commit e9b61e3455d25fb9fe5766c37a2f88ed9901e404. --- .../tasks/5.5.4.1-cluster-RedHat.yml | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index ddf840a6ed..73e13fbc0b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 - - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: 
'^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" 
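The commits above converge on assembling /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf one line at a time with ansible.builtin.lineinfile, after the folded block scalar (>-) handed to blockinfile collapsed the unit lines onto a single line. For reference, a single templated write produces the same drop-in in one idempotent task. The sketch below is illustrative only and is not part of the patch series; it assumes the same db_sid and db_instance_number variables used by the tasks above.

- name: "5.5.4.1 HANA Cluster configuration - (systemd) write pacemaker drop-in (sketch)"
  become: true
  ansible.builtin.copy:
    dest: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf
    owner: root
    group: root
    mode: '0644'
    backup: true
    # One write replaces the four chained lineinfile tasks above;
    # the literal newlines avoid the folding issue seen with block: >-
    content: |
      [Unit]
      Description=Pacemaker needs the SAP HANA instance service
      Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service
      After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service
  register: dropinfile

- name: "5.5.4.1 HANA Cluster configuration - systemd reload (sketch)"
  ansible.builtin.systemd:
    daemon_reload: true
  when: dropinfile.changed

One caveat about the lineinfile chain itself: insertafter is evaluated as a regular expression, so a pattern such as '^[Unit]$' is a single-character class rather than the literal [Unit] line. Because lineinfile falls back to appending at end of file when the pattern never matches, the resulting ordering is still correct here, but '^\[Unit\]$' would express the intent exactly.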
From 3d4c77c42c5385f228e50c8d61e4a89ee774e474 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 01:31:56 +0530 Subject: [PATCH 067/607] Refactor HANA Cluster configuration for systemd-based SAP Startup Framework --- .../tasks/5.5.4.1-cluster-RedHat.yml | 147 +++++++++--------- 1 file changed, 73 insertions(+), 74 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 73e13fbc0b..711587ae0b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,80 +177,6 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 - - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ - - - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" ansible.builtin.shell: pcs property set maintenance-mode=false @@ -305,3 +231,76 @@ when: ansible_hostname == primary_instance_name # End of HANA 
clustering resources + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 + +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ From 669a761720b2d1cccc708a2f69914f787ebc0cf4 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 01:55:19 +0530 Subject: [PATCH 068/607] Add Azure DevOps Agent configuration script to configure the agent directory and set environment variables for SAP automation deployment in the configure deployer script. --- deploy/scripts/configure_deployer.sh | 82 ++++++++++++++++++---------- deploy/scripts/setup_ado.sh | 9 +-- 2 files changed, 54 insertions(+), 37 deletions(-) diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index d414fb942e..66f2fd0954 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -36,6 +36,13 @@ # Setup some useful shell options # +# Check if the script is running as root +if [[ $EUID -eq 0 ]]; then + echo "This script should not be run as root or with sudo. Please run as a regular user." 
+ exit 1 +fi + + # Print expanded commands as they are about to be executed set -o xtrace @@ -733,11 +740,50 @@ export PATH="${PATH}":"${ansible_bin}":"${tf_bin}":"${HOME}"/Azure_SAP_Automated echo "# Configure environment settings for deployer interactive sessions" | tee -a /tmp/deploy_server.sh echo "export ARM_SUBSCRIPTION_ID=${subscription_id}" | tee -a /tmp/deploy_server.sh -echo "export SAP_AUTOMATION_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh -echo "export DEPLOYMENT_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh -echo "export CONFIG_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/WORKSPACES" | tee -a /tmp/deploy_server.sh -echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/ansible" | tee -a /tmp/deploy_server.sh +# Replace with your actual agent directory +AGENT_DIR="/home/${USER}/agent" + +# Check if the .agent file exists +if [ -f "$AGENT_DIR/.agent" ]; then + echo "Azure DevOps Agent is configured." + echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}" | tee -a /tmp/deploy_server.sh +else + echo "Azure DevOps Agent is not configured." + + echo "export SAP_AUTOMATION_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh + echo "export DEPLOYMENT_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/sap-automation" | tee -a /tmp/deploy_server.sh + echo "export CONFIG_REPO_PATH=$HOME/Azure_SAP_Automated_Deployment/WORKSPACES" | tee -a /tmp/deploy_server.sh + + echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts:${HOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/ansible" | tee -a /tmp/deploy_server.sh + + # Set env for MSI + echo "export ARM_USE_MSI=true" | tee -a /tmp/deploy_server.sh + + /usr/bin/az login --identity 2>error.log || : + # Ensure that the user's account is logged in to Azure with specified creds + + if [ ! -f error.log ]; then + /usr/bin/az account show > az.json + client_id=$(jq --raw-output .id az.json) + tenant_id=$(jq --raw-output .tenantId az.json) + rm az.json + else + client_id='' + tenant_id='' + fi + + if [ -n "${client_id}" ]; then + export ARM_CLIENT_ID=${client_id} + echo "export ARM_CLIENT_ID=${client_id}" | tee -a /tmp/deploy_server.sh + fi + + if [ -n "${tenant_id}" ]; then + export ARM_TENANT_ID=${tenant_id} + echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh + fi +fi + # Set env for ansible echo "export ANSIBLE_HOST_KEY_CHECKING=False" | tee -a /tmp/deploy_server.sh @@ -753,43 +799,19 @@ case "$(get_distro_name)" in (sles) echo "export DOTNET_ROOT=${DOTNET_ROOT}" | tee -a /tmp/deploy_server.sh ;; - (rhel*) +(rhel*) ;; esac chown -R "${USER}" "${asad_home}" -# Set env for MSI -echo "export ARM_USE_MSI=true" | tee -a /tmp/deploy_server.sh - -/usr/bin/az login --identity 2>error.log || : -# Ensure that the user's account is logged in to Azure with specified creds - -if [ ! 
-f error.log ]; then
-    /usr/bin/az account show > az.json
-    client_id=$(jq --raw-output .id az.json)
-    tenant_id=$(jq --raw-output .tenantId az.json)
-    rm az.json
-else
-    client_id=''
-    tenant_id=''
-fi
-
-if [ -n "${client_id}" ]; then
-    export ARM_CLIENT_ID=${client_id}
-    echo "export ARM_CLIENT_ID=${client_id}" | tee -a /tmp/deploy_server.sh
-fi
-
-if [ -n "${tenant_id}" ]; then
-    export ARM_TENANT_ID=${tenant_id}
-    echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh
-fi
-

 # echo "export DOTNET_ROOT=/snap/dotnet-sdk/current" | tee -a /tmp/deploy_server.sh

 # Ensure that the user's account is logged in to Azure with specified creds

 echo 'az login --identity --output none' | tee -a /tmp/deploy_server.sh
+# shellcheck disable=SC2016
 echo 'echo ${USER} account ready for use with Azure SAP Automated Deployment' | tee -a /tmp/deploy_server.sh

 sudo cp /tmp/deploy_server.sh /etc/profile.d/deploy_server.sh
diff --git a/deploy/scripts/setup_ado.sh b/deploy/scripts/setup_ado.sh
index cbf7c6994c..1a983b083b 100755
--- a/deploy/scripts/setup_ado.sh
+++ b/deploy/scripts/setup_ado.sh
@@ -13,15 +13,10 @@ tar zxvf agent.tar.gz

 # run the configuration script
-./config.sh 
+./config.sh

 # automatic start configuration after VM reboot
-sudo ./svc.sh install azureadm
+sudo ./svc.sh install "${USER}"

 # start the daemon
 sudo ./svc.sh start
-
-# Install dotnet for the web app
-sudo snap install dotnet-sdk --classic --channel=3.1
-sudo snap alias dotnet-sdk.dotnet dotnet
-export DOTNET_ROOT=/snap/dotnet-sdk/current
\ No newline at end of file
From 9644e91fb7a45158efb11aad0974dcd8c378895d Mon Sep 17 00:00:00 2001
From: hdamecharla
Date: Wed, 20 Dec 2023 02:06:16 +0530
Subject: [PATCH 069/607] Update ansible_core_version to ansible_version

---
 .../modules/sap_deployer/templates/configure_deployer.sh.tmpl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl
index e34382baf7..89e7301f81 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl
@@ -61,14 +61,13 @@ subscription_id="${subscription_id}"
 tenant_id="${tenant_id}"
 local_user="${local_user}"
 use_webapp="${use_webapp}"
-ansible_core_version="${ansible_core_version}"
+ansible_version="${ansible_core_version}"
 TOKEN="${pat}"
 DEVURL="${ado_repo}"
 POOL="${pool}"
 AGENTNAME=$(hostname)

-
 #
 # Ansible Version settings
 #
From 557242f605c68197f181ad09228b50b1c7a1446b Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 20 Dec 2023 00:07:13 +0200
Subject: [PATCH 070/607] clean up

---
 deploy/scripts/configure_deployer.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh
index 66f2fd0954..d3c2a9ba90 100755
--- a/deploy/scripts/configure_deployer.sh
+++ b/deploy/scripts/configure_deployer.sh
@@ -461,6 +461,8 @@ wget -nv -O /tmp/"${tf_zip}" "https://releases.hashicorp.com/terraform/${tfversi
 sudo unzip -o /tmp/"${tf_zip}" -d "${tf_dir}"
 sudo ln -vfs "../$(basename "${tf_dir}")/terraform" "${tf_bin}/terraform"

+sudo rm /tmp/"${tf_zip}"
+
 # Uninstall Azure CLI - For some platforms
 case "$(get_distro_name)" in
 (ubuntu|sles)
@@ -815,6 +817,7 @@ echo 'az login --identity --output none' | tee -a /tmp/deploy_server.sh
 echo 'echo ${USER} account ready 
for use with Azure SAP Automated Deployment' | tee -a /tmp/deploy_server.sh sudo cp /tmp/deploy_server.sh /etc/profile.d/deploy_server.sh +sudo rm /tmp/deploy_server.sh /usr/bin/az login --identity --output none echo "${USER} account ready for use with Azure SAP Automated Deployment" From 5427e1aca4faed0d2c6e8eba4e1b217c91b917c1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 20 Dec 2023 12:35:39 +0200 Subject: [PATCH 071/607] Fix the search path --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 94193865cc..14d34a4f28 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -60,13 +60,17 @@ public: true when: bom.InstanceType is defined +- name: "DBLoad: register application type" + ansible.builtin.set_fact: + application_type: "{% if instance_type == 'SCS' %}JAVA{% else %}ABAP{% endif %}" + - name: "DBLoad: - Set the product ID" ansible.builtin.set_fact: product_id: "{{ bom.product_ids.dbl }}" - name: "DBLoad: - Set dbload path" ansible.builtin.set_fact: - db_load_path: "{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}/sapinst_instdir/{{ product_id.replace('.', '/').replace('/' + instance_type, '').split(':')[1] }}/INSTALL/DISTRIBUTED/{{ instance_type }}/DB" + db_load_path: "{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}/sapinst_instdir/{{ product_id.replace('.', '/').replace('/' + application_type, '').split(':')[1] }}/INSTALL/DISTRIBUTED/{{ application_type }}/DB" - name: "DBLoad: Check if DBLoad has been tried on this server" ansible.builtin.stat: @@ -92,7 +96,7 @@ - name: "DBLoad: flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_dbload.txt" - state: touch + state: touch mode: 0755 when: - dbload_performed_according_to_sapinst is defined From 40315ad44c02abe1a4034c4035f06f1e1b26a3f0 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 16:32:01 +0530 Subject: [PATCH 072/607] Add Systemd-Based SAP Startup Framework for SLES --- .../tasks/5.6.1-set_runtime_facts.yml | 27 +++-- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 113 ++---------------- ...5.6.4.2-sap-resources-Suse-SimpleMount.yml | 23 ++++ .../tasks/5.6.4.2-sap-resources-Suse.yml | 23 ++++ .../tasks/5.6.7-config-systemd-sap-start.yml | 96 +++++++++++++++ 5 files changed, 170 insertions(+), 112 deletions(-) create mode 100644 deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index d2481bdd39..c587ccce48 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -36,7 +36,7 @@ verbosity: 2 # Returns bom object -- name: "5.6 SCSERS: SCS HA Install - Register BoM" +- name: "5.6 SCSERS - SCS HA Install - Register BoM" ansible.builtin.include_role: name: roles-sap/3.3.1-bom-utility tasks_from: bom-register @@ -46,12 +46,12 @@ sa_enabled: true when: bom is not defined -- name: "5.6 SCS HA Install: Default instance type" +- name: "5.6 SCSERS - HA Install: Default instance type" ansible.builtin.set_fact: instance_type: "ASCS" when: bom.InstanceType 
is not defined -- name: "5.6 SCS HA Install: register instance type" +- name: "5.6 SCSERS - HA Install: register instance type" ansible.builtin.set_fact: instance_type: >- {%- set _instance_type = 'ASCS' -%} @@ -66,7 +66,7 @@ public: true when: bom.InstanceType is defined -- name: "5.6 SCS HA Install: Populate InstanceName for cluster resource" +- name: "5.6 SCSERS - HA Install: Populate InstanceName for cluster resource" ansible.builtin.set_fact: instance_name: "{{ sap_sid }}_{{ instance_type }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" start_profile_path: "/sapmnt/{{ sap_sid }}/profile/{{ sap_sid }}_{{ instance_type }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" @@ -74,7 +74,7 @@ # Set fact for the timeout value of monitor operation for filesystem and ASCS/ERS resources # Since we only configure NFS4.1, the timeout value is set to 40 seconds for non-ANF and 105 seconds for ANF -- name: "5.6 SCS HA install calculate filesystem timeout" +- name: "5.6 SCSERS - HA install calculate filesystem timeout" ansible.builtin.set_fact: clus_fs_mon_timeout: >- {%- set _timeoutvalue = 40 -%} @@ -91,7 +91,7 @@ when: - scs_high_availability -- name: "5.6 SCS HA install calculate SAP resource monitor timeout" +- name: "5.6 SCSERS - HA install calculate SAP resource monitor timeout" ansible.builtin.set_fact: clus_sap_mon_timeout: >- {%- set _timeoutvalue = 60 -%} @@ -108,19 +108,28 @@ when: - scs_high_availability -- name: "ASCS/ERS check if installed" +- name: "5.6 SCSERS - ASCS/ERS check if installed" become: true block: - - name: "SCS HA Install: check if installed" + - name: "5.6 SCSERS - SCS HA Install: check if installed" ansible.builtin.stat: path: /etc/sap_deployment_automation//{{ sap_sid | upper }}/sap_deployment_scs.txt register: scs_installed - - name: "ERS Install: check if installed" + - name: "5.6 SCSERS - ERS Install: check if installed" ansible.builtin.stat: path: /etc/sap_deployment_automation//{{ sap_sid | upper }}/sap_deployment_ers.txt register: ers_installed + +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.6 SCSERS - check if the OS version is SLES 15 or newer" + ansible.builtin.set_fact: + is_sles_15_or_newer: "{{ ansible_distribution_version is version('15', '>=') | default(false) }}" + # /*---------------------------------------------------------------------------8 # | END | # +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 2ba31f9a3d..36ab353f22 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -112,117 +112,24 @@ # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: # timeout: 120 + # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - BEGIN | # | | # +------------------------------------4--------------------------------------*/ -- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) 
}}" - -# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services -# the path for the service file is /etc/systemd/system/SAP_.service -- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.stat: - path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" - register: systemd_service_file_path - loop: - - "{{ scs_instance_number }}" - - "{{ ers_instance_number }}" - loop_control: - loop_var: sap_instance_number - -- name: "5.6 SCSERS - Set fact for the systemd services existance" - ansible.builtin.set_fact: - systemd_service_names: "{{ - systemd_service_file_path.results - | selectattr('stat.exists', 'equalto', true) - | map(attribute='stat.path') - | regex_replace('/etc/systemd/system/', '') - }}" +- name: "5.6 SCSERS - RHEL - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" when: - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - -- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" - when: - - systemd_service_names is defined - - systemd_service_names | length > 0 - block: - - name: "5.6 SCSERS - Disable the services if they exist" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.systemd: - name: "{{ service_name }}" - enabled: false - failed_when: false - loop: "{{ systemd_service_names }}" - loop: "{{ systemd_service_names }}" - loop_control: - loop_var: service_name - - - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Service]" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - - - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Service]$' - line: "Restart=no" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - register: dropinfile - - - name: "5.6 SCSERS - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # - name: "5.6 SCSERS - validate that the drop-in file is active" - # when: - # - is_rhel_82_or_newer is defined - # - is_rhel_82_or_newer - # ansible.builtin.shell: >- - # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - # register: dropinfile_validation - # changed_when: false - # failed_when: dropinfile_validation.rc > 0 - + - is_rhel_82_or_newer | default(false) # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml 
b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml index 70d48b8a17..5777de86a3 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml @@ -187,6 +187,29 @@ ansible.builtin.wait_for: timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - SLES - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" + when: + - is_sles_15_or_newer is defined + - is_sles_15_or_newer | default(false) + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml index 25836d2594..63d0223f5c 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml @@ -148,6 +148,29 @@ ansible.builtin.wait_for: timeout: 120 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - SLES - Systemd-Based SAP Startup Framework" + ansible.builtin.include_tasks: + file: "5.6.7-config-systemd-sap-start.yml" + apply: + become: true + become_user: root + tags: + - "5.6.7-config-systemd-sap-start" + when: + - is_sles_15_or_newer is defined + - is_sles_15_or_newer | default(false) + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml new file mode 100644 index 0000000000..1004843dd3 --- /dev/null +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -0,0 +1,96 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP_.service +- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as 
systemd services"
+  ansible.builtin.stat:
+    path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service"
+  register: systemd_service_file_path
+  loop:
+    - "{{ scs_instance_number }}"
+    - "{{ ers_instance_number }}"
+  loop_control:
+    loop_var: sap_instance_number
+
+- name: "5.6 SCSERS - Set fact for the systemd services existence"
+  ansible.builtin.set_fact:
+    systemd_service_names: "{{
+                              systemd_service_file_path.results
+                              | selectattr('stat.exists', 'equalto', true)
+                              | map(attribute='stat.path')
+                              | regex_replace('/etc/systemd/system/', '')
+                            }}"
+
+
+- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist"
+  when:
+    - systemd_service_names is defined
+    - systemd_service_names | length > 0
+  block:
+    - name: "5.6 SCSERS - Disable the services if they exist"
+      ansible.builtin.systemd:
+        name: "{{ service_name }}"
+        enabled: false
+        failed_when: false
+      loop: "{{ systemd_service_names }}"
+      loop_control:
+        loop_var: service_name
+
+    - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file"
+      become: true
+      ansible.builtin.lineinfile:
+        path: '{{ dropfile }}'
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        line: "[Service]"
+      loop:
+        - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf"
+        - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf"
+      loop_control:
+        loop_var: dropfile
+
+    - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file"
+      become: true
+      ansible.builtin.lineinfile:
+        path: '{{ dropfile }}'
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        insertafter: '^[Service]$'
+        line: "Restart=no"
+      loop:
+        - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf"
+        - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf"
+      loop_control:
+        loop_var: dropfile
+      register: dropinfile
+
+    - name: "5.6 SCSERS - systemd reload"
+      ansible.builtin.systemd:
+        daemon_reload: true
+      when:
+        - dropinfile.changed
+
+  # - name: "5.6 SCSERS - validate that the drop-in file is active"
+  #   when:
+  #   ansible.builtin.shell: >-
+  #     systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service'
+  #   register: dropinfile_validation
+  #   changed_when: false
+  #   failed_when: dropinfile_validation.rc > 0
+
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |      Systemd-Based SAP Startup Framework - END                             |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
From af9eb2c3d551d48b962dc4b1122a4f094bce0b64 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 20 Dec 2023 14:47:42 +0200
Subject: [PATCH 073/607] Add the fqdn for the hostname

---
 deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml
index 8a52f09193..1a1d3709b1 100644
--- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml
+++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml
@@ -33,8 +33,8 @@
     sap_inifile: "{{ bom_base_name }}-pas-{{ sid_to_be_deployed.sid | lower }}-{{ ansible_hostname }}.params"
    sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-pas-inifile-param.j2"
     dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | lower }}-params"
-    db_lb_virtual_host_HANA: "{% if 
database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" - db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ db_server_temp }}{% endif %}" - name: "PAS Install: Set BOM facts db host" ansible.builtin.set_fact: From 92b44b1d11e2eb2b8194c9e60dde79df7835c994 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 20 Dec 2023 19:03:13 +0530 Subject: [PATCH 074/607] Disable and stop services, set cluster out of maintenance mode --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 25 ++++---- ...5.6.4.2-sap-resources-Suse-SimpleMount.yml | 58 +++++++++++-------- .../tasks/5.6.4.2-sap-resources-Suse.yml | 58 +++++++++++-------- .../tasks/5.6.7-config-systemd-sap-start.yml | 14 ++++- 4 files changed, 94 insertions(+), 61 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 36ab353f22..2c335ac124 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -52,12 +52,12 @@ - name: "5.6 SCSERS - RHEL - ENSA1 - Bring Primary node online" ansible.builtin.command: pcs node unstandby {{ primary_instance_name }} - - name: "5.6 SCSERS - RHEL - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: pcs property set maintenance-mode=false + # - name: "5.6 SCSERS - RHEL - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: pcs property set maintenance-mode=false - - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 # Use the following if using ENSA2 - name: "5.6 SCSERS - RHEL - SAP Resources - Cluster Configuration after Install" @@ -106,8 +106,8 @@ register: co_location failed_when: co_location.rc > 1 - - name: "5.6 SCSERS - RHEL - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: pcs property set maintenance-mode=false + # - name: "5.6 SCSERS - RHEL - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: pcs property set maintenance-mode=false # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: @@ -140,10 +140,11 @@ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -# | These are common tasks +# | These are common tasks | # +------------------------------------+---------------------------------------*| -- name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" - ansible.builtin.command: pcs property set 
maintenance-mode=true + +# - name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" +# ansible.builtin.command: pcs property set maintenance-mode=true - name: "5.6 SCSERS - RHEL - Reboot and wait 5 minutes" ansible.builtin.debug: @@ -151,8 +152,8 @@ - name: "5.6 SCSERS - RHEL - Reboot the primary/secondary instance" ansible.builtin.reboot: -# reboot_timeout: 300 -# post_reboot_delay: 300 + reboot_timeout: 300 + post_reboot_delay: 300 - name: "5.6 SCSERS - RHEL - Set the Cluster out of maintenance mode" ansible.builtin.command: pcs property set maintenance-mode=false diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml index 5777de86a3..9405b3e2d9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse-SimpleMount.yml @@ -88,12 +88,12 @@ - name: " Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" - ansible.builtin.pause: - seconds: 120 + # - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" + # ansible.builtin.pause: + # seconds: 120 # [1] Create the SAP cluster resources - if using ENSA2 - @@ -180,12 +180,12 @@ - name: "5.6 SCSERS - SUSE - ENSA2 - Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 # /*---------------------------------------------------------------------------8 # | | @@ -213,24 +213,36 @@ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -- name: "5.6 SCSERS - SUSE - Set the cluster on maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=true - -- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" +- name: "Get the cluster maintenance mode status" + ansible.builtin.shell: crm configure get_property maintenance-mode + register: get_status_maintenance_mode + changed_when: false + ignore_errors: true + +- name: "Set the cluster maintenance mode if not already in maintenance mode" + ansible.builtin.shell: crm configure property maintenance-mode=true + when: >- + get_status_maintenance_mode.stdout is not search('true') or + get_status_maintenance_mode.stdout is search('false') + +- name: "5.6 SCSERS - SLES - Reboot and wait 5 minutes" + ansible.builtin.debug: + msg: "Reboot and wait 5 minutes" + +- name: "5.6 
SCSERS - SUSE - Reboot the primary/secondary instance" ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 300 - + reboot_timeout: 300 + post_reboot_delay: 300 -- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false +- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" + ansible.builtin.command: crm configure property maintenance-mode=false -- name: "5.6 SCSERS - SUSE - cluster group validation" +- name: "5.6 SCSERS - SUSE - cluster group validation" ansible.builtin.include_tasks: - file: "5.6.6-validate.yml" + file: "5.6.6-validate.yml" apply: - become: true - become_user: root + become: true + become_user: root tags: - "5.6.6-validate" when: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml index 63d0223f5c..8b5d47f597 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml @@ -69,12 +69,12 @@ # - name: " Bring primary node online " # ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" - ansible.builtin.pause: - seconds: 120 + # - name: "5.6 SCSERS - SUSE - ENSA1 - pause the execution for 2 minutes" + # ansible.builtin.pause: + # seconds: 120 # [1] Create the SAP cluster resources - if using ENSA2 - @@ -141,12 +141,12 @@ - name: "5.6 SCSERS - SUSE - ENSA2 - Bring primary node online " ansible.builtin.command: crm node online {{ primary_instance_name }} - - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false + # - name: "5.6 SCSERS - SUSE - ENSA2 - Set the Cluster out of maintenance mode" + # ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" - ansible.builtin.wait_for: - timeout: 120 + # - name: "5.6 SCSERS - SUSE - ENSA2 - pause the execution for 2 minutes" + # ansible.builtin.wait_for: + # timeout: 120 # /*---------------------------------------------------------------------------8 # | | @@ -174,24 +174,36 @@ # +------------------------------------4---------------------------------------*| # | 5.6.6-validate.yml - Validate the SAP ASCS/SCS and ERS resources | -- name: "5.6 SCSERS - SUSE - Set the cluster on maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=true - -- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" +- name: "Get the cluster maintenance mode status" + ansible.builtin.shell: crm configure get_property maintenance-mode + register: get_status_maintenance_mode + changed_when: false + ignore_errors: true + +- name: "Set the cluster maintenance mode if not already in maintenance mode" + ansible.builtin.shell: crm configure property maintenance-mode=true + when: >- + get_status_maintenance_mode.stdout is not search('true') or + get_status_maintenance_mode.stdout is 
search('false') + +- name: "5.6 SCSERS - SLES - Reboot and wait 5 minutes" + ansible.builtin.debug: + msg: "Reboot and wait 5 minutes" + +- name: "5.6 SCSERS - SUSE - Reboot the primary/secondary instance" ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 300 - + reboot_timeout: 300 + post_reboot_delay: 300 -- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" - ansible.builtin.command: crm configure property maintenance-mode=false +- name: "5.6 SCSERS - SUSE - Set the Cluster out of maintenance mode" + ansible.builtin.command: crm configure property maintenance-mode=false -- name: "5.6 SCSERS - SUSE - cluster group validation" +- name: "5.6 SCSERS - SUSE - cluster group validation" ansible.builtin.include_tasks: - file: "5.6.6-validate.yml" + file: "5.6.6-validate.yml" apply: - become: true - become_user: root + become: true + become_user: root tags: - "5.6.6-validate" when: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 1004843dd3..c6a8ab8664 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -25,17 +25,25 @@ | regex_replace('/etc/systemd/system/', '') }}" - - name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" when: - systemd_service_names is defined - systemd_service_names | length > 0 block: - - name: "5.6 SCSERS - Disable the services if they exist" + # - name: "5.6 SCSERS - Disable the services if they exist" + # ansible.builtin.systemd: + # name: "{{ service_name }}" + # enabled: false + # failed_when: false + # loop: "{{ systemd_service_names }}" + # loop_control: + # loop_var: service_name + + - name: "5.6 SCSERS - Disable and Stop the services if they exist" ansible.builtin.systemd: name: "{{ service_name }}" enabled: false - failed_when: false + state: "stopped" loop: "{{ systemd_service_names }}" loop_control: loop_var: service_name From 54bd49141d85140848d019c218905d872c5e9927 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 20 Dec 2023 16:07:32 +0200 Subject: [PATCH 075/607] Attempt to fix the conditional --- .../1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml index 3da499c9ff..d08271213f 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml @@ -251,7 +251,7 @@ - name: "Verify that the packages are the right version" ansible.builtin.assert: that: - - "ansible_facts.packages['{{ packages_list.name }}'][0].version is version('{{ packages_list.version }}', '>=', version_type='{{ packages_list.version_type }}')" + - 'ansible_facts.packages[ packages_list.name ][0].version is version( packages_list.version , ">=", version_type= packages_list.version_type )' fail_msg: "{{ packages_list.name }} version is not greater than {{ packages_list.version }}" success_msg: "{{ packages_list.name }} version is greater than {{ packages_list.version }}" register: package_version_results From c46440faa53d969b62fcf6ef2d2f9bbe4e23bbe1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: 
Wed, 20 Dec 2023 23:15:20 +0200 Subject: [PATCH 076/607] remove jinja from asserts --- .../playbook_00_validate_parameters.yaml | 60 +++++++++---------- .../roles-misc/0.1-passwords/tasks/main.yaml | 6 +- .../tasks/main.yaml | 4 +- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index a6760e35ae..9e16908bc8 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -62,20 +62,20 @@ - db_high_availability is defined - database_high_availability is not defined - - name: "0.0 Validations - Check required variables are present and not empty" + - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" - fail_msg: "{{ item_to_check.error }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 + fail_msg: "{{ item_to_check.error }}" loop: - - { parameter: "database_high_availability", error: "database_high_availability has no value assigned" } - - { parameter: "database_cluster_type", error: "database_cluster_type has no value assigned" } - - { parameter: "scs_high_availability", error: "scs_high_availability has no value assigned" } - - { parameter: "scs_cluster_type", error: "scs_cluster_type has no value assigned" } - - { parameter: "use_msi_for_clusters", error: "use_msi_for_clusters has no value assigned" } - - { parameter: "platform", error: "platform has no value assigned" } + - { parameter: "database_high_availability", error: "database_high_availability has no value assigned" } + - { parameter: "database_cluster_type", error: "database_cluster_type has no value assigned" } + - { parameter: "scs_high_availability", error: "scs_high_availability has no value assigned" } + - { parameter: "scs_cluster_type", error: "scs_cluster_type has no value assigned" } + - { parameter: "use_msi_for_clusters", error: "use_msi_for_clusters has no value assigned" } + - { parameter: "platform", error: "platform has no value assigned" } loop_control: loop_var: item_to_check @@ -84,16 +84,16 @@ # Show parameters used for cluster aware coding # # -------------------------------------+---------------------------------------8 - - name: "Cluster aware code..." + - name: "Cluster aware code..." 
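+      # debug output below appears only at verbosity 2 or higher (ansible-playbook -vv)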
ansible.builtin.debug: - msg: # Best method for formatting output with Azure Devops Logs - - "database_high_availability: {{ database_high_availability }}" - - "database_cluster_type: {{ database_cluster_type }}" - - "scs_high_availability: {{ scs_high_availability }}" - - "scs_cluster_type: {{ scs_cluster_type }}" - - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - - "platform: {{ platform | upper }}" - verbosity: 2 + msg: # Best method for formatting output with Azure Devops Logs + - "database_high_availability: {{ database_high_availability }}" + - "database_cluster_type: {{ database_cluster_type }}" + - "scs_high_availability: {{ scs_high_availability }}" + - "scs_cluster_type: {{ scs_cluster_type }}" + - "use_msi_for_clusters: {{ use_msi_for_clusters }}" + - "platform: {{ platform | upper }}" + verbosity: 2 # -------------------------------------+ # Fencing support is only needed when: @@ -118,17 +118,17 @@ # when: (database_high_availability and database_cluster_type == "AFA") or # (scs_high_availability and scs_cluster_type == "AFA") - - name: "0.0 Validations - Retrieve the Fencing SPN details" + - name: "0.0 Validations - Retrieve the Fencing SPN details" ansible.builtin.include_role: - name: roles-misc/0.2-kv-secrets + name: roles-misc/0.2-kv-secrets vars: - operation: fencing + operation: fencing when: - - (database_high_availability and database_cluster_type == "AFA") or - (scs_high_availability and scs_cluster_type == "AFA") - - platform != "ORACLE" + - (database_high_availability and database_cluster_type == "AFA") or + (scs_high_availability and scs_cluster_type == "AFA") + - platform != "ORACLE" tags: - - kv-secrets + - kv-secrets # -------------------------------------+ @@ -152,9 +152,9 @@ - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml index 79f1967c1f..b53c029d68 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml @@ -97,9 +97,9 @@ - name: "0.1 Password: - Ensure the password is set" ansible.builtin.assert: that: - - "{{ sap_password is defined }}" # Has the variable been defined - - "{{ sap_password | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ sap_password | trim | length > 8 }}" + - sap_password is defined # Has the variable been defined + - sap_password | type_debug != 'NoneType' # Is the variable not empty" + - sap_password | trim | length > 8 fail_msg: "The SAP main password was not set in key vault" - name: "0.1 Password: - Show SAP Password" diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 3f69f6df37..ad2a99d7c0 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ 
b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -167,8 +167,8 @@ - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - - "{{ sapbits_sas_token is defined }}" # Has the variable been defined - - "{{ sapbits_sas_token | trim | length > 1 }}" # Does the variable have a value + - sapbits_sas_token is defined # Has the variable been defined + - sapbits_sas_token | trim | length > 1 # Does the variable have a value fail_msg: >- "The variable 'sapbits_sas_token' is not defined or is empty. Please provide it in the deployer key vault, sap-parameters file or pass it in as a parameter." From 30919ae0152c5ef33df73229114700507dc1ff53 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 20 Dec 2023 23:19:30 +0200 Subject: [PATCH 077/607] Change asserts --- .../playbook_00_validate_parameters.yaml | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index 9e16908bc8..6bc62e5426 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -217,9 +217,9 @@ - name: "0.0 Validations - Check required SCS HA fencing variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -248,9 +248,9 @@ - name: "0.0 Validations - Check required Database HA variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -369,8 +369,8 @@ - name: "0.0 Validations - Check for free disk space on deployer" ansible.builtin.assert: - that: "{{ (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (deployer_free_temp_disk_space | int) }}" - fail_msg: "The deployer needs at least {{ deployer_free_temp_disk_space }} GB of free disk space in /mnt" + that: (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (deployer_free_temp_disk_space | int) + fail_msg: "The deployer needs at least {{ deployer_free_temp_disk_space }} GB of free disk space in /mnt" when: - mnt_free_diskspace | length > 0 tags: @@ -473,7 +473,7 @@ - name: Validate SCS and PAS instance numbers ansible.builtin.assert: that: - - "scs_instance_number != pas_instance_number" + - scs_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number when installing PAS on ASCS" when: (ansible_play_hosts_all | length) == 2 tags: @@ -482,7 +482,7 @@ - name: "0.0 Validations - Validate SCS and PAS instance numbers" 
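      # standalone installs co-locate PAS with SCS and the DB, so each needs a distinct instance number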
ansible.builtin.assert: that: - - "scs_instance_number != pas_instance_number" + - scs_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: @@ -491,7 +491,7 @@ - name: "0.0 Validations - Validate DB and PAS instance numbers" ansible.builtin.assert: that: - - "db_instance_number != pas_instance_number" + - db_instance_number != pas_instance_number fail_msg: "Please ensure that the pas_instance_number is different from the db_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: @@ -611,12 +611,12 @@ - ansible_os_family != "Windows" - name: "0.0 Validations - Create validation-done flag" - delegate_to: localhost + delegate_to: localhost become: false ansible.builtin.file: - path: "{{ _workspace_directory }}/.progress/validation-done" - state: touch - mode: 0755 + path: "{{ _workspace_directory }}/.progress/validation-done" + state: touch + mode: 0755 - name: "0.0 Validations - Netmask" ansible.builtin.debug: From 296c3355d2d2a7d1618807492bd76c7533b314dc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 20 Dec 2023 23:36:03 +0200 Subject: [PATCH 078/607] Ansible 2.15 support --- deploy/ansible/playbook_00_validate_parameters.yaml | 8 ++++---- .../tasks/5.6.7-config-systemd-sap-start.yml | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index 6bc62e5426..e8ee37e142 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -187,9 +187,9 @@ - name: "0.0 Validations - Check required SCS HA variables are present and not empty" ansible.builtin.assert: that: - - "{{ item_to_check.parameter is defined }}" # Has the variable been defined - - "{{ item_to_check.parameter | type_debug != 'NoneType' }}" # Is the variable not empty" - - "{{ item_to_check.parameter | trim | length > 1 }}" + - item_to_check.parameter is defined # Has the variable been defined + - item_to_check.parameter | type_debug != 'NoneType' # Is the variable not empty" + - item_to_check.parameter | trim | length > 1 fail_msg: "{{ item_to_check.error }}" loop: - { @@ -671,7 +671,7 @@ - name: "0.0 Validations - Check for free disk space on SCS" ansible.builtin.assert: that: - - "{{ (free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (scs_free_diskspace | int) }}" + - (free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (scs_free_diskspace | int) fail_msg: "The SCS server needs at least {{ scs_free_diskspace }} GB of free disk space in /mnt" tags: - 0.0-scs-diskspace diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index c6a8ab8664..a10a9b7975 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -44,6 +44,7 @@ name: "{{ service_name }}" enabled: false state: "stopped" + failed_when: false loop: "{{ systemd_service_names }}" loop_control: loop_var: service_name From cc59c23240e9154a5f43f4ec569955d70342e194 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 00:03:07 +0200 Subject: [PATCH 079/607] Fix variable name --- 
.../modules/sap_system/app_tier/variables_local.tf | 4 ++-- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf index 9e41a55356..2674a42f6d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf @@ -235,7 +235,7 @@ locals { scs_nic_secondary_ips = try(var.application_tier.scs_nic_secondary_ips, []) scs_admin_nic_ips = try(var.application_tier.scs_admin_nic_ips, []) - webdispatcher_loadbalancer_ips = try(var.application_tier.webdispatcher_loadbalancer_ips, []) + webdispatcher_loadbalancer_ips = try(var.application_tier.webdispatcher_loadbalancer_ips, []) web_nic_ips = try(var.application_tier.web_nic_ips, []) web_nic_secondary_ips = try(var.application_tier.web_nic_secondary_ips, []) web_admin_nic_ips = try(var.application_tier.web_admin_nic_ips, []) @@ -283,7 +283,7 @@ locals { // Default VM config should be merged with any the user passes in - app_sizing = local.enable_deployment && local.application_server_count > 0 ? ( + app_sizing = local.enable_deployment ? ( lookup(local.sizes.app, local.vm_sizing_dictionary_key)) : ( null ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 8b188e5f19..1aa569af37 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "scs" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { From 35df090b5b8335fdc341ebde6bd40aa32be85407 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 15:10:50 +0530 Subject: [PATCH 080/607] Update systemd configuration for 5.6 SCSERS Pacemaker --- .../tasks/5.6.7-config-systemd-sap-start.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index a10a9b7975..2bbaad04a2 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -40,6 +40,8 @@ # loop_var: service_name - name: "5.6 SCSERS - Disable and Stop the services if they exist" + become: true + become_user: root ansible.builtin.systemd: name: "{{ service_name }}" enabled: false @@ -51,6 +53,7 @@ - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" become: true + become_user: root ansible.builtin.lineinfile: path: '{{ dropfile }}' create: true @@ -67,6 +70,7 @@ - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" become: true + become_user: root ansible.builtin.lineinfile: path: '{{ dropfile }}' create: true From 766a42fe1cc8a511106b112dc381d528f4269d59 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 11:47:16 +0200 Subject: [PATCH 081/607] 
Simplify AVSet logic --- .../modules/sap_system/app_tier/infrastructure.tf | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index 89b4e78cb1..ef4d627f53 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -297,7 +297,7 @@ resource "azurerm_lb_rule" "fs" { resource "azurerm_availability_set" "scs" { provider = azurerm.main count = local.enable_deployment && local.use_scs_avset ? ( - max(length(local.scs_zones), 1)) : ( + length(var.ppg)) : ( 0 ) name = format("%s%s%s", @@ -309,10 +309,7 @@ resource "azurerm_availability_set" "scs" { resource_group_name = var.resource_group[0].name platform_update_domain_count = 20 platform_fault_domain_count = local.faultdomain_count - proximity_placement_group_id = try(local.scs_zonal_deployment ? ( - var.ppg[count.index % length(local.scs_zones)]) : ( - var.ppg[0] - ), null) + proximity_placement_group_id = var.ppg[count.index] managed = true tags = var.tags } @@ -325,7 +322,7 @@ resource "azurerm_availability_set" "scs" { resource "azurerm_availability_set" "app" { provider = azurerm.main count = local.use_app_avset && length(var.application_tier.avset_arm_ids) == 0 ? ( - max(length(local.app_zones), 1)) : ( + length(var.ppg)) : ( 0 ) name = format("%s%s%s", @@ -337,10 +334,7 @@ resource "azurerm_availability_set" "app" { resource_group_name = var.resource_group[0].name platform_update_domain_count = 20 platform_fault_domain_count = local.faultdomain_count - proximity_placement_group_id = try(local.app_zonal_deployment ? ( - var.ppg[count.index % local.app_zone_count]) : ( - var.ppg[0] - ), null) + proximity_placement_group_id = var.ppg[count.index] managed = true tags = var.tags } From a4c0caa39b766fc1fc6942625dccb8c4416109ae Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 13:02:23 +0200 Subject: [PATCH 082/607] avset logic update --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 9424de5b79..d235889295 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -131,8 +131,8 @@ resource "azurerm_linux_virtual_machine" "app" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_app_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? 
( - var.application_tier.avset_arm_ids[count.index % max(local.app_zone_count, 1)]) : ( - azurerm_availability_set.app[count.index % max(local.app_zone_count, 1)].id + var.application_tier.avset_arm_ids[count.index % max(length(var.ppg), 1)]) : ( + azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id )) : ( null ) From f8b18c783ecc4b132b477726ad8f6aeb62bf0c18 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 13:30:48 +0200 Subject: [PATCH 083/607] PPG count --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index d235889295..73573a3b7c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -124,7 +124,8 @@ resource "azurerm_linux_virtual_machine" "app" { resource_group_name = var.resource_group[0].name proximity_placement_group_id = var.application_tier.app_use_ppg ? ( - local.app_zonal_deployment ? var.ppg[count.index % max(local.app_zone_count, 1)] : var.ppg[0]) : ( + + var.ppg[count.index % max(length(var.ppg), 1)]) : ( null ) From 9ccf99d9e8cb531d0722139f7e9b49708313d9ac Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 18:38:17 +0530 Subject: [PATCH 084/607] Disable and Stop services if they exist in 5.6 SCSERS Pacemaker role --- .../tasks/5.6.7-config-systemd-sap-start.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 2bbaad04a2..5175f6f4d9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -39,6 +39,7 @@ # loop_control: # loop_var: service_name + - name: "5.6 SCSERS - Disable and Stop the services if they exist" become: true become_user: root From b5e9e779f21930dd5b9d7649fe8b9fdd58fcba22 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 18:38:40 +0530 Subject: [PATCH 085/607] remove extra line in tasks --- .../tasks/5.6.7-config-systemd-sap-start.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 5175f6f4d9..2bbaad04a2 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -39,7 +39,6 @@ # loop_control: # loop_var: service_name - - name: "5.6 SCSERS - Disable and Stop the services if they exist" become: true become_user: root From 83e91810c8ae76d67d2521e5f62c2f34612645ab Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 19:05:40 +0530 Subject: [PATCH 086/607] Add DB load balancer port check for PAS installation --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 1a1d3709b1..3150fa2257 100644 --- 
a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -61,6 +61,19 @@ path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_pas.txt" register: pas_installed +- name: "PAS Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + register: db_port_open + when: database_high_availability + +- name: "PAS Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + var: db_port_open + when: database_high_availability + # Returns bom object - name: "PAS Install: Register BoM" ansible.builtin.include_role: From 905972a5a839a03ff5b4803d12fea10f7917b894 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 19:09:57 +0530 Subject: [PATCH 087/607] additional debug --- .../ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 3150fa2257..738cff5c44 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -64,15 +64,19 @@ - name: "PAS Install: Check if the DB load balancer port is available and listening" ansible.builtin.wait_for: host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}13" + port: "3{{ db_instance_number }}18" state: started register: db_port_open - when: database_high_availability + when: + - database_high_availability + - platform == "HANA" - name: "PAS Install: DEBUG - DB Loadbalancer check" ansible.builtin.debug: var: db_port_open - when: database_high_availability + when: + - database_high_availability + - platform == "HANA" # Returns bom object - name: "PAS Install: Register BoM" From d44386b8013dd41786ee022e14bc3bc58c754820 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 19:29:57 +0530 Subject: [PATCH 088/607] Refactor pipeline script to handle extra parameters and improve logging --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index a0f60e3607..abc9eab357 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -232,10 +232,9 @@ stages: fi if [ $EXTRA_PARAMETERS = '$(EXTRA_PARAMETERS)' ]; then - echo "##vso[task.logissue type=warning]No extra parameters were provided." 
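# In Azure DevOps, macro syntax such as $(EXTRA_PARAMETERS) is substituted
# before the script runs, and an undefined macro is left in place as literal
# text; the test above relies on that to detect "variable never set". Note
# that the unquoted [ ] form word-splits a multi-word value, which is why the
# follow-up commits below move to a quoted [[ ]] comparison. A hedged sketch
# of the same guard (variable names follow the surrounding script):
#   if [[ "${EXTRA_PARAMETERS}" == '$(EXTRA_PARAMETERS)' ]]; then
#     new_parameters="${PIPELINE_EXTRA_PARAMETERS}"
#   else
#     new_parameters="${EXTRA_PARAMETERS} ${PIPELINE_EXTRA_PARAMETERS}"
#   fi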
new_parameters=$PIPELINE_EXTRA_PARAMETERS else - echo "##vso[task.logissue type=warning]Extra parameters were provided: $EXTRA_PARAMETERS" + echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi @@ -247,7 +246,7 @@ stages: echo -e "$green--- az login ---$reset" - #If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one + # If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one deployer_file=/etc/profile.d/deploy_server.sh az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none az account set --subscription $AZURE_SUBSCRIPTION_ID @@ -257,7 +256,6 @@ stages: echo -e "$boldred--- Login failed ---$reset" echo "##vso[task.logissue type=error]az login failed." exit $return_code - fi az keyvault secret show --name ${workload_prefix}-sid-sshkey --vault-name $workload_key_vault --query value -o tsv > artifacts/${SAP_SYSTEM_CONFIGURATION_NAME}_sshkey From 733430b3eda94b604e397c4234dd58ea6c2ff506 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 19:33:51 +0530 Subject: [PATCH 089/607] Refactor PAS installation tasks and add DB load balancer port check --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 738cff5c44..504f628f4a 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -61,23 +61,6 @@ path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_pas.txt" register: pas_installed -- name: "PAS Install: Check if the DB load balancer port is available and listening" - ansible.builtin.wait_for: - host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}18" - state: started - register: db_port_open - when: - - database_high_availability - - platform == "HANA" - -- name: "PAS Install: DEBUG - DB Loadbalancer check" - ansible.builtin.debug: - var: db_port_open - when: - - database_high_availability - - platform == "HANA" - # Returns bom object - name: "PAS Install: Register BoM" ansible.builtin.include_role: @@ -107,6 +90,25 @@ pas_bom_instance_type: "{% if bom.InstanceType is defined %}{{ bom.InstanceType }}{% else %}ABAP{% endif %}" public: true +- name: "PAS Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}18" + state: started + timeout: 30 + register: db_port_open + failed_when: false + when: + - database_high_availability + - platform == "HANA" + +- name: "PAS Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + var: db_port_open + when: + - database_high_availability + - platform == "HANA" + - name: "PAS Install: Set schema_name variable for HANA" when: platform == "HANA" block: From d339a8994cac88f936816a5d6c016f19dbbcfc36 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 20:08:00 +0530 Subject: [PATCH 090/607] Fix PAS Install failure and handle extra parameters in pipeline --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 12 +++++++++++- deploy/pipelines/05-DB-and-SAP-installation.yaml | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff 
--git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 504f628f4a..d3ee798924 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -96,6 +96,7 @@ port: "3{{ db_instance_number }}18" state: started timeout: 30 + msg: 'INSTALL:0026:PAS Install failed, database is unreachable.' register: db_port_open failed_when: false when: @@ -104,10 +105,19 @@ - name: "PAS Install: DEBUG - DB Loadbalancer check" ansible.builtin.debug: - var: db_port_open + msg: "{{ db_port_open.msg }}" when: - database_high_availability - platform == "HANA" + - db_port_open.msg is defined + +- name: "ErrorHandling" + ansible.builtin.fail: + msg: "INSTALL:0026:PAS Install failed, database is unreachable." + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined - name: "PAS Install: Set schema_name variable for HANA" when: platform == "HANA" diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index abc9eab357..c46a88dd40 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -231,7 +231,7 @@ stages: export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; echo 'Workload Prefix' ${workload_prefix} fi - if [ $EXTRA_PARAMETERS = '$(EXTRA_PARAMETERS)' ]; then + if [[ $EXTRA_PARAMETERS == $(EXTRA_PARAMETERS) ]]; then new_parameters=$PIPELINE_EXTRA_PARAMETERS else echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" From 3ec16285e9eb3c43a3986cce9ebadd5f35529036 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 20:16:28 +0530 Subject: [PATCH 091/607] Fix comparison operator in pipeline script --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index c46a88dd40..9d1a2c7012 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -231,7 +231,7 @@ stages: export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; echo 'Workload Prefix' ${workload_prefix} fi - if [[ $EXTRA_PARAMETERS == $(EXTRA_PARAMETERS) ]]; then + if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then new_parameters=$PIPELINE_EXTRA_PARAMETERS else echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" From 34c28a045d849b59f13d6ae40115747cb0866f7f Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 20:32:52 +0530 Subject: [PATCH 092/607] Add verbosity level 2 to debug message in PAS Install task --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 3 +- .../roles-sap/5.3-app-install/tasks/main.yaml | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index d3ee798924..18338621f0 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -93,7 +93,7 @@ - name: "PAS Install: Check if the DB load balancer port is available and listening" ansible.builtin.wait_for: host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}18" + port: "3{{ 
db_instance_number }}13" state: started timeout: 30 msg: 'INSTALL:0026:PAS Install failed, database is unreachable.' @@ -106,6 +106,7 @@ - name: "PAS Install: DEBUG - DB Loadbalancer check" ansible.builtin.debug: msg: "{{ db_port_open.msg }}" + verbosity: 2 when: - database_high_availability - platform == "HANA" diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 6b82e2f747..c0526aa27f 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -73,6 +73,36 @@ ansible.builtin.set_fact: app_bom_id: "{{ bom.product_ids.app }}" +- name: "APP Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + timeout: 30 + msg: 'INSTALL:0026:APP Install failed, database is unreachable.' + register: db_port_open + failed_when: false + when: + - database_high_availability + - platform == "HANA" + +- name: "APP Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + msg: "{{ db_port_open.msg }}" + verbosity: 2 + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + +- name: "ErrorHandling" + ansible.builtin.fail: + msg: "INSTALL:0026:APP Install failed, database is unreachable." + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + - name: "APP Install: Set schema_name variable for HANA" when: platform == "HANA" block: From 4d7ddcc7eff72cb5c5b597322e2cc3e6b5514af3 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 20:47:51 +0530 Subject: [PATCH 093/607] add clear errors to stonith and health events in pcmk enablement --- .../1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 959f85fe84..a8aa2c3db3 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -205,6 +205,9 @@ - name: "1.17 Generic Pacemaker - Ensure the STONITH device is enabled" ansible.builtin.command: pcs property set stonith-enabled=true + - name: "1.17 Generic Pacemaker - Clear any errors during enablement of STONITH device" + ansible.builtin.command: pcs resource cleanup + # /*---------------------------------------------------------------------------8 # | | # | Fencing - END | From a2f125ab42f2326c962b2705846522a7f9e1641e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 18:52:57 +0200 Subject: [PATCH 094/607] Also store the hidden config file --- deploy/scripts/installer.sh | 8 ++++++++ deploy/scripts/sync_deployer.sh | 8 ++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index a1b6349167..2eba6d0ced 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -1365,7 +1365,15 @@ if [ "${deployment_system}" == sap_system ] ; then az storage blob upload --file sap-parameters.yaml --container-name tfvars/"${state_path}"/"${key}" --name sap-parameters.yaml --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none 
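# These uploads keep the generated configuration files in the same storage
# account as the Terraform state so that a fresh deployer or agent can pull
# them back down; the matching download loop in sync_deployer.sh appears
# further below in this patch. Sketch of the shared blob layout, as read from
# the commands in this hunk (assumed, not newly introduced here):
#   tfvars/<state_path>/<key>/sap-parameters.yaml
#   tfvars/.sap_deployment_automation/<environment><region_code><network_logical_name>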
hosts_file=$(ls *_hosts.yaml) az storage blob upload --file "${hosts_file}" --container-name tfvars/"${state_path}"/"${key}" --name "${hosts_file}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +fi +if [ "${deployment_system}" == sap_landscape ] ; then + az storage blob upload --file "${system_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}${network_logical_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +fi +if [ "${deployment_system}" == sap_library ] ; then + deployer_config_information="${automation_config_directory}"/"${environment}""${region_code}" + az storage blob upload --file "${deployer_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none fi + exit $return_value diff --git a/deploy/scripts/sync_deployer.sh b/deploy/scripts/sync_deployer.sh index 69c2dc37f4..8639be4e84 100755 --- a/deploy/scripts/sync_deployer.sh +++ b/deploy/scripts/sync_deployer.sh @@ -68,10 +68,10 @@ done files=$(az storage blob list --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --query "[].name" -o tsv --only-show-errors --output tsv) for name in $files; do - if [ -n $name ] ; then - echo "Downloading file: " $name - dirName=$(dirname $name) - mkdir -p $dirName + if [ -n "$name" ] ; then + echo "Downloading file: " "$name" + dirName=$(dirname "$name") + mkdir -p "$dirName" az storage blob download --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --file "${name}" --name "${name}" --only-show-errors --output none --no-progress fi From 1fcd96776adfb83bbf11b3d1bc43e09961657ce7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 19:02:28 +0200 Subject: [PATCH 095/607] Bump up Ansible to 2.15 on Ubuntu 22.04 --- deploy/scripts/configure_deployer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index d3c2a9ba90..6e577791df 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -313,7 +313,7 @@ case "$(get_distro_name)" in echo "we are inside ubuntu" rel=$(lsb_release -a | grep Release | cut -d':' -f2 | xargs) if [ "$rel" == "22.04" ]; then - ansible_version="${ansible_version:-2.15}" + ansible_version="2.15" ansible_major="${ansible_version%%.*}" ansible_minor=$(echo "${ansible_version}." | cut -d . 
-f 2) fi From 5b78ae06b9c88797c2e9e460d5fa6d238b4d12d6 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 22:48:03 +0530 Subject: [PATCH 096/607] Update keystore file handling and ACSS registration configuration --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 376cdd0f18..100aab34b5 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -31,6 +31,14 @@ - "Database is encrypted: {{ db_encrypted }}" when: ansible_hostname == primary_instance_name +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" + - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" + when: ansible_hostname == primary_instance_name + - name: "DB2: Fetch keystore files from Primary node to Controller" ansible.builtin.fetch: src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" From 8b32855c4e0c07ad9601d2b6e32d89acfc67feab Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 22:48:03 +0530 Subject: [PATCH 097/607] Fix DB2 keystore file check in ansible playbook --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 100aab34b5..b5dfa42fe3 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -33,7 +33,7 @@ - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: - msg: + msg: - "Database is encrypted: {{ db_encrypted }}" - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" From f2db89248ec27ea1ed164654493f8f944896231a Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 22:57:16 +0530 Subject: [PATCH 098/607] Update keystore file handling and ACSS registration configuration --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index b5dfa42fe3..57ecc9a234 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -39,6 +39,14 @@ - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" when: ansible_hostname == primary_instance_name +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + - "Keystore files exist: {{ keystore_files_stat.results | 
map(attribute='stat.exists') | list }}" + - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" + when: ansible_hostname == primary_instance_name + - name: "DB2: Fetch keystore files from Primary node to Controller" ansible.builtin.fetch: src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" From c7fe4efa7c41fd1804112f11106f10ca1081b610 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 22:57:35 +0530 Subject: [PATCH 099/607] Fix DB2 keystore file check in ansible playbook --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 57ecc9a234..441a2a4d4f 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -41,7 +41,7 @@ - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: - msg: + msg: - "Database is encrypted: {{ db_encrypted }}" - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" From b4b4d1738dbfde1a0eb839835d0043b522062ffb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 22:59:31 +0530 Subject: [PATCH 100/607] Remove the extra debug messages --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 441a2a4d4f..daae0b7364 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -25,12 +25,6 @@ db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" when: ansible_hostname == primary_instance_name -- name: "DB2: Debug if the database is encrypted" - ansible.builtin.debug: - msg: - - "Database is encrypted: {{ db_encrypted }}" - when: ansible_hostname == primary_instance_name - - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: msg: @@ -43,8 +37,6 @@ ansible.builtin.debug: msg: - "Database is encrypted: {{ db_encrypted }}" - - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" when: ansible_hostname == primary_instance_name - name: "DB2: Fetch keystore files from Primary node to Controller" From dd3316b380ac0a657c1e48cb0c149f22b53651e1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:03:26 +0530 Subject: [PATCH 101/607] Squashed commit of the following: commit 51afc157af112a2c886dc658ce0461f016816262 Author: Harm Jan Stam Date: Fri Dec 15 18:45:21 2023 +0100 Bugfix kv-secrets debug and import tasks (#516) Because the operation variable has no default within the role its existence should be checked. 
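Ansible joins the entries of a when: list with "and" and evaluates them left to right, so putting the "operation is defined" test first keeps the comparison from raising an undefined-variable error when the role is invoked without that parameter. A minimal self-contained sketch of the guard pattern (the playbook and message below are illustrative only, not part of this patch):

- hosts: localhost
  gather_facts: false
  tasks:
    # Skipped silently when 'operation' is not supplied; runs only for fencing.
    - name: "Run fencing preparation only when operation is set"
      ansible.builtin.debug:
        msg: "operation is fencing"
      when:
        - operation is defined
        - operation == "fencing"

Invoked plain, the task is skipped; invoked with -e operation=fencing, it runs.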
--- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 85940d854d..54840b167b 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -32,7 +32,9 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 - when: operation == "fencing" + when: + - operation is defined + - operation == "fencing" # -------------------------------------+---------------------------------------8 # @@ -40,6 +42,7 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: + - operation is defined - operation == "SoftwareAcquisition" @@ -49,6 +52,7 @@ - name: "0.2 Key Vault: - Import Fencing secrets" ansible.builtin.import_tasks: "fencing.yaml" when: + - operation is defined - operation == "fencing" - (database_high_availability and database_cluster_type == "AFA") or (scs_high_availability and scs_cluster_type == "AFA") # AFA (Azure Fencing Agent) @@ -62,6 +66,7 @@ ansible.builtin.import_tasks: "wincluster-witness.yaml" # TODO: update when clause more appropriately when: + - operation is defined - operation == "fencing" - (scs_high_availability or database_high_availability) - not use_msi_for_clusters From 82f5e34a32b463affb3f0dded5bde183435af3cd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:03:52 +0530 Subject: [PATCH 102/607] Systemd-Based SAP Startup Framework --- .../tasks/5.5.4.1-cluster-RedHat.yml | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 711587ae0b..1a8df6affa 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,6 +177,46 @@ loop_var: item failed_when: constraint.rc > 1 +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" + when: ansible_distribution_major_version in ["8", "9"] + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + when: + - ansible_distribution_major_version in ["8", "9"] + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + block: | + [Unit] + Description=Pacemaker needs the SAP HANA instance service + Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service + After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service + register: dropinfile + +- name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: yes + when: + - dropinfile.changed + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# 
+------------------------------------4--------------------------------------*/ + + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" ansible.builtin.shell: pcs property set maintenance-mode=false From ab2150833789fe18d320cf6a6466a7235effd5b8 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:04:15 +0530 Subject: [PATCH 103/607] Update HANA Cluster and SCSERS profiles --- .../tasks/5.5.4.1-cluster-RedHat.yml | 7 +++---- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 11 +++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 1a8df6affa..e677da31f0 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -182,15 +182,14 @@ # | Systemd-Based SAP Startup Framework - BEGIN | # | | # +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') }}" - when: ansible_distribution_major_version in ["8", "9"] + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" when: - - ansible_distribution_major_version in ["8", "9"] - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 2c335ac124..bc3352160a 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -131,6 +131,17 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer | default(false) + - name: "5.6 SCSERS - validate that the drop-in file is active" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.shell: >- + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + register: dropinfile_validation + changed_when: false + failed_when: dropinfile_validation.rc > 0 + + # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - END | From dd27da2498ab1a7cfeab19433ed6b35d8738f28e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:35 +0530 Subject: [PATCH 104/607] Debugging drop file --- .../5.6-scsers-pacemaker/tasks/main.yml | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index e4ce2853c7..7989ed3df6 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -26,6 +26,106 @@ become: true become_user: root +# /*---------------------------------------------------------------------------8 +# | | +# | 
Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP_.service +- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.stat: + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + register: systemd_service_file_path + loop: + - "{{ scs_instance_number }}" + - "{{ ers_instance_number }}" + loop_control: + loop_var: sap_instance_number + +- name: "5.6 SCSERS - Set fact for the systemd services existance" + ansible.builtin.set_fact: + systemd_service_names: "{{ + systemd_service_file_path.results + | selectattr('stat.exists') + | map(attribute='stat.exists') + | regex_replace('/etc/systemd/system/', '') + }}" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + +- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" + when: + - systemd_service_names is defined + - systemd_service_names | length > 0 + block: + - name: "5.6 SCSERS - Disable the services if they exist" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.systemd: + name: "{{ service_name }}" + enabled: false + loop: "{{ systemd_service_names }}" + loop_control: + loop_var: service_name + + - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.blockinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + block: >- + [Service] + Restart=no + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + register: dropinfile + + - name: "5.6 SCSERS - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + - name: "5.6 SCSERS - validate that the drop-in file is active" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + ansible.builtin.shell: >- + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + register: dropinfile_validation + changed_when: false + failed_when: dropinfile_validation.rc > 0 + + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - END | +# | | +# +------------------------------------4--------------------------------------*/ + + + - name: "5.6 SCSERS Pacemaker - provision" ansible.builtin.include_tasks: file: 5.6.4-provision.yml From a50134386e7c2886b9775b4328fefe6dab221a76 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:35 +0530 Subject: [PATCH 105/607] debug the services retrieval --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml 
b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 7989ed3df6..c4bf020175 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -55,8 +55,8 @@ ansible.builtin.set_fact: systemd_service_names: "{{ systemd_service_file_path.results - | selectattr('stat.exists') - | map(attribute='stat.exists') + | selectattr('stat.exists', 'equalto', true) + | map(attribute='stat.path') | regex_replace('/etc/systemd/system/', '') }}" when: From e40f3aed95e886ef0e2ff17803a4168fc2f56a14 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:35 +0530 Subject: [PATCH 106/607] don't fail if service does not exist --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index c4bf020175..bed4e83f22 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -75,6 +75,8 @@ ansible.builtin.systemd: name: "{{ service_name }}" enabled: false + failed_when: false + loop: "{{ systemd_service_names }}" loop: "{{ systemd_service_names }}" loop_control: loop_var: service_name From 80f634638799d40259462eb5c6528fa99c630a8d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:35 +0530 Subject: [PATCH 107/607] Add newline --- deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index bed4e83f22..2fe30bc6ef 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -94,7 +94,7 @@ group: root mode: '0644' block: >- - [Service] + [Service]\n Restart=no loop: - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" From 39724771bb05b0385c00787a86e9a1fd7d859adb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:35 +0530 Subject: [PATCH 108/607] switch to lineinfile --- .../5.6-scsers-pacemaker/tasks/main.yml | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 2fe30bc6ef..38271fcdee 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -86,16 +86,34 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true - ansible.builtin.blockinfile: + ansible.builtin.lineinfile: path: '{{ dropfile }}' create: true backup: true owner: root group: root mode: '0644' - block: >- - [Service]\n - Restart=no + line: "[Service]" + loop: + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" + - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + loop_control: + loop_var: dropfile + + - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + become: true + ansible.builtin.lineinfile: + path: '{{ dropfile }}' + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: 
'^[Service]$' + line: "Restart=no" loop: - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" From 79a4921ed8e4adca7aebdb8eaf65204fa3a7caed Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:04:59 +0530 Subject: [PATCH 109/607] Moving the task to the correct place --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 1 - .../5.6-scsers-pacemaker/tasks/main.yml | 120 ------------------ 2 files changed, 121 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index bc3352160a..8a4ed03d97 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -112,7 +112,6 @@ # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: # timeout: 120 - # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - BEGIN | diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml index 38271fcdee..e4ce2853c7 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/main.yml @@ -26,126 +26,6 @@ become: true become_user: root -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - BEGIN | -# | | -# +------------------------------------4--------------------------------------*/ - -- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - -# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services -# the path for the service file is /etc/systemd/system/SAP_.service -- name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.stat: - path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" - register: systemd_service_file_path - loop: - - "{{ scs_instance_number }}" - - "{{ ers_instance_number }}" - loop_control: - loop_var: sap_instance_number - -- name: "5.6 SCSERS - Set fact for the systemd services existance" - ansible.builtin.set_fact: - systemd_service_names: "{{ - systemd_service_file_path.results - | selectattr('stat.exists', 'equalto', true) - | map(attribute='stat.path') - | regex_replace('/etc/systemd/system/', '') - }}" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - -- name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" - when: - - systemd_service_names is defined - - systemd_service_names | length > 0 - block: - - name: "5.6 SCSERS - Disable the services if they exist" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.systemd: - name: "{{ service_name }}" - enabled: false - failed_when: false - loop: "{{ systemd_service_names }}" - loop: "{{ systemd_service_names }}" - loop_control: - loop_var: service_name - - - name: "5.6 SCSERS Pacemaker - (systemd) 
Create drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Service]" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - - - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - become: true - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Service]$' - line: "Restart=no" - loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" - loop_control: - loop_var: dropfile - register: dropinfile - - - name: "5.6 SCSERS - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - - name: "5.6 SCSERS - validate that the drop-in file is active" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' - register: dropinfile_validation - changed_when: false - failed_when: dropinfile_validation.rc > 0 - - -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - END | -# | | -# +------------------------------------4--------------------------------------*/ - - - - name: "5.6 SCSERS Pacemaker - provision" ansible.builtin.include_tasks: file: 5.6.4-provision.yml From b8b73b6fd7aa0e9b50e0214352b4adf5d697f226 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Dec 2023 18:05:32 +0200 Subject: [PATCH 110/607] don't use the .d --- .../5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 8a4ed03d97..82ef4ced87 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -135,7 +135,7 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d' + systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' register: dropinfile_validation changed_when: false failed_when: dropinfile_validation.rc > 0 From 5bd585c46ed586fa3ffe153b153ac9d6efa70d13 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 111/607] Don't do the validation --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 82ef4ced87..acd6e549ba 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ 
b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -130,15 +130,15 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer | default(false) - - name: "5.6 SCSERS - validate that the drop-in file is active" - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - ansible.builtin.shell: >- - systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - register: dropinfile_validation - changed_when: false - failed_when: dropinfile_validation.rc > 0 + # - name: "5.6 SCSERS - validate that the drop-in file is active" + # when: + # - is_rhel_82_or_newer is defined + # - is_rhel_82_or_newer + # ansible.builtin.shell: >- + # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' + # register: dropinfile_validation + # changed_when: false + # failed_when: dropinfile_validation.rc > 0 # /*---------------------------------------------------------------------------8 From 23d80ebce5554b4342d59da252f3af99a95c4bc8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 112/607] move from blockinfile --- .../tasks/5.5.4.1-cluster-RedHat.yml | 66 ++++++++++++++----- 1 file changed, 51 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index e677da31f0..f6aa145bb9 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -189,25 +189,61 @@ is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + block: when: - is_rhel_82_or_newer is defined - is_rhel_82_or_newer become: true - ansible.builtin.blockinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - block: | - [Unit] - Description=Pacemaker needs the SAP HANA instance service - Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service - After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service - register: dropinfile - -- name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: yes - when: - - dropinfile.changed + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: 
/etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: yes + when: + - dropinfile.changed # /*---------------------------------------------------------------------------8 # | | From 71bfc0f94e5fac0c3e14e15c505af9031a8056bc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 113/607] Revert "Bugfix kv-secrets debug and import tasks (#516)" This reverts commit 51afc157af112a2c886dc658ce0461f016816262. --- deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 54840b167b..85940d854d 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -32,9 +32,7 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 - when: - - operation is defined - - operation == "fencing" + when: operation == "fencing" # -------------------------------------+---------------------------------------8 # @@ -42,7 +40,6 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation is defined - operation == "SoftwareAcquisition" @@ -52,7 +49,6 @@ - name: "0.2 Key Vault: - Import Fencing secrets" ansible.builtin.import_tasks: "fencing.yaml" when: - - operation is defined - operation == "fencing" - (database_high_availability and database_cluster_type == "AFA") or (scs_high_availability and scs_cluster_type == "AFA") # AFA (Azure Fencing Agent) @@ -66,7 +62,6 @@ ansible.builtin.import_tasks: "wincluster-witness.yaml" # TODO: update when clause more appropriately when: - - operation is defined - operation == "fencing" - (scs_high_availability or database_high_availability) - not use_msi_for_clusters From a9d451175fce5d7a50d26087d3112f88bc26b722 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 114/607] lint --- .../5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index f6aa145bb9..0eb3396dbb 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -189,12 +189,11 @@ is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - block: + become: true when: - is_rhel_82_or_newer is defined - is_rhel_82_or_newer - become: true - + block: - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf From 5082069123ee44db31043bb4d890d3092c75b908 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 
115/607] indentation --- .../tasks/5.5.4.1-cluster-RedHat.yml | 138 +++++++++--------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 0eb3396dbb..a4f33473e7 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - BEGIN | -# | | -# +------------------------------------4--------------------------------------*/ -# Follow steps described in https://access.redhat.com/articles/6884531 - -- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: yes + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true when: - - dropinfile.changed + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + 
block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - END | -# | | -# +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" From a2ea3bfcb4a92455935fd7dc3d62b9f7447f2569 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 116/607] idnentation --- .../tasks/5.5.4.1-cluster-RedHat.yml | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index a4f33473e7..ad3df7f571 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 - - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - 
is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number 
}}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" From fc844988ff5eea2d4146bbd45f7cdf8663d1adbf Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 117/607] Revert "idnentation" This reverts commit e9b61e3455d25fb9fe5766c37a2f88ed9901e404. --- .../tasks/5.5.4.1-cluster-RedHat.yml | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index ad3df7f571..a4f33473e7 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,78 +177,78 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 - - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: 
/etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" From 994e1fe03323c281c9d191f13ee51f9f1d5a23ac Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:06:00 +0530 Subject: [PATCH 118/607] fix indentation --- .../5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index a4f33473e7..9f6df492a2 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -236,7 +236,7 @@ mode: '0644' insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile + register: dropinfile - name: "5.5.4.1 HANA Cluster configuration - systemd reload" ansible.builtin.systemd: From b4ff6af5c5e32e84accd6ef88776ae1ff65111bd Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:06:27 +0530 Subject: [PATCH 119/607] Refactor HANA Cluster configuration for systemd-based SAP Startup Framework --- .../tasks/5.5.4.1-cluster-RedHat.yml | 153 ++++++++++++------ 1 file changed, 104 insertions(+), 49 deletions(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 9f6df492a2..11d1210cea 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,54 +177,109 @@ loop_var: item failed_when: constraint.rc > 1 - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - BEGIN | - # | | - # +------------------------------------4--------------------------------------*/ - # Follow steps described in https://access.redhat.com/articles/6884531 + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" + ansible.builtin.shell: pcs property set maintenance-mode=false - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 7" + ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' + register: cluster_stable_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" + # '*' is a special character in regexp and needs to be escaped for literal matching + # if we are worried about character spacing across distros we can match for '\* Online:' + - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 8 or 9" + ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' + register: cluster_stable_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + when: ansible_distribution_major_version in ["8", "9"] - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: 
"5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" + # - name: Ensure Cluster resources are started + # ansible.builtin.shell: pcs status | grep '\* Started:' + # register: hana_pcs_cluster_resource_check + # when: ansible_distribution_major_version == "8" - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" + # - name: Ensure Cluster resources are started + # ansible.builtin.shell: pcs status | grep '^Started ' + # register: hana_pcs_cluster_resource_check + # when: ansible_distribution_major_version != "8" - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + # - name: Ensure Cluster resources are started + # ansible.builtin.debug: + # var: hana_pcs_cluster_resource_check + + # the leading spaces are irrelevant here as we are looking for *Started: + - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 7" + ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:' + register: hana_cluster_resource_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" + when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" + + - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 8 or 9" + ansible.builtin.shell: set -o pipefail && pcs resource status | grep '\* Started:' + register: hana_cluster_resource_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" + when: ansible_distribution_major_version in ["8", "9"] + when: ansible_hostname == primary_instance_name + +# End of HANA clustering resources + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 + +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + 
path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" ansible.builtin.lineinfile: @@ -238,11 +293,11 @@ line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" register: dropinfile - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed # /*---------------------------------------------------------------------------8 # | | From 61a58ea396ae898a104e80cc47a62ab9f3c87254 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:02 +0530 Subject: [PATCH 120/607] Add Systemd-Based SAP Startup Framework for SLES --- .../5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index acd6e549ba..6598b45827 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -112,6 +112,7 @@ # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" # ansible.builtin.wait_for: # timeout: 120 + # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - BEGIN | From f2f879aef5c2e0277845b86fdabb6e1a3f885328 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:23 +0530 Subject: [PATCH 121/607] Disable and Stop services if they exist in 5.6 SCSERS Pacemaker role --- .../tasks/5.6.7-config-systemd-sap-start.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 2bbaad04a2..5175f6f4d9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -39,6 +39,7 @@ # loop_control: # loop_var: service_name + - name: "5.6 SCSERS - Disable and Stop the services if they exist" become: true become_user: root From dc600a63a967eab63d882b12bb6d1d9fb98ea839 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:23 +0530 Subject: [PATCH 122/607] remove extra line in tasks --- 
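Note: the blank line removed below sits immediately above the "5.6 SCSERS - Disable and Stop the services if they exist" task in 5.6.7-config-systemd-sap-start.yml, which stops and disables the SAP instance units so that Pacemaker, rather than systemd, controls instance start and stop (per the Red Hat guidance referenced in the cluster tasks above). A minimal sketch of that pattern, assuming the SAP<SID>_<NN> unit-name convention and illustrative variable names (sap_sid, instance_number) rather than the role's exact task:

# Hedged sketch only - not the task from the role itself.
# Stop and disable one SAP instance unit when it is present, so the
# cluster resource agents own instance start/stop from here on.
- name: "5.6 SCSERS - Disable and Stop the services if they exist (sketch)"
  become: true
  become_user: root
  ansible.builtin.systemd:
    name: "SAP{{ sap_sid | upper }}_{{ instance_number }}.service"  # assumed unit-name pattern
    enabled: false
    state: stopped
  failed_when: false  # tolerate hosts where the unit was never installed

The commented-out loop_control/loop_var lines in the surrounding context suggest the real task iterates over a list of such unit names rather than hard-coding one.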
.../tasks/5.6.7-config-systemd-sap-start.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 5175f6f4d9..2bbaad04a2 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -39,7 +39,6 @@ # loop_control: # loop_var: service_name - - name: "5.6 SCSERS - Disable and Stop the services if they exist" become: true become_user: root From fd2a2c5b33f21bb64e5631b74aff31b39d3a3f11 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:23 +0530 Subject: [PATCH 123/607] Add DB load balancer port check for PAS installation --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 18338621f0..f499270b60 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -61,6 +61,19 @@ path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_pas.txt" register: pas_installed +- name: "PAS Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + register: db_port_open + when: database_high_availability + +- name: "PAS Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + var: db_port_open + when: database_high_availability + # Returns bom object - name: "PAS Install: Register BoM" ansible.builtin.include_role: From 9ed934cc4f4ccd2acd8c6b99862cc76be767fc22 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:23 +0530 Subject: [PATCH 124/607] additional debug --- .../ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index f499270b60..f3c22c8359 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -64,15 +64,19 @@ - name: "PAS Install: Check if the DB load balancer port is available and listening" ansible.builtin.wait_for: host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}13" + port: "3{{ db_instance_number }}18" state: started register: db_port_open - when: database_high_availability + when: + - database_high_availability + - platform == "HANA" - name: "PAS Install: DEBUG - DB Loadbalancer check" ansible.builtin.debug: var: db_port_open - when: database_high_availability + when: + - database_high_availability + - platform == "HANA" # Returns bom object - name: "PAS Install: Register BoM" From 197e800aa33a20349a3b900a0ef0b6842177be94 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 21 Dec 2023 23:07:59 +0530 Subject: [PATCH 125/607] Refactor PAS installation tasks and add DB load balancer port check --- .../roles-sap/5.2-pas-install/tasks/main.yaml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 
f3c22c8359..18338621f0 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -61,23 +61,6 @@ path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_pas.txt" register: pas_installed -- name: "PAS Install: Check if the DB load balancer port is available and listening" - ansible.builtin.wait_for: - host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}18" - state: started - register: db_port_open - when: - - database_high_availability - - platform == "HANA" - -- name: "PAS Install: DEBUG - DB Loadbalancer check" - ansible.builtin.debug: - var: db_port_open - when: - - database_high_availability - - platform == "HANA" - # Returns bom object - name: "PAS Install: Register BoM" ansible.builtin.include_role: From 5eb72e88ec29e7be80ac90be72e56f70f42795a6 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 22 Dec 2023 00:03:36 +0530 Subject: [PATCH 126/607] resolve merge issues from rebase --- .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 8 -- .../tasks/5.5.4.1-cluster-RedHat.yml | 129 ------------------ 2 files changed, 137 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index daae0b7364..376cdd0f18 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -25,14 +25,6 @@ db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" when: ansible_hostname == primary_instance_name -- name: "DB2: Debug if the database is encrypted" - ansible.builtin.debug: - msg: - - "Database is encrypted: {{ db_encrypted }}" - - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" - - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" - when: ansible_hostname == primary_instance_name - - name: "DB2: Debug if the database is encrypted" ansible.builtin.debug: msg: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 11d1210cea..711587ae0b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -239,135 +239,6 @@ # +------------------------------------4--------------------------------------*/ # Follow steps described in https://access.redhat.com/articles/6884531 -- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" - ansible.builtin.set_fact: - is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" - -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" - become: true - when: - - is_rhel_82_or_newer is defined - - is_rhel_82_or_newer - block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - line: "[Unit]" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: 
/etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^[Unit]$' - line: "Description=Pacemaker needs the SAP HANA instance service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' - line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" - ansible.builtin.lineinfile: - path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf - create: true - backup: true - owner: root - group: root - mode: '0644' - insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' - line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - register: dropinfile - - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" - ansible.builtin.systemd: - daemon_reload: true - when: - - dropinfile.changed - - # /*---------------------------------------------------------------------------8 - # | | - # | Systemd-Based SAP Startup Framework - END | - # | | - # +------------------------------------4--------------------------------------*/ - - - - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" - ansible.builtin.shell: pcs property set maintenance-mode=false - - - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 7" - ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' - register: cluster_stable_check - retries: 12 - delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" - when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" - # '*' is a special character in regexp and needs to be escaped for literal matching - # if we are worried about character spacing across distros we can match for '\* Online:' - - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 8 or 9" - ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' - register: cluster_stable_check - retries: 12 - delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" - when: ansible_distribution_major_version in ["8", "9"] - - # - name: Ensure Cluster resources are started - # ansible.builtin.shell: pcs status | grep '\* Started:' - # register: hana_pcs_cluster_resource_check - # when: ansible_distribution_major_version == "8" - - # - name: Ensure Cluster resources are started - # ansible.builtin.shell: pcs status | grep '^Started ' - # register: hana_pcs_cluster_resource_check - # when: ansible_distribution_major_version != "8" - - - # - name: Ensure Cluster resources are started - # ansible.builtin.debug: - # var: hana_pcs_cluster_resource_check - - # the leading spaces are irrelevant here as we are looking for *Started: - - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 7" - ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:' - 
register: hana_cluster_resource_check - retries: 12 - delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" - when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" - - - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 8 or 9" - ansible.builtin.shell: set -o pipefail && pcs resource status | grep '\* Started:' - register: hana_cluster_resource_check - retries: 12 - delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" - when: ansible_distribution_major_version in ["8", "9"] - when: ansible_hostname == primary_instance_name - -# End of HANA clustering resources - -# /*---------------------------------------------------------------------------8 -# | | -# | Systemd-Based SAP Startup Framework - BEGIN | -# | | -# +------------------------------------4--------------------------------------*/ -# Follow steps described in https://access.redhat.com/articles/6884531 - - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" From 4aa5f6aa1ed294078673104c669d9d4740c691c1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 21 Dec 2023 23:55:04 +0200 Subject: [PATCH 127/607] List the resources --- deploy/scripts/advanced_state_management.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh index a06fb93993..69eb5bcbc9 100755 --- a/deploy/scripts/advanced_state_management.sh +++ b/deploy/scripts/advanced_state_management.sh @@ -331,6 +331,7 @@ if [ "${operation}" == "list" ] ; then echo "# #" echo "#########################################################################################" echo "" + cat resources.lst unset TF_DATA_DIR exit 0 From 244ac5d5f67de5788597d217455f72ef941affb1 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Wed, 10 Jan 2024 00:13:02 -0800 Subject: [PATCH 128/607] Fix iSCSI service tier typo and update iSCSI initiator name (#522) --- .../ansible/roles-os/1.16-services/vars/os-services.yaml | 4 ++-- .../1.17-generic-pacemaker/tasks/1.17.1.1-iSCSI.yml | 7 +++++-- .../ansible/roles-sap-os/2.11-iscsi-server/tasks/main.yaml | 4 ++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml index c12f8841de..c192491fe4 100644 --- a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml +++ b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml @@ -80,8 +80,8 @@ services: - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } # --------------------------- Begin - Packages required for iSCSI -----------------------------------------8 # https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-suse-pacemaker#sbd-with-an-iscsi-target-server - - { tier: 'iscs', service: 'targetcli', node_tier: 'iscsi', state: 'enabled' } - - { tier: 'iscs', service: 'targetcli', node_tier: 'iscsi', state: 'started' } + - { tier: 'iscsi', 
service: 'targetcli', node_tier: 'iscsi', state: 'enabled' } + - { tier: 'iscsi', service: 'targetcli', node_tier: 'iscsi', state: 'started' } # ---------------------------- End - Packages required for iSCSI ------------------------------------------8 oraclelinux8: diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.1-iSCSI.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.1-iSCSI.yml index 981298c0ac..87937f7337 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.1-iSCSI.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.1-iSCSI.yml @@ -26,9 +26,12 @@ ansible.builtin.set_fact: iscsi_node_type: >- {%- set _clusterType = 'scs' -%} - {%- if (['hana', 'db2'] in supported_tiers) -%} + {%- if ('hana' in supported_tiers) -%} {%- set _clusterType = 'db' -%} - {%- endif -%} {{- _clusterType -}} + {%- endif -%} + {%- if ('db2' in supported_tiers) -%} + {%- set _clusterType = 'db' -%} + {%- endif -%} {{- _clusterType -}} - name: "1.17.1 iSCSI packages - Get initiator name" diff --git a/deploy/ansible/roles-sap-os/2.11-iscsi-server/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.11-iscsi-server/tasks/main.yaml index 937422e1ae..aea48b4f33 100644 --- a/deploy/ansible/roles-sap-os/2.11-iscsi-server/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.11-iscsi-server/tasks/main.yaml @@ -105,7 +105,7 @@ - name: "2.11: SBD create iscsi/iqn DB" become_user: root become: true - ansible.builtin.command: "targetcli iscsi/{{ item.iqn }}/tpg1/acls/ create iqn.2006-04.{{ sap_sid }}-db-0.local:{{ sap_sid }}-db-0" + ansible.builtin.command: "targetcli iscsi/{{ item.iqn }}/tpg1/acls/ create iqn.2006-04.{{ sap_sid }}-xdb-0.local:{{ sap_sid }}-xdb-0" register: iscsi_create3_db failed_when: iscsi_create3_db.rc not in [0,1] changed_when: iscsi_create3_db.rc == 0 @@ -118,7 +118,7 @@ - name: "2.11: SBD create iscsi/iqn DB" become_user: root become: true - ansible.builtin.command: "targetcli iscsi/{{ item.iqn }}/tpg1/acls/ create iqn.2006-04.{{ sap_sid }}-db-1.local:{{ sap_sid }}-db-1" + ansible.builtin.command: "targetcli iscsi/{{ item.iqn }}/tpg1/acls/ create iqn.2006-04.{{ sap_sid }}-xdb-1.local:{{ sap_sid }}-xdb-1" register: iscsi_create4_db failed_when: iscsi_create4_db.rc not in [0,1] changed_when: iscsi_create4_db.rc == 0 From fedfa6febce155ae165d88cf78bbb602130bf754 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Jan 2024 15:19:29 +0200 Subject: [PATCH 129/607] Add support for controlling DNS A record creation for secondary names (#523) * Add support for controlling DNS A record creation for secondary names -------- Co-authored-by: Kimmo Forss --- deploy/terraform/run/sap_system/module.tf | 2 +- deploy/terraform/run/sap_system/tfvar_variables.tf | 6 ++++++ .../modules/sap_system/output_files/dns_records.tf | 8 ++++---- .../modules/sap_system/output_files/variables_global.tf | 1 + 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index aeef2a126d..ec318847ed 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -412,7 +412,7 @@ module "output_files" { management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) dns_zone_names = var.dns_zone_names - + 
dns_a_records_for_secondary_names = var.dns_a_records_for_secondary_names ######################################################################################### # Server counts # diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 5663e40f8d..a813215b71 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1000,6 +1000,12 @@ variable "dns_zone_names" { } } +variable "dns_a_records_for_secondary_names" { + description = "Boolean value indicating if dns a records should be created for the secondary DNS names" + default = true + type = bool + } + ######################################################################################### # # # NFS and Shared Filed settings # diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/dns_records.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/dns_records.tf index f4eef649c6..bfba050e28 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/dns_records.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/dns_records.tf @@ -1,6 +1,6 @@ resource "azurerm_private_dns_a_record" "app_secondary" { provider = azurerm.dnsmanagement - count = var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.app_server_count : 0 + count = var.dns_a_records_for_secondary_names && var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.app_server_count : 0 name = var.naming.virtualmachine_names.APP_SECONDARY_DNSNAME[count.index] zone_name = var.dns resource_group_name = var.management_dns_resourcegroup_name @@ -15,7 +15,7 @@ resource "azurerm_private_dns_a_record" "app_secondary" { resource "azurerm_private_dns_a_record" "scs_secondary" { provider = azurerm.dnsmanagement - count = var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.scs_server_count : 0 + count = var.dns_a_records_for_secondary_names && var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.scs_server_count : 0 name = var.naming.virtualmachine_names.SCS_SECONDARY_DNSNAME[count.index] zone_name = var.dns resource_group_name = var.management_dns_resourcegroup_name @@ -30,7 +30,7 @@ resource "azurerm_private_dns_a_record" "scs_secondary" { resource "azurerm_private_dns_a_record" "web_secondary" { provider = azurerm.dnsmanagement - count = var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.web_server_count : 0 + count = var.dns_a_records_for_secondary_names && var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.web_server_count : 0 name = var.naming.virtualmachine_names.WEB_SECONDARY_DNSNAME[count.index] zone_name = var.dns resource_group_name = var.management_dns_resourcegroup_name @@ -45,7 +45,7 @@ resource "azurerm_private_dns_a_record" "web_secondary" { resource "azurerm_private_dns_a_record" "db_secondary" { provider = azurerm.dnsmanagement - count = var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? var.db_server_count : 0 + count = var.dns_a_records_for_secondary_names && var.use_secondary_ips && !var.use_custom_dns_a_registration && length(var.dns) > 0 ? 
var.db_server_count : 0 name = local.db_secondary_dns_names[count.index] zone_name = var.dns resource_group_name = var.management_dns_resourcegroup_name diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index e33b2667d3..c007e5e91c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -66,6 +66,7 @@ variable "dns" { description = "The DNS label" default = "" } +variable "dns_a_records_for_secondary_names" { description = "Boolean value indicating if dns a records should be created for the secondary DNS names"} variable "ers_instance_number" { description = "Instance number for ERS" default = "02" From 2b70a6cee07fced2ddd2a27334f5764f1db30bf8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Jan 2024 19:19:32 +0200 Subject: [PATCH 130/607] Refactor SAP transport and installation path handling (#524) Co-authored-by: Kimmo Forss --- .../modules/sap_landscape/outputs.tf | 43 +++++++++++++++---- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index c19eaa70d2..18353fd760 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -353,7 +353,9 @@ output "saptransport_path" { description = "Path to the SAP transport volume" value = var.create_transport_storage && var.NFS_provider == "AFS" ? ( length(var.transport_private_endpoint_id) == 0 ? ( - format("%s:/%s/%s", try(azurerm_private_endpoint.transport[0].private_dns_zone_configs[0].record_sets[0].fqdn, + var.use_private_endpoint ? + ( + format("%s:/%s/%s", try(azurerm_private_endpoint.transport[0].private_dns_zone_configs[0].record_sets[0].fqdn, try(azurerm_private_endpoint.transport[0].private_service_connection[0].private_ip_address, "")), length(var.transport_storage_account_id) > 0 ? split("/", var.transport_storage_account_id)[8] : replace( lower( @@ -362,7 +364,18 @@ output "saptransport_path" { "/[^a-z0-9]/", ""), local.resource_suffixes.transport_volume - )) : ( + )) : + ( + format("%s.file.core.windows.net:/%s/%s", local.landscape_shared_transport_storage_account_name, + length(var.transport_storage_account_id) > 0 ? split("/", var.transport_storage_account_id)[8] : replace( + lower( + format("%s", local.landscape_shared_transport_storage_account_name) + ), + "/[^a-z0-9]/", + ""), + local.resource_suffixes.transport_volume + )) + ) : ( format("%s:/%s/%s", trimsuffix(data.azurerm_private_dns_a_record.transport[0].fqdn, "."), length(var.transport_storage_account_id) > 0 ? split("/", var.transport_storage_account_id)[8] : replace( lower( @@ -392,8 +405,9 @@ output "saptransport_path" { output "install_path" { description = "Path to the SAP installation volume" - value = try(local.use_AFS_for_install ? ( + value = var.NFS_provider == "AFS" || local.use_AFS_for_install ? ( length(var.install_private_endpoint_id) == 0 ? ( + var.use_private_endpoint ? format("%s:/%s/%s", try(azurerm_private_endpoint.install[0].private_dns_zone_configs[0].record_sets[0].fqdn, try(azurerm_private_endpoint.install[0].private_service_connection[0].private_ip_address, "")), length(var.install_storage_account_id) > 0 ? 
split("/", var.install_storage_account_id)[8] : replace( @@ -404,8 +418,19 @@ output "install_path" { "" ), local.resource_suffixes.install_volume - ) - ) : ( + ) : ( + format("%s.file.core.windows.net:/%s/%s", local.landscape_shared_install_storage_account_name, + length(var.install_storage_account_id) > 0 ? split("/", var.install_storage_account_id)[8] : replace( + lower( + format("%s", local.landscape_shared_install_storage_account_name) + ), + "/[^a-z0-9]/", + "" + ), + local.resource_suffixes.install_volume + ) + + )) : ( format("%s:/%s/%s", trimsuffix(data.azurerm_private_dns_a_record.install[0].fqdn, "."), length(var.install_storage_account_id) > 0 ? split("/", var.install_storage_account_id)[8] : replace( @@ -416,7 +441,8 @@ output "install_path" { "" ), local.resource_suffixes.install_volume) - )) : ( + ) + ) : ( var.NFS_provider == "ANF" ? ( format("%s:/%s", var.ANF_settings.use_existing_install_volume ? ( @@ -429,9 +455,8 @@ output "install_path" { ) ) ) : ( - "" - ) - ), "") + "") + ) } ############################################################################### From d0ef2f1d855be5688c13603dc30428b17c3970e0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Jan 2024 19:36:40 +0200 Subject: [PATCH 131/607] Use afs for shared storage (#525) * Add support for using AFS for shared media * Refactor storage account count logic --------- Co-authored-by: Kimmo Forss --- deploy/terraform/run/sap_landscape/module.tf | 2 +- .../run/sap_landscape/tfvar_variables.tf | 19 ++++--- .../modules/sap_landscape/anf.tf | 4 +- .../modules/sap_landscape/outputs.tf | 2 +- .../modules/sap_landscape/storage_accounts.tf | 53 ++++++------------- .../modules/sap_landscape/variables_global.tf | 2 +- .../modules/sap_landscape/variables_local.tf | 2 +- 7 files changed, 32 insertions(+), 52 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index 124bf5daf9..322b8a5771 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -55,7 +55,7 @@ module "sap_landscape" { transport_private_endpoint_id = var.transport_private_endpoint_id transport_storage_account_id = var.transport_storage_account_id transport_volume_size = var.transport_volume_size - use_AFS_for_installation_media = var.use_AFS_for_installation_media + use_AFS_for_shared_storage = var.use_AFS_for_shared_storage use_custom_dns_a_registration = var.use_custom_dns_a_registration use_deployer = length(var.deployer_tfstate_key) > 0 use_private_endpoint = var.use_private_endpoint diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index fec9230204..b393888078 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -29,7 +29,7 @@ variable "name_override_file" { } variable "place_delete_lock_on_resources" { - description = "f defined, a delete lock will be placed on the key resources" + description = "If defined, a delete lock will be placed on the key resources" default = false } @@ -519,17 +519,17 @@ variable "ANF_transport_volume_use_existing" { } variable "ANF_transport_volume_name" { - description = "f defined provides the Transport volume name" + description = "If defined provides the Transport volume name" default = false } variable "ANF_transport_volume_throughput" { - description = "f defined provides the throughput of the transport volume" + description = "If defined provides 
the throughput of the transport volume" default = 128 } variable "ANF_transport_volume_size" { - description = "f defined provides the size of the transport volume" + description = "If defined provides the size of the transport volume" default = 128 } @@ -539,26 +539,25 @@ variable "ANF_install_volume_use_existing" { } variable "ANF_install_volume_name" { - description = "nstall volume name" + description = "Install volume name" default = "" } variable "ANF_install_volume_throughput" { - description = "f defined provides the throughput of the install volume" + description = "If defined provides the throughput of the install volume" default = 128 } variable "ANF_install_volume_size" { - description = "f defined provides the size of the install volume" + description = "If defined provides the size of the install volume" default = 1024 } -variable "use_AFS_for_installation_media" { - description = "f true, will use AFS for installation media." +variable "use_AFS_for_shared_storage" { + description = "If true, will use AFS for all shared storage." default = false } - ######################################################################################### # # # iSCSI definitioms # diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf b/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf index 0b1358e75d..c94232a067 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf @@ -100,7 +100,7 @@ data "azurerm_netapp_pool" "workload_netapp_pool" { resource "azurerm_netapp_volume" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "ANF" ? ( + count = var.create_transport_storage && var.NFS_provider == "ANF" && !var.use_AFS_for_shared_storage ? ( var.ANF_settings.use_existing_transport_volume ? ( 0) : ( 1 @@ -209,7 +209,7 @@ data "azurerm_netapp_volume" "transport" { resource "azurerm_netapp_volume" "install" { provider = azurerm.main - count = var.NFS_provider == "ANF" && !var.use_AFS_for_installation_media ? ( + count = var.NFS_provider == "ANF" && !var.use_AFS_for_shared_storage ? ( var.ANF_settings.use_existing_install_volume ? ( 0) : ( 1 diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index 18353fd760..d97b7ba553 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -405,7 +405,7 @@ output "saptransport_path" { output "install_path" { description = "Path to the SAP installation volume" - value = var.NFS_provider == "AFS" || local.use_AFS_for_install ? ( + value = try(local.use_AFS_for_shared ? ( length(var.install_private_endpoint_id) == 0 ? ( var.use_private_endpoint ? 
format("%s:/%s/%s", try(azurerm_private_endpoint.install[0].private_dns_zone_configs[0].record_sets[0].fqdn, diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 4bb36ca98c..eb300c71be 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -282,13 +282,7 @@ resource "azurerm_private_endpoint" "witness_storage" { resource "azurerm_storage_account" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "AFS" ? ( - length(var.transport_storage_account_id) > 0 ? ( - 0) : ( - 1 - )) : ( - 0 - ) + count = var.create_transport_storage && local.use_AFS_for_shared && length(var.transport_storage_account_id) == 0 ? 1 : 0 depends_on = [ azurerm_subnet.app ] @@ -322,7 +316,7 @@ resource "azurerm_storage_account" "transport" { resource "azurerm_storage_account_network_rules" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "AFS" && length(var.transport_storage_account_id) == 0 ? 1 : 0 + count = var.create_transport_storage && local.use_AFS_for_shared && length(var.transport_storage_account_id) == 0 ? 1 : 0 storage_account_id = azurerm_storage_account.transport[0].id default_action = "Deny" @@ -358,7 +352,7 @@ resource "azurerm_storage_account_network_rules" "transport" { resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && var.NFS_provider == "AFS" && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 + count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 name = replace( lower( format("%s", local.landscape_shared_transport_storage_account_name) @@ -394,7 +388,7 @@ data "azurerm_private_dns_a_record" "transport" { resource "azurerm_storage_share" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "AFS" ? ( + count = var.create_transport_storage && local.use_AFS_for_shared ? ( length(var.transport_storage_account_id) > 0 ? ( var.install_always_create_fileshares ? 1 : 0) : ( 1 @@ -415,7 +409,7 @@ resource "azurerm_storage_share" "transport" { data "azurerm_storage_account" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "AFS" ? ( + count = var.create_transport_storage && local.use_AFS_for_shared ? ( length(var.transport_storage_account_id) > 0 ? ( 1) : ( 0 @@ -432,7 +426,7 @@ resource "azurerm_private_endpoint" "transport" { azurerm_subnet.app, azurerm_private_dns_zone_virtual_network_link.vnet_sap_file ] - count = var.create_transport_storage && var.use_private_endpoint && var.NFS_provider == "AFS" ? ( + count = var.create_transport_storage && var.use_private_endpoint && local.use_AFS_for_shared ? ( length(var.transport_storage_account_id) > 0 ? ( 0) : ( 1 @@ -499,7 +493,7 @@ resource "azurerm_private_endpoint" "transport" { data "azurerm_private_endpoint_connection" "transport" { provider = azurerm.main - count = var.create_transport_storage && var.NFS_provider == "AFS" ? ( + count = var.create_transport_storage && local.use_AFS_for_shared ? ( length(var.transport_private_endpoint_id) > 0 ? 
( 1) : ( 0 @@ -520,13 +514,7 @@ data "azurerm_private_endpoint_connection" "transport" { resource "azurerm_storage_account" "install" { provider = azurerm.main - count = local.use_AFS_for_install ? ( - length(var.install_storage_account_id) > 0 ? ( - 0) : ( - 1 - )) : ( - 0 - ) + count = local.use_AFS_for_shared && length(var.install_storage_account_id) == 0 ? 1 : 0 depends_on = [ azurerm_subnet.app, azurerm_subnet.db, @@ -557,12 +545,11 @@ resource "azurerm_storage_account" "install" { public_network_access_enabled = var.public_network_access_enabled tags = var.tags - } resource "azurerm_storage_account_network_rules" "install" { provider = azurerm.main - count = local.use_AFS_for_install && length(var.install_storage_account_id) == 0 ? 1 : 0 + count = local.use_AFS_for_shared && length(var.install_storage_account_id) == 0 ? 1 : 0 depends_on = [ azurerm_subnet.app, azurerm_subnet.db @@ -602,7 +589,7 @@ resource "azurerm_storage_account_network_rules" "install" { resource "azurerm_private_dns_a_record" "install" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && local.use_Azure_native_DNS && local.use_AFS_for_install && length(var.install_private_endpoint_id) == 0 ? 1 : 0 + count = var.use_private_endpoint && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.install_private_endpoint_id) == 0 ? 1 : 0 name = replace( lower( format("%s", local.landscape_shared_install_storage_account_name) @@ -643,20 +630,14 @@ data "azurerm_private_dns_a_record" "install" { data "azurerm_storage_account" "install" { provider = azurerm.main - count = local.use_AFS_for_install ? ( - length(var.install_storage_account_id) > 0 ? ( - 1) : ( - 0 - )) : ( - 0 - ) + count = local.use_AFS_for_shared && length(var.install_storage_account_id) > 0 ? 1 : 0 name = split("/", var.install_storage_account_id)[8] resource_group_name = split("/", var.install_storage_account_id)[4] } data "azurerm_private_endpoint_connection" "install" { provider = azurerm.main - count = local.use_AFS_for_install ? ( + count = local.use_AFS_for_shared ? ( length(var.install_private_endpoint_id) > 0 ? ( 1) : ( 0 @@ -678,7 +659,7 @@ resource "azurerm_private_endpoint" "install" { azurerm_storage_share.install, azurerm_storage_share.install_smb ] - count = local.use_AFS_for_install && var.use_private_endpoint ? ( + count = local.use_AFS_for_shared && var.use_private_endpoint ? ( length(var.install_private_endpoint_id) > 0 ? ( 0) : ( 1 @@ -742,7 +723,7 @@ resource "azurerm_private_endpoint" "install" { resource "azurerm_storage_share" "install" { provider = azurerm.main - count = local.use_AFS_for_install ? ( + count = local.use_AFS_for_shared ? ( length(var.install_storage_account_id) > 0 ? ( var.install_always_create_fileshares ? 1 : 0) : ( 1 @@ -751,7 +732,7 @@ resource "azurerm_storage_share" "install" { ) name = format("%s", local.resource_suffixes.install_volume) - storage_account_name = var.NFS_provider == "AFS" ? ( + storage_account_name = local.use_AFS_for_shared ? ( length(var.install_storage_account_id) > 0 ? ( split("/", var.install_storage_account_id)[8] ) : ( @@ -766,7 +747,7 @@ resource "azurerm_storage_share" "install" { resource "azurerm_storage_share" "install_smb" { provider = azurerm.main - count = local.use_AFS_for_install ? ( + count = local.use_AFS_for_shared ? ( length(var.install_storage_account_id) > 0 ? ( var.install_always_create_fileshares ? 
1 : 0) : ( 1 @@ -775,7 +756,7 @@ resource "azurerm_storage_share" "install_smb" { ) name = format("%s", local.resource_suffixes.install_volume_smb) - storage_account_name = var.NFS_provider == "AFS" ? ( + storage_account_name = local.use_AFS_for_shared ? ( length(var.install_storage_account_id) > 0 ? ( split("/", var.install_storage_account_id)[8] ) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index 20a210cc47..b623e3bc58 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -281,7 +281,7 @@ variable "enable_firewall_for_keyvaults_and_storage" { description = "Boolea variable "public_network_access_enabled" { description = "Defines if the public access should be enabled for keyvaults and storage accounts" } -variable "use_AFS_for_installation_media" { +variable "use_AFS_for_shared_storage" { description = "If true, will use AFS for installation media." default = false } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index cd27d5c956..202833f57c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -654,6 +654,6 @@ locals { use_Azure_native_DNS = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && !local.SAP_virtualnetwork_exists - use_AFS_for_install = (var.NFS_provider == "ANF" && var.use_AFS_for_installation_media) || var.NFS_provider == "AFS" + use_AFS_for_shared = (var.NFS_provider == "ANF" && var.use_AFS_for_shared_storage) || var.NFS_provider == "AFS" } From ec5c161d8f7a3dc2d0aa2998ee86919085a56256 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Jan 2024 19:39:58 +0200 Subject: [PATCH 132/607] Web dispatcher update (#526) * Remove unnecessary 'become' option from playbook * Update SAP log file paths * Update file_path in playbook_05_04_sap_web_install.yaml --------- Co-authored-by: Kimmo Forss --- deploy/ansible/playbook_05_04_sap_web_install.yaml | 5 ++--- deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/playbook_05_04_sap_web_install.yaml b/deploy/ansible/playbook_05_04_sap_web_install.yaml index 53eea1b254..e4faf2570e 100644 --- a/deploy/ansible/playbook_05_04_sap_web_install.yaml +++ b/deploy/ansible/playbook_05_04_sap_web_install.yaml @@ -62,7 +62,6 @@ name: SAP Installation - WebDispatcher remote_user: "{{ orchestration_ansible_user }}" - become: true gather_facts: true # Important to collect hostvars information vars_files: - vars/ansible-input-api.yaml # API Input template with defaults @@ -80,7 +79,7 @@ sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" always_upload_jinja_templates: false - file_path: /GENERIC/AS/WI_UC + file_path: AS/WI_UC sa_enabled: true tags: @@ -126,7 +125,7 @@ name: roles-sap/7.0.0-post-install vars: suffix: "_WEB" - prefix: "" + prefix: "GENERIC" path: "{{ file_path }}" tier: 'web' this_sid: "{{ sid_to_be_deployed.web_sid | upper }}" diff --git a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml index 
eb4784bea4..173bf85227 100644 --- a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml @@ -70,11 +70,10 @@ become_user: root become: true ansible.builtin.find: - paths: ["/var/log", "/var/log/pacemaker", "/var/log/cluster", "/usr/sap/{{ sap_sid | upper }}"] + paths: ["/var/log", "/var/log/pacemaker", "/var/log/cluster", "/usr/sap/{{ this_sid | upper }}"] patterns: "pacemaker.log,corosync.log,trans.log" file_type: file register: other_log_files - rescue: - name: "Post Installation: Error while acquiring other SAP logs" ansible.builtin.debug: From 7bb59c121eac4a0a483d0c303255152dcc41ef18 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Jan 2024 19:49:40 +0200 Subject: [PATCH 133/607] Fix value assignment in outputs.tf --- .../terraform/terraform-units/modules/sap_landscape/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index d97b7ba553..c376803a16 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -405,7 +405,7 @@ output "saptransport_path" { output "install_path" { description = "Path to the SAP installation volume" - value = try(local.use_AFS_for_shared ? ( + value = local.use_AFS_for_shared ? ( length(var.install_private_endpoint_id) == 0 ? ( var.use_private_endpoint ? format("%s:/%s/%s", try(azurerm_private_endpoint.install[0].private_dns_zone_configs[0].record_sets[0].fqdn, From 75f94ce694441c48f9605b82c1e9484f0521344f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Jan 2024 20:32:41 +0200 Subject: [PATCH 134/607] Add use_AFS_for_installation_media and dns_a_records_for_secondary_names properties --- Webapp/SDAF/Models/LandscapeModel.cs | 2 ++ Webapp/SDAF/Models/SystemModel.cs | 2 +- Webapp/SDAF/ParameterDetails/LandscapeDetails.json | 9 +++++++++ Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt | 3 +++ Webapp/SDAF/ParameterDetails/SystemDetails.json | 9 +++++++++ Webapp/SDAF/ParameterDetails/SystemTemplate.txt | 3 +++ Webapp/SDAF/SDAFWebApp.csproj | 10 +++++----- 7 files changed, 32 insertions(+), 6 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 834803e047..9be3a087c3 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -268,6 +268,8 @@ public bool IsValid() public string NFS_provider { get; set; } + public bool? use_AFS_for_installation_media { get; set; } = true; + public bool? create_transport_storage { get; set; } = true; public int? transport_volume_size { get; set; } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index f3bd3212d4..16d641f524 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -197,7 +197,7 @@ public bool IsValid() public Tag[] configuration_settings { get; set; } - + public bool? 
dns_a_records_for_secondary_names { get; set; } = true; /*---------------------------------------------------------------------------8 | | | Cluster information | diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 01c72b0f47..1c9e2f2164 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -809,6 +809,15 @@ "Overrules": "", "Display": 1 }, + { + "Name": "use_AFS_for_installation_media", + "Required": false, + "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, { "Name": "create_transport_storage", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index a656a56a02..0cf7a89f2c 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -334,6 +334,9 @@ $$dns_server_list$$ # NFS indicates that a custom solution is used for NFS $$NFS_provider$$ +# use_AFS_for_installation_media defines if shared media is on AFS even when using ANF for data +$$use_AFS_for_installation_media$$ + ######################################################################################### # # # Azure NetApp files support # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 0d2c06b99c..36d486ffb5 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -286,6 +286,15 @@ "Overrules": "", "Display": 3 }, + { + "Name": "dns_a_records_for_secondary_names", + "Required": false, + "Description": "Defines if DNS records should be created for the virtual host names.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, { "Name": "use_private_endpoint", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index c8a9673720..4ca284a920 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -451,6 +451,9 @@ $$deploy_application_security_groups$$ # deploy_v1_monitoring_extension Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed $$deploy_v1_monitoring_extension$$ +# dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names +$$dns_a_records_for_secondary_names$$ + ######################################################################################### # # # NFS support # diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 3d3d086e3d..470b14bb80 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -17,17 +17,17 @@ - - + + - + - - + + From 2e6ca5a136187f6387aae5d8c0d5eaddbe2b2f0d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 16 Jan 2024 12:48:10 +0530 Subject: [PATCH 135/607] Update transform.tf to use iscsi subnet NSG if defined --- deploy/terraform/run/sap_landscape/transform.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 8cda150d6b..dea04c3de2 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ 
-350,7 +350,7 @@ locals { "prefix" = try(var.infrastructure.vnets.sap.subnet_iscsi.prefix, var.iscsi_subnet_address_prefix) } ), ( - local.subnet_web_nsg_defined ? ( + local.subnet_iscsi_nsg_defined ? ( { "nsg" = { "name" = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.name, var.iscsi_subnet_nsg_name) From 6f004e0d5e90b047703d3f1ef2be3d061361d51d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Jan 2024 10:04:24 +0200 Subject: [PATCH 136/607] Fix typo in variable name for iSCSI NSG --- .../modules/sap_landscape/variables_local.tf | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 202833f57c..8905439fbb 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -562,11 +562,12 @@ locals { // iSCSI NSG var_sub_iscsi_nsg = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg, {}) - sub_iscsi_nsg_arm_id = try(var.infrastructure.vnets.sap.subnet_iscsi_nsg.arm_id, "") + sub_iscsi_nsg_arm_id = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.arm_id, "") sub_iscsi_nsg_exists = length(local.sub_iscsi_nsg_arm_id) > 0 sub_iscsi_nsg_name = local.sub_iscsi_nsg_exists ? ( - try(split("/", local.sub_iscsi_nsg_arm_id)[8], "")) : ( - try(var.infrastructure.vnets.sap.subnet_iscsi_nsg.name, + try(split("/", local.sub_iscsi_nsg_arm_id)[8], "")) : ( + length(try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.name, "")) > 0 ? ( + var.infrastructure.vnets.sap.subnet_iscsi.nsg.name ) : ( format("%s%s%s%s", var.naming.resource_prefixes.iscsi_subnet_nsg, length(local.prefix) > 0 ? 
( @@ -576,10 +577,8 @@ locals { var.naming.separator, local.resource_suffixes.iscsi_subnet_nsg) ) - ) - input_iscsi_public_key_secret_name = try(var.key_vault.kv_iscsi_sshkey_pub, "") input_iscsi_private_key_secret_name = try(var.key_vault.kv_iscsi_sshkey_prvt, "") input_iscsi_password_secret_name = try(var.key_vault.kv_iscsi_pwd, "") From 18c704b115d96c2fdd3f87a34604fdae8782af45 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Jan 2024 00:56:46 +0200 Subject: [PATCH 137/607] Add SAP HANA backup folder creation task --- .../4.0.0-hdb-install/tasks/main.yaml | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml index 077b209595..57b28315da 100644 --- a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml @@ -153,6 +153,14 @@ state: touch mode: 0755 + - name: "SAP HANA: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + - name: "Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 @@ -195,15 +203,6 @@ - hana_installation.rc is defined - hana_installation.rc < 1 - - name: "SAP HANA: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 - - when: - not hana_installed.stat.exists @@ -218,6 +217,14 @@ ansible.builtin.set_fact: hana_already_installed: true + - name: "SAP HANA: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + - name: "SAP HANA: check if ARM Deployment done" ansible.builtin.stat: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" @@ -259,14 +266,6 @@ when: - not hana_arm_deployment_done.stat.exists - - - name: "SAP HANA: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 when: - hana_installed.stat.exists From 58c9e94c338b38dba44dc1845955c36ce2b830f4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Jan 2024 09:48:32 +0200 Subject: [PATCH 138/607] Refactor dbload task condition in main.yaml --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 14d34a4f28..4cdb041cfb 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -100,7 +100,7 @@ mode: 0755 when: - dbload_performed_according_to_sapinst is defined - - dbload_performed_according_to_sapinst | length > 0 + - dbload_performed_according_to_sapinst.matched == 1 when: - dbload_performed.stat.exists From 047e239ebc6f8d5b1de2199847df42595758cdd0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Jan 2024 13:47:33 +0200 Subject: [PATCH 139/607] Update db_lb_virtual_host in PAS and APP Install tasks --- deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 2 +- deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 18338621f0..f7baff7929 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -38,7 +38,7 @@ - name: "PAS Install: Set BOM facts db host" ansible.builtin.set_fact: - db_lb_virtual_host: "{% if platform == 'HANA' %}{{ db_lb_virtual_host_HANA }}{% else %}{{ db_lb_virtual_host_AnyDB }}{% endif %}" + db_lb_virtual_host: "{% if platform == 'HANA' %}{{ custom_db_virtual_hostname | default(db_lb_virtual_host_HANA, true) }}{% else %}{{ custom_db_virtual_hostname | default(db_lb_virtual_host_AnyDB, true) }}{% endif %}" - name: "PAS Install: - Create directories" ansible.builtin.file: diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index c0526aa27f..6227461b89 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -36,7 +36,7 @@ - name: "APP Install: Set BOM facts db host" ansible.builtin.set_fact: - db_lb_virtual_host: "{% if platform == 'HANA' %}{{ db_lb_virtual_host_HANA }}{% else %}{{ db_lb_virtual_host_AnyDB }}{% endif %}" + db_lb_virtual_host: "{% if platform == 'HANA' %}{{ custom_db_virtual_hostname | default(db_lb_virtual_host_HANA, true) }}{% else %}{{ custom_db_virtual_hostname | default(db_lb_virtual_host_AnyDB, true) }}{% endif %}" - name: "APP Install: - Create directories" ansible.builtin.file: From 877fc68ed6092f3d0697a6da871b345a7a9a5fe8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 22 Jan 2024 15:02:25 +0200 Subject: [PATCH 140/607] Add SAP OS configuration playbook for High Availability services (#528) Co-authored-by: Kimmo Forss --- .../playbook_02_os_sap_specific_config.yaml | 30 ++++++++++++++++--- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 8790078a5e..83c1b8babd 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -165,6 +165,17 @@ tags: - 1.4-packages + - name: "SAP OS configuration playbook: - Ensure the High Availability packages are registered" + when: + - scs_high_availability or database_high_availability + - node_tier in ['scs', 'ers', 'hana', 'db2'] + ansible.builtin.include_role: + name: roles-os/1.4-packages + tags: + - 1.4-packages + vars: + tier: ha + - name: "SAP OS configuration playbook: - Configure volume groups, logical volumes and file systems" ansible.builtin.include_role: name: roles-os/1.5-disk-setup @@ -180,14 +191,14 @@ tags: - 1.5.1.1-disk-setup-asm-sap - - name: Include 1.5.2-disk-setup-ora-multi-sid role + - name: "SAP OS configuration playbook: - Configure the disks for Oracle Multi SID" ansible.builtin.include_role: name: roles-os/1.5.2-disk-setup-ora-multi-sid when: node_tier == "oracle-multi-sid" tags: - 1.5.2-disk-setup-ora-multi-sid - - name: Include 1.9-kernelparameters role + - name: "SAP OS configuration playbook: - Configure the kernel parameters" ansible.builtin.include_role: name: roles-os/1.9-kernelparameters tags: @@ -199,12 +210,23 @@ tags: - 2.5-sap-users - - name: Include 1.16-services role + - name: "SAP OS configuration playbook: - Ensure the services are configured" ansible.builtin.include_role: name: roles-os/1.16-services tags: - 1.16-services + - 
name: "SAP OS configuration playbook: - Ensure the High Availability services are configured" + when: + - scs_high_availability or database_high_availability + - node_tier in ['scs', 'ers', 'hana', 'db2'] + ansible.builtin.include_role: + name: roles-os/1.16-services + tags: + - 1.16-services + vars: + tier: ha + - name: "SAP OS configuration playbook: - directory permissions" ansible.builtin.include_role: name: roles-sap-os/2.2-sapPermissions @@ -223,7 +245,7 @@ tags: - 2.3-sap-exports - - name: Include 2.6-sap-mounts role + - name: "SAP OS configuration playbook: - Mount the file systems" ansible.builtin.include_role: name: roles-sap-os/2.6-sap-mounts tags: From 2536deb4412ffb30573760ecd450653191a3542e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 24 Jan 2024 10:43:34 +0200 Subject: [PATCH 141/607] Provide a way to perform all deployments using a User Assigned Identity (#530) * TF updates to enable deployments based on MSI. * Ansible updates to properly use the custom_ names during HA deployments. * Updates to webapp to reflect the TF updates for MSI based deployments. --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/LandscapeModel.cs | 18 +- Webapp/SDAF/Models/SystemModel.cs | 7 + .../ParameterDetails/LandscapeDetails.json | 17 +- .../ParameterDetails/LandscapeTemplate.txt | 21 +- .../SDAF/ParameterDetails/SystemDetails.json | 12 +- .../SDAF/ParameterDetails/SystemTemplate.txt | 30 +- Webapp/SDAF/SDAFWebApp.csproj | 2 +- deploy/ansible/configuration_menu.sh | 10 +- .../playbook_07_00_00_post_installation.yaml | 6 +- .../roles-sap/5.1-dbload/tasks/main.yaml | 19 +- .../roles-sap/5.2-pas-install/tasks/main.yaml | 15 + .../roles-sap/5.3-app-install/tasks/main.yaml | 47 +- deploy/pipelines/01-deploy-control-plane.yaml | 169 +++-- deploy/pipelines/02-sap-workload-zone.yaml | 159 ++-- .../pipelines/03-sap-system-deployment.yaml | 33 +- deploy/pipelines/10-remover-terraform.yaml | 30 +- deploy/pipelines/11-remover-arm-fallback.yaml | 309 ++++---- deploy/pipelines/12-remove-control-plane.yaml | 685 ++++++++++-------- .../12-remove-control-plane-variables.yaml | 4 + deploy/scripts/New-SDAFDevopsProject.ps1 | 384 +++++----- deploy/scripts/configure_deployer.sh | 2 +- deploy/scripts/deploy_controlplane.sh | 155 ++-- deploy/scripts/deploy_utils.sh | 10 +- deploy/scripts/install_workloadzone.sh | 170 +++-- deploy/scripts/installer.sh | 10 +- deploy/scripts/set_secrets.sh | 282 +++---- deploy/scripts/setup_devops.ps1 | 2 +- .../bootstrap/sap_deployer/tfvar_variables.tf | 31 +- .../bootstrap/sap_deployer/transform.tf | 1 + .../bootstrap/sap_library/imports.tf | 12 +- .../bootstrap/sap_library/providers.tf | 14 +- .../bootstrap/sap_library/tfvar_variables.tf | 4 + .../bootstrap/sap_library/variables_local.tf | 14 +- deploy/terraform/run/sap_deployer/imports.tf | 6 +- .../terraform/run/sap_deployer/providers.tf | 12 +- .../run/sap_deployer/tfvar_variables.tf | 31 +- .../terraform/run/sap_deployer/transform.tf | 2 +- .../run/sap_deployer/variables_local.tf | 8 +- deploy/terraform/run/sap_landscape/imports.tf | 8 +- deploy/terraform/run/sap_landscape/output.tf | 6 + .../terraform/run/sap_landscape/providers.tf | 12 +- deploy/terraform/run/sap_library/imports.tf | 12 +- deploy/terraform/run/sap_library/providers.tf | 12 +- .../run/sap_library/tfvar_variables.tf | 7 +- .../run/sap_library/variables_local.tf | 85 +-- deploy/terraform/run/sap_system/imports.tf | 128 ++-- deploy/terraform/run/sap_system/providers.tf | 58 +- deploy/terraform/run/sap_system/transform.tf | 7 +- 
.../run/sap_system/variables_local.tf | 8 +- .../modules/sap_deployer/app_service.tf | 4 +- .../sap_deployer/configure-deployer.tf | 4 +- .../modules/sap_deployer/infrastructure.tf | 6 +- .../modules/sap_deployer/key_vault.tf | 20 +- .../modules/sap_deployer/output.tf | 2 +- .../modules/sap_deployer/vm-deployer.tf | 21 +- .../sap_landscape/key_vault_sap_landscape.tf | 42 +- .../modules/sap_landscape/variables_local.tf | 4 +- .../sap_system/app_tier/variables_local.tf | 12 +- .../modules/sap_system/app_tier/vm-app.tf | 7 +- .../common_infrastructure/infrastructure.tf | 4 +- .../modules/sap_system/hdb_node/avg.tf | 14 +- 61 files changed, 1822 insertions(+), 1404 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 9be3a087c3..4e71f05efd 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -326,7 +326,7 @@ public bool IsValid() | | +------------------------------------4--------------------------------------*/ - public int iscsi_count { get; set; } = 0; + public int? iscsi_count { get; set; } = 0; public string iscsi_size { get; set; } = "Standard_D2s_v3"; @@ -341,7 +341,23 @@ public bool IsValid() public string[] iscsi_nic_ips { get; set; } + /*---------------------------------------------------------------------------8 + | | + | Identity | + | | + +------------------------------------4--------------------------------------*/ + [UserAssignedIdentityIdValidator(ErrorMessage = "Invalid User Assigned id")] public string user_assigned_identity_id { get; set; } + + + /*---------------------------------------------------------------------------8 + | | + | Deployment | + | | + +------------------------------------4--------------------------------------*/ + + public bool? use_spn{ get; set; } = true; + } } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 16d641f524..d7e0d9de12 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -562,6 +562,13 @@ public bool IsValid() public bool? enable_purge_control_for_keyvaults { get; set; } = false; + /*---------------------------------------------------------------------------8 + | | + | Deployment | + | | + +------------------------------------4--------------------------------------*/ + public bool? 
use_spn { get; set; } = true; + } public class Tag diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 1c9e2f2164..8115bb5374 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -35,7 +35,7 @@ }, { "Section": "Environment", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#resource-group-parameters", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#environment-parameters", "Parameters": [ { "Name": "environment", @@ -112,6 +112,21 @@ "Overrules": "", "Display": 2 }, + { + "Name": "use_spn", + "Required": false, + "Description": "If set, the deployment is performed using the Service Principal defined for the workload zone, otherwise the managed identity of the deployer is used", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] + }, + { + "Section": "Resource Group", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#resource-group-parameters", + "Parameters": [ { "Name": "resourcegroup_name", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 0cf7a89f2c..e9adea0c32 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -458,21 +458,24 @@ $$iscsi_vm_zones$$ # user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines $$user_assigned_identity_id$$ -########################################################################################## -# # -# Terraform deployment parameters (internal) # -# # -########################################################################################## - -# - tfstate_resource_id is the Azure resource identifier for the Storage account in the SAP Library -# that will contain the Terraform state files -# - deployer_tfstate_key is the state file name for the deployer +######################################################################################### +# # +# Terraform deployment parameters # +# # +######################################################################################### + # These are required parameters, if using the deployment scripts they will be auto populated otherwise they need to be entered +# tfstate_resource_id is the Azure resource identifier for the Storage account in the SAP Library +# that will contain the Terraform state files $$tfstate_resource_id$$ +# deployer_tfstate_key is the state file name for the deployer $$deployer_tfstate_key$$ +# use_spn defines if the deployments are performed using Service Principals or the deployer's managed identity, true=SPN, false=MSI +$$use_spn$$ ######################################################################################### # # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 36d486ffb5..d2feef0c54 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -48,7 +48,8 @@ "Value": "" } ], - "Overrules": "" + "Overrules": "", + "Display": 1 } ] }, @@ -175,6 +176,15 @@ "Options": [], "Overrules": "", "Display": 2 + }, + { + "Name": "use_spn", + "Required": false, + "Description": "If set, the deployment is performed using the Service Principal 
defined for the workload zone, otherwise the managed identity of the deployer is used", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + } ] }, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 4ca284a920..9983ef172f 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -619,25 +619,29 @@ $$vm_disk_encryption_set_id$$ # nsg_asg_with_vnet if set controls where the Application Security Groups are created $$nsg_asg_with_vnet$$ +######################################################################################### # RESOURCE GROUP # The two resource group name and arm_id can be used to control the naming and the creation of the resource group # The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned # The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment +######################################################################################### $$resourcegroup_name$$ $$resourcegroup_arm_id$$ -# PPG -# The proximity placement group names and arm_ids are optional can be used to -# control the naming and the creation of the proximity placement groups -# The proximityplacementgroup_names list value is optional, -# it can be used to override the name of the proximity placement groups that will be provisioned -# The proximityplacementgroup_arm_ids list value is optional, -# it can be used to provide an existing proximity placement groups for the deployment +######################################################################################### +# # +# PPG # +# The proximity placement group names and arm_ids are optional can be used to +# control the naming and the creation of the proximity placement groups +# # +######################################################################################### +# If provided, names of the proximity placement groups $$proximityplacementgroup_names$$ +# If provided, azure resource ids for the proximity placement groups $$proximityplacementgroup_arm_ids$$ # Boolean value indicating if an proximity placement group should be used for the app tier VMs @@ -789,22 +793,24 @@ $$anchor_vm_authentication_username$$ ######################################################################################### # # -# Terraform deploy parameters # +# Terraform deployment parameters # # # ######################################################################################### -# - tfstate_resource_id is the Azure resource identifier for the Storage account in the SAP Library -# that will contain the Terraform state files -# - deployer_tfstate_key is the state file name for the deployer -# - landscape_tfstate_key is the state file name for the workload deployment # These are required parameters, if using the deployment scripts they will be auto populated otherwise they need to be entered +# tfstate_resource_id is the Azure resource identifier for the Storage account in the SAP Library +# that will contain the Terraform state files $$tfstate_resource_id$$ +# deployer_tfstate_key is the state file name for the deployer $$deployer_tfstate_key$$ +# landscape_tfstate_key is the state file name for the workload deployment $$landscape_tfstate_key$$ +# use_spn defines if the deployments are performed using Service Principals or the deployer's managed identity, true=SPN, false=MSI +$$use_spn$$ 
######################################################################################### # # diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 470b14bb80..581e83a61e 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -20,7 +20,7 @@ - + diff --git a/deploy/ansible/configuration_menu.sh b/deploy/ansible/configuration_menu.sh index 348a56f0c5..ab15a71e10 100755 --- a/deploy/ansible/configuration_menu.sh +++ b/deploy/ansible/configuration_menu.sh @@ -46,12 +46,12 @@ fi # the inventory file name to use. sap_sid="$(awk '$1 == "sap_sid:" {print $2}' ${sap_params_file})" -kv_name="$(awk '$1 == "kv_name:" {print $2}' ${sap_params_file})" +workload_vault_name="$(awk '$1 == "kv_name:" {print $2}' ${sap_params_file})" prefix="$(awk '$1 == "secret_prefix:" {print $2}' ${sap_params_file})" password_secret_name=$prefix-sid-password -password_secret=$(az keyvault secret show --vault-name ${kv_name} --name ${password_secret_name} | jq -r .value) +password_secret=$(az keyvault secret show --vault-name ${workload_vault_name} --name ${password_secret_name} --query value --output tsv) export ANSIBLE_PASSWORD=$password_secret # # Ansible configuration settings. @@ -107,11 +107,10 @@ options=( "Database Load" "Database High Availability Setup" "Primary Application Server installation" - "Oracle High Availability Setup" "Application Server installations" "Web Dispatcher installations" - "HCMT" "ACSS Registration" + "HCMT" # Special menu entries "BOM Download" @@ -133,15 +132,14 @@ all_playbooks=( ${cmd_dir}/playbook_05_01_sap_dbload.yaml ${cmd_dir}/playbook_04_00_01_db_ha.yaml ${cmd_dir}/playbook_05_02_sap_pas_install.yaml - ${cmd_dir}/playbook_04_02_00_oracle_ha_setup.yaml # Post SAP Install Steps ${cmd_dir}/playbook_05_03_sap_app_install.yaml ${cmd_dir}/playbook_05_04_sap_web_install.yaml - ${cmd_dir}/playbook_04_00_02_db_hcmt.yaml ${cmd_dir}/playbook_06_00_acss_registration.yaml + ${cmd_dir}/playbook_04_00_02_db_hcmt.yaml ${cmd_dir}/playbook_bom_downloader.yaml ${cmd_dir}/playbook_07_00_00_post_installation.yaml ) diff --git a/deploy/ansible/playbook_07_00_00_post_installation.yaml b/deploy/ansible/playbook_07_00_00_post_installation.yaml index 14ae9f48e4..e7c70e74aa 100644 --- a/deploy/ansible/playbook_07_00_00_post_installation.yaml +++ b/deploy/ansible/playbook_07_00_00_post_installation.yaml @@ -44,7 +44,11 @@ - hosts: "{{ sap_sid | upper }}_SCS : - {{ sap_sid | upper }}_DB" + {{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP : + {{ sap_sid | upper }}_WEB" + name: Post Installation remote_user: "{{ orchestration_ansible_user }}" become: true diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 4cdb041cfb..1b994859cb 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -139,7 +139,7 @@ - name: "DBLoad: - Set the server facts" ansible.builtin.set_fact: scs_server: "{% if scs_high_availability %}{{ sid_to_be_deployed.sid | lower }}scs{{ scs_instance_number }}cl1{% else %}{{ hostvars[scs_server_temp | first]['virtual_host'] }}{% endif %}" - db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" + db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" - name: "DBLoad: check media exists" ansible.builtin.stat: @@ -396,6 +396,23 @@ vars: allow_world_readable_tmpfiles: true + - name: "DBLoad: Set DB Virtual Host name ({{ 
custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" + become: true + become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + when: + - database_high_availability + - platform == 'HANA' + - hdbuserstore_path is defined + ansible.builtin.shell: | + {{ hdbuserstore_path }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} + environment: + SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + TMPDIR: "{{ hdbuserstore_path }}" + ssfs_connect: "1" + register: hdbuserstore + vars: + allow_world_readable_tmpfiles: true + when: - not dbload_installed.stat.exists diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index f7baff7929..95db069bc7 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -286,6 +286,21 @@ - database_high_availability - platform == 'HANA' + - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" + become: true + become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + ansible.builtin.shell: | + {{ hdbuserstore_path }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} + environment: + SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + ssfs_connect: "1" + register: hdbuserstore + vars: + allow_world_readable_tmpfiles: true + when: + - database_high_availability + - platform == 'HANA' + - name: "PAS Install" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 6227461b89..dbed7ba262 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -33,6 +33,7 @@ dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | upper }}-params" db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" + app_virtual_hostname: "{{ custom_app_virtual_hostname | default(virtual_host, true) }}" - name: "APP Install: Set BOM facts db host" ansible.builtin.set_fact: @@ -129,7 +130,7 @@ - name: "APP Install: Set the SCS Server name" ansible.builtin.set_fact: scs_server: "{% if scs_high_availability %}{{ sid_to_be_deployed.sid | lower }}scs{{ scs_instance_number }}cl1{% else %}{{ hostvars[scs_server_temp | first]['virtual_host'] }}{% endif %}" - db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" + db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" file_path: "{% if scs_high_availability %}INSTALL/HA/ABAP/APPX{% else %}INSTALL/DISTRIBUTED/ABAP/APPS{% endif %}" - name: "APP Install: check media exists" @@ -165,7 +166,7 @@ sap_scs_hostname: "{{ custom_scs_virtual_hostname | default(scs_server, true) }}" sap_db_hostname: "{{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}" 
sap_ciVirtualHostname: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_APP') | first }}" - sap_appVirtualHostname: "{{ virtual_host }}" + sap_appVirtualHostname: "{{ app_virtual_hostname }}" param_directory: "{{ dir_params }}" sap_sid: "{{ sid_to_be_deployed.sid }}" sidadm_uid: "{{ sid_to_be_deployed.sidadm_uid }}" @@ -184,7 +185,7 @@ - "INIFILE: {{ sap_inifile }}" - "PRODUCT ID: {{ bom.product_ids.app }}" - "DBHOST: {{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}" - - "HOST: {{ virtual_host }}" + - "HOST: {{ app_virtual_hostname }}" - "SID: {{ sid_to_be_deployed.sid | upper }}" - name: "APP Install: HANA HSR - Update Profile" @@ -215,7 +216,7 @@ ./sapinst SAPINST_INPUT_PARAMETERS_URL={{ dir_params }}/{{ sap_inifile }} \ SAPINST_EXECUTE_PRODUCT_ID={{ app_bom_id }} \ SAPINST_SKIP_DIALOGS=true \ - SAPINST_USE_HOSTNAME={{ virtual_host }} \ + SAPINST_USE_HOSTNAME={{ app_virtual_hostname }} \ SAPINST_START_GUISERVER=false args: chdir: "{{ target_media_location }}/SWPM" @@ -286,7 +287,7 @@ - name: "APP Install: Set DB Virtual Host name" become: true become_user: "{{ sid_to_be_deployed.sid | lower }}adm" - ansible.builtin.shell: "{{ hdbuserstore_path }} -H {{ virtual_host }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" + ansible.builtin.shell: "{{ hdbuserstore_path }} -H {{ app_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" ssfs_connect: "1" @@ -296,8 +297,24 @@ when: - database_high_availability - platform == 'HANA' - - pas_installed_according_to_sapinst is defined - - pas_installed_according_to_sapinst | length > 0 + - app_installed_according_to_sapinst is defined + - app_installed_according_to_sapinst | length > 0 + + - name: "APP Install: Set DB Virtual Host name" + become: true + become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + ansible.builtin.shell: "{{ hdbuserstore_path }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" + environment: + SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + ssfs_connect: "1" + register: hdbuserstore + vars: + allow_world_readable_tmpfiles: true + when: + - database_high_availability + - platform == 'HANA' + - app_installed_according_to_sapinst is defined + - app_installed_according_to_sapinst | length > 0 - name: "APP Install: - status" block: @@ -337,7 +354,21 @@ - platform == 'HANA' - name: "APP Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" - ansible.builtin.shell: "{{ hdbuserstore_path }} -H {{ virtual_host }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" + ansible.builtin.shell: "{{ hdbuserstore_path }} -H {{ app_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" + environment: + SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + ssfs_connect: "1" + register: hdbuserstore + become: true + become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + 
vars: + allow_world_readable_tmpfiles: true + when: + - database_high_availability + - platform == 'HANA' + + - name: "APP Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" + ansible.builtin.shell: "{{ hdbuserstore_path }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }}" environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" ssfs_connect: "1" diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 3794e516c7..693ead38a7 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -72,7 +72,15 @@ stages: steps: - template: templates\download.yaml - task: PostBuildCleanup@3 - - bash: | + # Set Variables. + - task: AzureCLI@2 + continueOnError: false + inputs: + azureSubscription: ${{parameters.connection_name}} + scriptType: bash + scriptLocation: inlineScript + addSpnToEnvironment: true + inlineScript: | #!/bin/bash echo "##vso[build.updatebuildnumber]Deploying the control plane defined in $(deployerfolder) $(libraryfolder)" @@ -117,13 +125,13 @@ stages: key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} fi - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + # az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + # return_code=$? + # if [ 0 != $return_code ]; then + # echo -e "$boldred--- Login failed ---$reset" + # echo "##vso[task.logissue type=error]az login failed." + # exit $return_code + # fi key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) export TF_VAR_deployer_kv_user_arm_id=${key_vault_id} @@ -162,22 +170,6 @@ stages: if [ -z ${TF_VAR_ansible_core_version} ]; then export TF_VAR_ansible_core_version=2.15 fi - if [ -z ${ARM_SUBSCRIPTION_ID} ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - if [ -z ${ARM_CLIENT_ID} ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi - if [ -z ${ARM_CLIENT_SECRET} ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 - fi - if [ -z ${ARM_TENANT_ID} ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 - fi export TF_VAR_use_webapp=$(use_webapp) echo -e "$green--- Update .sap_deployment_automation/config as SAP_AUTOMATION_REPO_PATH can change on devops agent ---$reset" cd $CONFIG_REPO_PATH @@ -217,13 +209,6 @@ stages: echo -e "$green--- Configuring variables ---$reset" deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}$LOCATION echo -e "$green--- az login ---$reset" - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? 
- if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi az account set --subscription $ARM_SUBSCRIPTION_ID echo -e "$green--- Deploy the Control Plane ---$reset" if [ -n $(PAT) ]; then @@ -241,7 +226,9 @@ stages: export TF_VAR_agent_pat=$(PAT) fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - pass=$(echo $ARM_CLIENT_SECRET | sed 's/-//g') + deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) + + pass=$(echo $deployer_random_id | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi @@ -266,12 +253,22 @@ stages: export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log set +eu - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ - --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ - --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ - --subscription $ARM_SUBSCRIPTION_ID --spn_id $ARM_CLIENT_ID \ - --spn_secret $ARM_CLIENT_SECRET --tenant_id $ARM_TENANT_ID \ - --auto-approve --ado --only_deployer + if [ $USE_MSI == "true" ]; then + export ARM_CLIENT_SECRET=$servicePrincipalKey + + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ + --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ + --subscription $ARM_SUBSCRIPTION_ID --auto-approve --ado --only_deployer --msi + else + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ + --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ + --subscription $ARM_SUBSCRIPTION_ID --spn_id $ARM_CLIENT_ID \ + --spn_secret $ARM_CLIENT_SECRET --tenant_id $ARM_TENANT_ID \ + --auto-approve --ado --only_deployer + + fi return_code=$? echo "Return code from deploy_controlplane $return_code." 
@@ -291,6 +288,7 @@ stages: echo 'Deployer Key Vault' ${file_key_vault} deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) library_random_id=$(cat ${deployer_environment_file_name} | grep library_random_id= | awk -F'=' '{print $2}' | xargs) + echo 'Deployer Random ID' ${deployer_random_id} fi echo -e "$green--- Update repo ---$reset" @@ -304,7 +302,7 @@ stages: fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then sudo apt install zip - pass=$(echo $ARM_CLIENT_SECRET | sed 's/-//g') + pass=$(echo $deployer_random_id | sed 's/-//g') zip -j -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip added=1 @@ -346,6 +344,15 @@ stages: az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneLocation --value ${LOCATION} --output none --only-show-errors fi + if [ -n $deployer_random_id ] ; then + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value") + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name DEPLOYER_RANDOM_ID_SEED --value ${deployer_random_id} --output none --only-show-errors + else + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name DEPLOYER_RANDOM_ID_SEED --value ${deployer_random_id} --output none --only-show-errors + fi + fi + fi exit $return_code @@ -372,8 +379,7 @@ stages: TF_LOG: $(TF_LOG) TF_IN_AUTOMATION: true DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" - - failOnStderr: false + USE_MSI: $(Use_MSI) - stage: Deploy_controlplane dependsOn: @@ -451,6 +457,15 @@ stages: fi fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" | tr -d \") + if [ -n "${az_var}" ]; then + deployer_random_id="${az_var}" + else + if [ -f ${deployer_environment_file_name} ] ; then + deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) + fi + fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") if [ -n "${az_var}" ]; then REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA @@ -480,24 +495,28 @@ stages: if [ -z ${TF_VAR_ansible_core_version} ]; then export TF_VAR_ansible_core_version=2.15 fi - if [ -z ${CP_ARM_SUBSCRIPTION_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - if [ -z ${CP_ARM_CLIENT_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." - exit 2 - fi + if [ $USE_MSI != "true" ]; then - if [ -z ${CP_ARM_CLIENT_SECRET} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." - exit 2 - fi + if [ -z ${CP_ARM_SUBSCRIPTION_ID} ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." + exit 2 + fi - if [ -z ${CP_ARM_TENANT_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." - exit 2 + if [ -z ${CP_ARM_CLIENT_ID} ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." 
+ exit 2 + fi + + if [ -z ${CP_ARM_CLIENT_SECRET} ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." + exit 2 + fi + + if [ -z ${CP_ARM_TENANT_ID} ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." + exit 2 + fi fi if [ $(use_webapp) == "true" ]; then @@ -597,7 +616,7 @@ stages: az account set --subscription $CP_ARM_SUBSCRIPTION_ID else - if [ $LOGON_USING_SPN == "true" ]; then + if [ $USE_MSI != "true" ]; then echo "Login using SPN" export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET @@ -613,6 +632,7 @@ stages: fi else source /etc/profile.d/deploy_server.sh + export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID fi fi @@ -667,12 +687,12 @@ stages: fi if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip -d ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder) fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi @@ -680,13 +700,24 @@ stages: sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ - --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ - --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ - --subscription $CP_ARM_SUBSCRIPTION_ID --spn_id $CP_ARM_CLIENT_ID \ - --spn_secret $CP_ARM_CLIENT_SECRET --tenant_id $CP_ARM_TENANT_ID \ - --auto-approve --ado \ - ${storage_account_parameter} ${keyvault_parameter} + if [ $USE_MSI == "true" ]; then + echo "Using MSI" + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ + --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ + --subscription $ARM_SUBSCRIPTION_ID \ + --auto-approve --ado --msi \ + ${storage_account_parameter} ${keyvault_parameter} + else + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ + --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ + --subscription $CP_ARM_SUBSCRIPTION_ID --spn_id $CP_ARM_CLIENT_ID \ + --spn_secret $CP_ARM_CLIENT_SECRET --tenant_id $CP_ARM_TENANT_ID \ + --auto-approve --ado \ + ${storage_account_parameter} ${keyvault_parameter} + fi + return_code=$? 
if [ 0 != $return_code ]; then @@ -740,7 +771,7 @@ stages: if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then sudo apt install zip echo "Compressing the deployer state file" - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') zip -j -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip added=1 @@ -764,7 +795,7 @@ stages: if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then sudo apt install zip echo "Compressing the library state file" - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') zip -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip added=1 @@ -877,6 +908,8 @@ stages: TF_IN_AUTOMATION: true DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) + DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) failOnStderr: false diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 7904f72b98..4c601b60ca 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -381,19 +381,21 @@ stages: unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ rm -f terraform_$(tf_version)_linux_amd64.zip - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false + if [ $USE_MSI != "true" ]; then + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false - echo -e "$green--- az login ---$reset" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + echo -e "$green--- az login ---$reset" + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi else @@ -413,77 +415,81 @@ stages: exit $return_code fi - echo -e "$green --- Set secrets ---$reset" + if [ $USE_MSI != "true" ]; then + echo -e "$green --- Set secrets ---$reset" - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ - --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ - --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION - secrets_set=$? 
; echo -e "$cyan Set Secrets returned $secrets_set $reset" - az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ + --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ + --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION + secrets_set=$? ; echo -e "$cyan Set Secrets returned $secrets_set $reset" + az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none + fi fi debug_variable='--output none' debug_variable='' - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + if [ $USE_MSI != "true" ]; then + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) + isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) - tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) + tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) - if [ -n "${isUserAccessAdmin}" ]; then - - echo -e "$green--- Set permissions ---$reset" - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo -e "$green --- Assign subscription permissions to $perms ---$reset" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none - fi - - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --output none - fi + if [ -n "${isUserAccessAdmin}" ]; then - resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) + echo -e "$green--- Set permissions ---$reset" + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo -e "$green --- Assign subscription permissions to $perms ---$reset" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID 
--assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none + fi - if [ -n ${resource_group_name} ]; then - for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) if [ -z "$perms" ]; then - echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none + echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --output none fi - done - fi - resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) + resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) if [ -n ${resource_group_name} ]; then - resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) + for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none + fi + done + fi - vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") - if [ -n "${vnet_resource_id}" ]; then - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) + resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) - if [ -z "$perms" ]; then - echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope $vnet_resource_id --output none + if [ -n ${resource_group_name} ]; then + resource_group_id=$(az group show --name ${resource_group_name} --subscription 
${STATE_SUBSCRIPTION} --query id -o tsv)
+
+              vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]")
+              if [ -n "${vnet_resource_id}" ]; then
+                perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors)
+
+                if [ -z "$perms" ]; then
+                  echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}"
+                  az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope $vnet_resource_id --output none
+                fi
               fi
-          fi
+            fi
+          else
+            echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terraform state storage account and, if needed, on the Private DNS zone and the source management network resource"
           fi
-        else
-          echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource"
         fi

         echo -e "$green--- Deploy the workload zone ---$reset"
         cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)
         if [ -f /etc/profile.d/deploy_server.sh ]; then
-          az logout --output none
           if [ $LOGON_USING_SPN == "true" ]; then
             echo "Logon Using SPN"
+            az logout --output none
             export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
             export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
             export ARM_TENANT_ID=$WL_ARM_TENANT_ID
@@ -501,26 +507,36 @@ stages:
             az login --identity --allow-no-subscriptions --output none
           fi
         else
-          export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
-          export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
-          export ARM_TENANT_ID=$WL_ARM_TENANT_ID
-          export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
-          export ARM_USE_MSI=false
-          az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
-          return_code=$?
-          if [ 0 != $return_code ]; then
-            echo -e "$boldred--- Login failed ---$reset"
-            echo "##vso[task.logissue type=error]az login failed."
-            exit $return_code
+          if [ $USE_MSI != "true" ]; then
+            az logout --output none
+            export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
+            export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
+            export ARM_TENANT_ID=$WL_ARM_TENANT_ID
+            export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
+            export ARM_USE_MSI=false
+            az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
+            return_code=$?
+            if [ 0 != $return_code ]; then
+              echo -e "$boldred--- Login failed ---$reset"
+              echo "##vso[task.logissue type=error]az login failed."
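+              # "##vso[task.logissue type=error]..." is an Azure DevOps logging command; it
+              # surfaces the message as an error annotation on the pipeline run before exiting.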
+ exit $return_code + fi fi fi - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado + if [ $USE_MSI != "true" ]; then + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado + else + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi + fi return_code=$? echo "Return code: ${return_code}" @@ -668,4 +684,5 @@ stages: SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) failOnStderr: false diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index aad8d8bed2..3b0858506b 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -235,29 +235,10 @@ stages: exit 2 fi - # Check if running on deployer - if [ -f /etc/profile.d/deploy_server.sh ]; then - az logout --output none - if [ $LOGON_USING_SPN == "true" ]; then - echo "Using SPN" - - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - else - export ARM_USE_MSI=true - az login --identity --allow-no-subscriptions --output none - fi - else + az logout --output none + if [ $LOGON_USING_SPN == "true" ]; then + echo "Using SPN" + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET export ARM_TENANT_ID=$WL_ARM_TENANT_ID @@ -270,7 +251,11 @@ stages: echo "##vso[task.logissue type=error]az login failed." 
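            # Propagate the az login exit status so the pipeline step itself is marked failed.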
exit $return_code fi - + else + export ARM_USE_MSI=true + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_TENANT_ID + az login --identity --allow-no-subscriptions --output none fi echo -e "$green--- Define variables ---$reset" diff --git a/deploy/pipelines/10-remover-terraform.yaml b/deploy/pipelines/10-remover-terraform.yaml index b2ad671f15..9d43b3f720 100644 --- a/deploy/pipelines/10-remover-terraform.yaml +++ b/deploy/pipelines/10-remover-terraform.yaml @@ -280,15 +280,29 @@ stages: # Check if running on deployer if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then - az login --service-principal --username $(WL_ARM_CLIENT_ID) --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + if [ $LOGON_USING_SPN == "true" ]; then + echo "Logon Using SPN" + + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + else + export ARM_USE_MSI=true + az login --identity --allow-no-subscriptions --output none fi else echo -e "$green --- Running on deployer ---$reset" + source /etc/profile.d/deploy_server.sh + export ARM_USE_MSI=true fi echo -e "$green--- Set variables ---$reset" @@ -396,6 +410,8 @@ stages: AZURE_DEVOPS_EXT_PAT: $(WZ_PAT) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) failOnStderr: false @@ -721,5 +737,7 @@ stages: AZURE_DEVOPS_EXT_PAT: $(WZ_PAT) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) failOnStderr: false diff --git a/deploy/pipelines/11-remover-arm-fallback.yaml b/deploy/pipelines/11-remover-arm-fallback.yaml index 0cfc10354e..2d79d62a6e 100644 --- a/deploy/pipelines/11-remover-arm-fallback.yaml +++ b/deploy/pipelines/11-remover-arm-fallback.yaml @@ -297,176 +297,193 @@ stages: - group: SDAF-${{ parameters.deployer_environment }} steps: - template: templates\download.yaml - - bash: | - #!/bin/bash - green="\e[1;32m" ; reset="\e[0m" - echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt - - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none - if [ -n $(PAT) ]; then - export AZURE_DEVOPS_EXT_PAT=$(PAT) - else - export AZURE_DEVOPS_EXT_PAT=$(System.AccessToken) - fi - return_code=0 - - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo "Variable group: " $VARIABLE_GROUP_ID - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "CP_ARM_SUBSCRIPTION_ID.value" | tr -d \") - if [ -z $variable_value ]; then - subscription=$ARM_SUBSCRIPTION_ID - 
else - subscription=$variable_value - fi - - echo -e "$green--- az login ---$reset" - if [ -z $(CP_ARM_SUBSCRIPTION_ID) ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - if [ -z $(CP_ARM_CLIENT_ID) ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." - exit 2 - fi - if [ -z $(CP_ARM_CLIENT_SECRET) ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." - exit 2 - fi - if [ -z $(CP_ARM_TENANT_ID) ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." - exit 2 - fi - - az login --service-principal --username $(CP_ARM_CLIENT_ID) --password=$(CP_ARM_CLIENT_SECRET) --tenant $(CP_ARM_TENANT_ID) --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - - echo "Subscription: $subscription" - - az account set --subscription $(CP_ARM_SUBSCRIPTION_ID) - - rg=$(az group list --query "[?name=='$(library_folder)'].name | [0]" | grep $(library_folder)) - if [ ${#rg} != 0 ]; then - echo "Deleting resource group: $(library_folder)" - az group delete --name $(library_folder) --yes --only-show-errors - return_code=$? - else - echo "Resource group $(library_folder) does not exist." - fi - - rg=$(az group list --query "[?name=='$(deployer_folder)'].name | [0]" | grep $(deployer_folder)) - if [ ${#rg} != 0 ]; then - echo "Deleting resource group: $(deployer_folder)" - az group delete --name $(deployer_folder) --yes --only-show-errors - return_code=$? - else - echo "Resource group $(deployer_folder) does not exist" - fi - - - echo -e "$green--- Removing deployment automation configuration from devops repository ---$reset" - export ENVIRONMENT=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs) ; echo Environment $ENVIRONMENT - export LOCATION=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs) ; echo Location $LOCATION - - if [ 0 == $return_code ] ; then - cd $CONFIG_REPO_PATH - git checkout -q $(Build.SourceBranchName) - git pull - changed=0 - echo "##vso[build.updatebuildnumber]Removing control plane $(deployer_folder) $(library_folder)" - if [ -d "DEPLOYER/$(deployer_folder)/.terraform" ]; then - git rm -q -r --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform - changed=1 - fi - - if [ -f "DEPLOYER/$(deployer_folder)/state.zip" ]; then - git rm -q --ignore-unmatch DEPLOYER/$(deployer_folder)/state.zip - changed=1 + - task: AzureCLI@2 + continueOnError: false + inputs: + azureSubscription: ${{ parameters.workload_zone_connection }} + scriptType: bash + scriptLocation: inlineScript + addSpnToEnvironment: true + inlineScript: | + #!/bin/bash + green="\e[1;32m" ; reset="\e[0m" + echo -e "$green--- Configure devops CLI extension ---$reset" + az config set extension.use_dynamic_install=yes_without_prompt + + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + if [ -n $(PAT) ]; then + export AZURE_DEVOPS_EXT_PAT=$(PAT) + else + export AZURE_DEVOPS_EXT_PAT=$(System.AccessToken) + fi + return_code=0 + + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo "Variable group: " $VARIABLE_GROUP_ID + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "CP_ARM_SUBSCRIPTION_ID.value" | tr -d \") + if [ -z $variable_value 
]; then + subscription=$ARM_SUBSCRIPTION_ID + else + subscription=$variable_value + fi + export ARM_USE_MSI=false + if [ $USE_MSI != "true" ]; then + echo "use Service Principal" + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + if [ -z $ARM_SUBSCRIPTION_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." + exit 2 fi - - if [ -d LIBRARY/$(library_folder)/.terraform ]; then - git rm -q -r --ignore-unmatch LIBRARY/$(library_folder)/.terraform - changed=1 + if [ -z $ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." + exit 2 fi - - if [ -f LIBRARY/$(library_folder)/state.zip ]; then - git rm -q --ignore-unmatch LIBRARY/$(library_folder)/state.zip - changed=1 + if [ -z $ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." + exit 2 fi - - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION} ]; then - git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION} - changed=1 + if [ -z $ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." + exit 2 fi - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md ]; then - git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md - changed=1 + echo -e "$green--- az login ---$reset" + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code fi + else + echo "use MSI" + export ARM_CLIENT_ID=$servicePrincipalId + export ARM_TENANT_ID=$tenantId + export ARM_CLIENT_SECRET=$servicePrincipalKey + fi - if [ -f LIBRARY/$(library_folder)/backend-config.tfvars ]; then - git rm -q --ignore-unmatch LIBRARY/$(library_folder)/backend-config.tfvars - changed=1 - fi + echo "Subscription: $subscription" - if [ 1 == $changed ] ; then - git config --global user.email "$(Build.RequestedForEmail)" - git config --global user.name "$(Build.RequestedFor)" - git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" - git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push - fi - echo -e "$green--- Deleting variables ---$reset" - if [ ${#VARIABLE_GROUP_ID} != 0 ]; then - echo "Deleting variables" + az account set --subscription $subscription - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --yes --only-show-errors - fi + rg=$(az group list --subscription $subscription --query "[?name=='$(library_folder)'].name | [0]" | grep $(library_folder)) + if [ ${#rg} != 0 ]; then + echo "Deleting resource group: $(library_folder)" + az group delete --subscription $subscription --name $(library_folder) --yes --only-show-errors + return_code=$? + else + echo "Resource group $(library_folder) does not exist." 
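+                  # Nothing to remove here; the list-before-delete guard keeps this cleanup
+                  # idempotent when the stage is re-run after a partial teardown.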
+ fi + + rg=$(az group list --subscription $subscription --query "[?name=='$(deployer_folder)'].name | [0]" | grep $(deployer_folder)) + if [ ${#rg} != 0 ]; then + echo "Deleting resource group: $(deployer_folder)" + az group delete --subscription $subscription --name $(deployer_folder) --yes --only-show-errors + return_code=$? + else + echo "Resource group $(deployer_folder) does not exist" + fi + + + echo -e "$green--- Removing deployment automation configuration from devops repository ---$reset" + export ENVIRONMENT=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs) ; echo Environment $ENVIRONMENT + export LOCATION=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs) ; echo Location $LOCATION - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --yes --only-show-errors + if [ 0 == $return_code ] ; then + cd $CONFIG_REPO_PATH + git checkout -q $(Build.SourceBranchName) + git pull + changed=0 + echo "##vso[build.updatebuildnumber]Removing control plane $(deployer_folder) $(library_folder)" + if [ -d "DEPLOYER/$(deployer_folder)/.terraform" ]; then + git rm -q -r --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --yes --only-show-errors + if [ -f "DEPLOYER/$(deployer_folder)/state.zip" ]; then + git rm -q --ignore-unmatch DEPLOYER/$(deployer_folder)/state.zip + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --yes --only-show-errors + if [ -d LIBRARY/$(library_folder)/.terraform ]; then + git rm -q -r --ignore-unmatch LIBRARY/$(library_folder)/.terraform + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --yes --only-show-errors + if [ -f LIBRARY/$(library_folder)/state.zip ]; then + git rm -q --ignore-unmatch LIBRARY/$(library_folder)/state.zip + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_URL_BASE --yes --only-show-errors + if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION} ]; then + git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION} + changed=1 + fi + if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md ]; then + git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_IDENTITY.value" 
) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_IDENTITY --yes --only-show-errors + if [ -f LIBRARY/$(library_folder)/backend-config.tfvars ]; then + git rm -q --ignore-unmatch LIBRARY/$(library_folder)/backend-config.tfvars + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_ID --yes --only-show-errors + if [ 1 == $changed ] ; then + git config --global user.email "$(Build.RequestedForEmail)" + git config --global user.name "$(Build.RequestedFor)" + git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" + git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push fi + echo -e "$green--- Deleting variables ---$reset" + if [ ${#VARIABLE_GROUP_ID} != 0 ]; then + echo "Deleting variables" + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_URL_BASE --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_IDENTITY.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_IDENTITY --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name 
WEBAPP_ID --yes --only-show-errors + fi - fi + fi - fi + fi - exit $return_code + exit $return_code displayName: Remove Control Plane deployment artifacts from WORKSPACES env: @@ -478,4 +495,4 @@ stages: ARM_TENANT_ID: $(CP_ARM_TENANT_ID) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) - failOnStderr: false + USE_MSI: $(Use_MSI) diff --git a/deploy/pipelines/12-remove-control-plane.yaml b/deploy/pipelines/12-remove-control-plane.yaml index ec72a0598b..f2bbd87f43 100644 --- a/deploy/pipelines/12-remove-control-plane.yaml +++ b/deploy/pipelines/12-remove-control-plane.yaml @@ -34,6 +34,10 @@ parameters: displayName: The local path on the agent where the config repo can be found type: string + - name: connection_name + displayName: Service Connection Name + type: string + stages: - stage: Remove_control_plane_remote displayName: "Control plane removal (on agent)" @@ -90,24 +94,27 @@ stages: echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." exit 2 fi - if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then + + if [ -z $ARM_SUBSCRIPTION_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." exit 2 fi - if [ -z $CP_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi + if [ $USE_MSI != "true" ]; then + if [ -z $ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi - if [ -z $CP_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 - fi + if [ -z $ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi - if [ -z $CP_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 + if [ -z $ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." + exit 2 + fi fi if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then @@ -125,19 +132,23 @@ stages: unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ rm -f terraform_$(tf_version)_linux_amd64.zip fi - - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + if [ $USE_MSI != "true" ]; then + echo "Login using SPN" + export ARM_USE_MSI=false + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." 
+ exit $return_code + fi + else + source /etc/profile.d/deploy_server.sh + export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=true + unset ARM_CLIENT_ID + unset ARM_TENANT_ID fi - export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$CP_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$CP_ARM_SUBSCRIPTION_ID - echo -e "$green--- Convert config files to UX format ---$reset" dos2unix -q $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) @@ -249,18 +260,25 @@ stages: REMOTE_STATE_RG=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_RG | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file resource group' $REMOTE_STATE_RG fi - export STATE_SUBSCRIPTION=$CP_ARM_SUBSCRIPTION_ID ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') - unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip -d ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder) + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" | tr -d \") + if [ -n "${az_var}" ]; then + deployer_random_id="${az_var}" + else + if [ -f ${deployer_environment_file_name} ] ; then + deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) + fi fi - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g') - unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) + export STATE_SUBSCRIPTION=$ARM_SUBSCRIPTION_ID ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip ]; then + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') + unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip -d ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder) fi - + if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployer_folder)/state.zip ]; then + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') + unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployer_folder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployer_folder) + fi echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" @@ -289,31 +307,31 @@ stages: if [ -f DEPLOYER/$(deployer_folder)/terraform.tfstate ]; then echo "Compressing the state file." 
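            # The archive password below is now derived from DEPLOYER_RANDOM_ID_SEED rather than
            # the SPN client secret, presumably so runs that authenticate with MSI (and therefore
            # hold no client secret) can recreate the same password when unzipping the state later.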
sudo apt install zip
-                  pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g')
+                  pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g')
                   zip -j -P "${pass}" DEPLOYER/$(deployer_folder)/state DEPLOYER/$(deployer_folder)/terraform.tfstate
                   git add -f DEPLOYER/$(deployer_folder)/state.zip
                   changed=1
                 fi

                 if [ $return_code != 0 ] ; then
-                  backend=$(grep "local" LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate || true)
+                  backend=$(grep "local" LIBRARY/$(library_folder)/.terraform/terraform.tfstate || true)
                   if [ -n "${backend}" ]; then
                     echo "Local Terraform state"
-                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then
+                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate ]; then
                       sudo apt install zip
                       echo "Compressing the library state file"
-                      pass=$(echo $CP_ARM_CLIENT_SECRET | sed 's/-//g')
-                      zip -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate
-                      git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip
+                      pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g')
+                      zip -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate
+                      git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip
                       changed=1
                     fi
                   else
                     echo "Remote Terraform state"
-                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then
-                      git rm -q -f --ignore-unmatch ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate
+                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate ]; then
+                      git rm -q -f --ignore-unmatch ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate
                       added=1
                     fi
-                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then
-                      git rm -q --ignore-unmatch -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip
+                    if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip ]; then
+                      git rm -q --ignore-unmatch -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip
                       added=1
                     fi
                   fi
@@ -351,15 +369,17 @@ stages:
         displayName: Remove control plane
         env:
           SYSTEM_ACCESSTOKEN: $(System.AccessToken)
-          CP_ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
-          CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID)
-          CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET)
-          CP_ARM_TENANT_ID: $(CP_ARM_TENANT_ID)
+          ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
+          ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID)
+          ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET)
+          ARM_TENANT_ID: $(CP_ARM_TENANT_ID)
           SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }}
           CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)
           key_vault: $(Deployer_Key_Vault)
           TF_IN_AUTOMATION: true
           TF_LOG: $(TF_LOG)
+          USE_MSI: $(Use_MSI)
+          DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED)
         failOnStderr: false

  - stage: Remove_control_plane
    displayName: "Control plane removal"
@@ -388,337 +408,368 @@ stages:
        parameters:
          getLatestFromBranch: true
      - task: PostBuildCleanup@3
      - bash: |
          #!/bin/bash
          echo "##vso[build.updatebuildnumber]Removing the control plane defined in $(deployer_folder) $(library_folder)"
          green="\e[1;32m" ; reset="\e[0m"
          # Treat unset variables as an error when substituting.
          set -ue

          # echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset"
          # git fetch -q --all
          # git checkout -q $(Build.SourceBranchName)

          # Check if running on deployer
          if [[ ! 
-f /etc/profile.d/deploy_server.sh ]]; then - echo -e "$green --- Install dos2unix ---$reset" - sudo apt-get -qq install dos2unix - - sudo apt -qq install zip - - echo -e "$green --- Install terraform ---$reset" - - wget -q $(tf_url) - return_code=$? - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 - fi - unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ - rm -f terraform_$(tf_version)_linux_amd64.zip - fi - - echo -e "$green--- Update .sap_deployment_automation/config as DEPLOYMENT_REPO_PATH can change on devops agent ---$reset" - DEPLOYMENT_REPO_PATH=$(Build.Repository.LocalPath) - export HOME=$(Build.Repository.LocalPath)/$(Deployment_Configuration_Path) - cd $HOME; mkdir -p .sap_deployment_automation - - echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt - - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none - if [ -n $(PAT) ]; then - export AZURE_DEVOPS_EXT_PAT=$(PAT) + - task: AzureCLI@2 + continueOnError: false + inputs: + azureSubscription: ${{parameters.connection_name}} + scriptType: bash + scriptLocation: inlineScript + addSpnToEnvironment: true + inlineScript: | + #!/bin/bash + echo "##vso[build.updatebuildnumber]Removing the control plane defined in $(deployer_folder) $(library_folder)" + green="\e[1;32m" ; reset="\e[0m" + export ARM_USE_MSI=false + if [ $USE_MSI != "true" ]; then + echo "use Service Principal" + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET else - export AZURE_DEVOPS_EXT_PAT=$(System.AccessToken) + echo "use MSI" + export ARM_CLIENT_ID=$servicePrincipalId + export ARM_TENANT_ID=$tenantId + export ARM_CLIENT_SECRET=$servicePrincipalKey fi - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID - if [ -z ${VARIABLE_GROUP_ID} ]; then - echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." - exit 2 - fi - if [ -z $ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - if [ -z $ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi - if [ -z $ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 - fi - if [ -z $ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 - fi + # Treat unset variables as an error when substituting. + set -ue - echo -e "$green--- Convert config files to UX format ---$reset" - dos2unix -q $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) - dos2unix -q $CONFIG_REPO_PATH/LIBRARY/$(library_folder)/$(library_configuration_file) + # echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" + # git fetch -q --all + # git checkout -q $(Build.SourceBranchName) - echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" + # Check if running on deployer + if [[ ! 
-f /etc/profile.d/deploy_server.sh ]]; then + echo -e "$green --- Install dos2unix ---$reset" + sudo apt-get -qq install dos2unix - ENVIRONMENT=$(grep "^environment" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs) - LOCATION=$(grep "^location" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} + sudo apt -qq install zip - ENVIRONMENT_IN_FILENAME=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs ) - LOCATION_CODE=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs ) - case "$LOCATION_CODE" in - "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; - "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; - "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; - "AUSE") LOCATION_IN_FILENAME="australiasoutheast" ;; - "BRSO") LOCATION_IN_FILENAME="brazilsouth" ;; - "BRSE") LOCATION_IN_FILENAME="brazilsoutheast" ;; - "BRUS") LOCATION_IN_FILENAME="brazilus" ;; - "CACE") LOCATION_IN_FILENAME="canadacentral" ;; - "CAEA") LOCATION_IN_FILENAME="canadaeast" ;; - "CEIN") LOCATION_IN_FILENAME="centralindia" ;; - "CEUS") LOCATION_IN_FILENAME="centralus" ;; - "CEUA") LOCATION_IN_FILENAME="centraluseuap" ;; - "EAAS") LOCATION_IN_FILENAME="eastasia" ;; - "EAUS") LOCATION_IN_FILENAME="eastus" ;; - "EUSA") LOCATION_IN_FILENAME="eastus2euap" ;; - "EUS2") LOCATION_IN_FILENAME="eastus2" ;; - "EUSG") LOCATION_IN_FILENAME="eastusstg" ;; - "FRCE") LOCATION_IN_FILENAME="francecentral" ;; - "FRSO") LOCATION_IN_FILENAME="francesouth" ;; - "GENO") LOCATION_IN_FILENAME="germanynorth" ;; - "GEWE") LOCATION_IN_FILENAME="germanywest" ;; - "GEWC") LOCATION_IN_FILENAME="germanywestcentral" ;; - "ISCE") LOCATION_IN_FILENAME="israelcentral" ;; - "ITNO") LOCATION_IN_FILENAME="italynorth" ;; - "JAEA") LOCATION_IN_FILENAME="japaneast" ;; - "JAWE") LOCATION_IN_FILENAME="japanwest" ;; - "JINC") LOCATION_IN_FILENAME="jioindiacentral" ;; - "JINW") LOCATION_IN_FILENAME="jioindiawest" ;; - "KOCE") LOCATION_IN_FILENAME="koreacentral" ;; - "KOSO") LOCATION_IN_FILENAME="koreasouth" ;; - "NCUS") LOCATION_IN_FILENAME="northcentralus" ;; - "NOEU") LOCATION_IN_FILENAME="northeurope" ;; - "NOEA") LOCATION_IN_FILENAME="norwayeast" ;; - "NOWE") LOCATION_IN_FILENAME="norwaywest" ;; - "PLCE") LOCATION_IN_FILENAME="polandcentral" ;; - "QACE") LOCATION_IN_FILENAME="qatarcentral" ;; - "SANO") LOCATION_IN_FILENAME="southafricanorth" ;; - "SAWE") LOCATION_IN_FILENAME="southafricawest" ;; - "SCUS") LOCATION_IN_FILENAME="southcentralus" ;; - "SCUG") LOCATION_IN_FILENAME="southcentralusstg" ;; - "SOEA") LOCATION_IN_FILENAME="southeastasia" ;; - "SOIN") LOCATION_IN_FILENAME="southindia" ;; - "SECE") LOCATION_IN_FILENAME="swedencentral" ;; - "SWNO") LOCATION_IN_FILENAME="switzerlandnorth" ;; - "SWWE") LOCATION_IN_FILENAME="switzerlandwest" ;; - "UACE") LOCATION_IN_FILENAME="uaecentral" ;; - "UANO") LOCATION_IN_FILENAME="uaenorth" ;; - "UKSO") LOCATION_IN_FILENAME="uksouth" ;; - "UKWE") LOCATION_IN_FILENAME="ukwest" ;; - "WCUS") LOCATION_IN_FILENAME="westcentralus" ;; - "WEEU") LOCATION_IN_FILENAME="westeurope" ;; - "WEIN") LOCATION_IN_FILENAME="westindia" ;; - "WEUS") LOCATION_IN_FILENAME="westus" ;; - "WUS2") LOCATION_IN_FILENAME="westus2" ;; - "WUS3") LOCATION_IN_FILENAME="westus3" ;; - *) LOCATION_IN_FILENAME="westeurope" ;; - esac - - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - - if [ 
$ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then - echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" - exit 2 - fi + echo -e "$green --- Install terraform ---$reset" - if [ $LOCATION != $LOCATION_IN_FILENAME ]; then - echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" - exit 2 - fi - - echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" + wget -q $(tf_url) + return_code=$? + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." + exit 2 + fi + unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ + rm -f terraform_$(tf_version)_linux_amd64.zip + fi - deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION; echo "Environment file: " $deployer_environment_file_name + echo -e "$green--- Update .sap_deployment_automation/config as DEPLOYMENT_REPO_PATH can change on devops agent ---$reset" + DEPLOYMENT_REPO_PATH=$(Build.Repository.LocalPath) + export HOME=$(Build.Repository.LocalPath)/$(Deployment_Configuration_Path) + cd $HOME; mkdir -p .sap_deployment_automation - echo -e "$green--- az login ---$reset" + echo -e "$green--- Configure devops CLI extension ---$reset" + az config set extension.use_dynamic_install=yes_without_prompt - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - az account set --subscription $ARM_SUBSCRIPTION_ID + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + if [ -n $(PAT) ]; then + export AZURE_DEVOPS_EXT_PAT=$(PAT) + else + export AZURE_DEVOPS_EXT_PAT=$(System.AccessToken) + fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") - if [ -n "${az_var}" ]; then - key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} - else - echo "Reading key vault from environment file" - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} - fi + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") + echo '$(variable_group) id: ' $VARIABLE_GROUP_ID + if [ -z ${VARIABLE_GROUP_ID} ]; then + echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." + exit 2 + fi + if [ -z $ARM_SUBSCRIPTION_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." 
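+              # Fail fast: neither SPN nor MSI authentication can proceed without a subscription id.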
+ exit 2 + fi + if [ $USE_MSI != "true" ]; then - key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) - if [ -n "${key_vault_id}" ]; then + if [ -z $ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi + if [ -z $ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi + if [ -z $ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." + exit 2 + fi + else + export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID - if [ "azure pipelines" = "$(this_agent)" ]; then - this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 - az keyvault network-rule add --name ${key_vault} --ip-address ${this_ip} --only-show-errors --output none - ip_added=1 fi - fi - echo -e "$green--- Running the remove_deployer script that destroys deployer VM ---$reset" - if [ -f $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/state.zip ]; then - echo "Unzipping state.zip" - pass=$(echo $ARM_CLIENT_SECRET | sed 's/-//g') - unzip -qq -o -P "${pass}" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployer_folder) - fi - - sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/remove_deployer.sh - cd $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder) + echo -e "$green--- Convert config files to UX format ---$reset" + dos2unix -q $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) + dos2unix -q $CONFIG_REPO_PATH/LIBRARY/$(library_folder)/$(library_configuration_file) + + echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" + + ENVIRONMENT=$(grep "^environment" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs) + LOCATION=$(grep "^location" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') + echo Environment: ${ENVIRONMENT} + echo Location: ${LOCATION} + + ENVIRONMENT_IN_FILENAME=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs ) + LOCATION_CODE=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs ) + case "$LOCATION_CODE" in + "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; + "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; + "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; + "AUSE") LOCATION_IN_FILENAME="australiasoutheast" ;; + "BRSO") LOCATION_IN_FILENAME="brazilsouth" ;; + "BRSE") LOCATION_IN_FILENAME="brazilsoutheast" ;; + "BRUS") LOCATION_IN_FILENAME="brazilus" ;; + "CACE") LOCATION_IN_FILENAME="canadacentral" ;; + "CAEA") LOCATION_IN_FILENAME="canadaeast" ;; + "CEIN") LOCATION_IN_FILENAME="centralindia" ;; + "CEUS") LOCATION_IN_FILENAME="centralus" ;; + "CEUA") LOCATION_IN_FILENAME="centraluseuap" ;; + "EAAS") LOCATION_IN_FILENAME="eastasia" ;; + "EAUS") LOCATION_IN_FILENAME="eastus" ;; + "EUSA") LOCATION_IN_FILENAME="eastus2euap" ;; + "EUS2") LOCATION_IN_FILENAME="eastus2" ;; + "EUSG") LOCATION_IN_FILENAME="eastusstg" ;; + "FRCE") LOCATION_IN_FILENAME="francecentral" ;; + "FRSO") LOCATION_IN_FILENAME="francesouth" ;; + "GENO") LOCATION_IN_FILENAME="germanynorth" ;; + "GEWE") LOCATION_IN_FILENAME="germanywest" ;; + "GEWC") LOCATION_IN_FILENAME="germanywestcentral" ;; + "ISCE") LOCATION_IN_FILENAME="israelcentral" ;; + "ITNO") LOCATION_IN_FILENAME="italynorth" ;; + "JAEA") LOCATION_IN_FILENAME="japaneast" ;; + "JAWE") 
LOCATION_IN_FILENAME="japanwest" ;; + "JINC") LOCATION_IN_FILENAME="jioindiacentral" ;; + "JINW") LOCATION_IN_FILENAME="jioindiawest" ;; + "KOCE") LOCATION_IN_FILENAME="koreacentral" ;; + "KOSO") LOCATION_IN_FILENAME="koreasouth" ;; + "NCUS") LOCATION_IN_FILENAME="northcentralus" ;; + "NOEU") LOCATION_IN_FILENAME="northeurope" ;; + "NOEA") LOCATION_IN_FILENAME="norwayeast" ;; + "NOWE") LOCATION_IN_FILENAME="norwaywest" ;; + "PLCE") LOCATION_IN_FILENAME="polandcentral" ;; + "QACE") LOCATION_IN_FILENAME="qatarcentral" ;; + "SANO") LOCATION_IN_FILENAME="southafricanorth" ;; + "SAWE") LOCATION_IN_FILENAME="southafricawest" ;; + "SCUS") LOCATION_IN_FILENAME="southcentralus" ;; + "SCUG") LOCATION_IN_FILENAME="southcentralusstg" ;; + "SOEA") LOCATION_IN_FILENAME="southeastasia" ;; + "SOIN") LOCATION_IN_FILENAME="southindia" ;; + "SECE") LOCATION_IN_FILENAME="swedencentral" ;; + "SWNO") LOCATION_IN_FILENAME="switzerlandnorth" ;; + "SWWE") LOCATION_IN_FILENAME="switzerlandwest" ;; + "UACE") LOCATION_IN_FILENAME="uaecentral" ;; + "UANO") LOCATION_IN_FILENAME="uaenorth" ;; + "UKSO") LOCATION_IN_FILENAME="uksouth" ;; + "UKWE") LOCATION_IN_FILENAME="ukwest" ;; + "WCUS") LOCATION_IN_FILENAME="westcentralus" ;; + "WEEU") LOCATION_IN_FILENAME="westeurope" ;; + "WEIN") LOCATION_IN_FILENAME="westindia" ;; + "WEUS") LOCATION_IN_FILENAME="westus" ;; + "WUS2") LOCATION_IN_FILENAME="westus2" ;; + "WUS3") LOCATION_IN_FILENAME="westus3" ;; + *) LOCATION_IN_FILENAME="westeurope" ;; + esac + + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then + echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" + exit 2 + fi - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/remove_deployer.sh --auto-approve \ - --parameterfile $(deployer_configuration_file) + if [ $LOCATION != $LOCATION_IN_FILENAME ]; then + echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" + exit 2 + fi - return_code=$? + echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" - echo "Return code from remove_deployer $return_code." - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Return code from remove_deployer $return_code." 
- fi + deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION; echo "Environment file: " $deployer_environment_file_name - echo -e "$green--- Removing deployment automation configuration from devops repository ---$reset" + echo -e "$green--- az login ---$reset" - if [ 0 == $return_code ] ; then - cd $CONFIG_REPO_PATH - changed=0 - echo "##vso[build.updatebuildnumber]Removing control plane $(deployer_folder) $(library_folder)" - if [ -f "DEPLOYER/$(deployer_folder)/.terraform/terraform.tfstate" ]; then - git rm -q -f --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform/terraform.tfstate - changed=1 + if [ $USE_MSI != "true" ]; then + echo "Login using SPN" + export ARM_USE_MSI=false + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code fi + fi + az account set --subscription $ARM_SUBSCRIPTION_ID + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + if [ -n "${az_var}" ]; then + key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} + else + echo "Reading key vault from environment file" + key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + fi - if [ -d "DEPLOYER/$(deployer_folder)/.terraform" ]; then - git rm -q -r --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform - changed=1 - fi + key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) + if [ -n "${key_vault_id}" ]; then - if [ -f "DEPLOYER/$(deployer_folder)/state.zip" ]; then - git rm -q -f --ignore-unmatch DEPLOYER/$(deployer_folder)/state.zip - changed=1 + if [ "azure pipelines" = "$(this_agent)" ]; then + this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 + az keyvault network-rule add --name ${key_vault} --ip-address ${this_ip} --only-show-errors --output none + ip_added=1 fi + fi - if [ -d LIBRARY/$(library_folder)/.terraform ]; then - git rm -q -r --ignore-unmatch LIBRARY/$(library_folder)/.terraform - changed=1 - fi + echo -e "$green--- Running the remove_deployer script that destroys deployer VM ---$reset" + if [ -f $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/state.zip ]; then + echo "Unzipping state.zip" + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') + unzip -qq -o -P "${pass}" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployer_folder) + fi - if [ -f LIBRARY/$(library_folder)/state.zip ]; then - git rm -q -f --ignore-unmatch LIBRARY/$(library_folder)/state.zip - changed=1 - fi + sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/remove_deployer.sh + cd $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder) - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} ]; then - rm .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} - git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} - changed=1 - fi - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md ]; then - rm .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md - git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md - changed=1 - fi + 
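+                  # remove_deployer.sh is assumed to resolve the parameter file relative to the
+                  # current directory, hence the cd into the deployer folder above.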
$SAP_AUTOMATION_REPO_PATH/deploy/scripts/remove_deployer.sh --auto-approve \ + --parameterfile $(deployer_configuration_file) - if [ -f LIBRARY/$(library_folder)/backend-config.tfvars ]; then - git rm -q --ignore-unmatch LIBRARY/$(library_folder)/backend-config.tfvars - changed=1 - fi + return_code=$? - if [ 1 == $changed ] ; then - git config --global user.email "$(Build.RequestedForEmail)" - git config --global user.name "$(Build.RequestedFor)" - git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" - git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push - fi - echo -e "$green--- Deleting variables ---$reset" - if [ ${#VARIABLE_GROUP_ID} != 0 ]; then - echo "Deleting variables" + echo "Return code from remove_deployer $return_code." + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Return code from remove_deployer $return_code." + fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --yes --only-show-errors - fi + echo -e "$green--- Removing deployment automation configuration from devops repository ---$reset" - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --yes --only-show-errors + if [ 0 == $return_code ] ; then + cd $CONFIG_REPO_PATH + changed=0 + echo "##vso[build.updatebuildnumber]Removing control plane $(deployer_folder) $(library_folder)" + if [ -f "DEPLOYER/$(deployer_folder)/.terraform/terraform.tfstate" ]; then + git rm -q -f --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform/terraform.tfstate + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --yes --only-show-errors - fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --yes --only-show-errors + if [ -d "DEPLOYER/$(deployer_folder)/.terraform" ]; then + git rm -q -r --ignore-unmatch DEPLOYER/$(deployer_folder)/.terraform + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --yes --only-show-errors + if [ -f "DEPLOYER/$(deployer_folder)/state.zip" ]; then + git rm -q -f --ignore-unmatch DEPLOYER/$(deployer_folder)/state.zip + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name 
WEBAPP_URL_BASE --yes --only-show-errors + if [ -d LIBRARY/$(library_folder)/.terraform ]; then + git rm -q -r --ignore-unmatch LIBRARY/$(library_folder)/.terraform + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_IDENTITY.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_IDENTITY --yes --only-show-errors + if [ -f LIBRARY/$(library_folder)/state.zip ]; then + git rm -q -f --ignore-unmatch LIBRARY/$(library_folder)/state.zip + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_ID --yes --only-show-errors + if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} ]; then + rm .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} + git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE} + changed=1 + fi + if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md ]; then + rm .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md + git rm -q --ignore-unmatch .sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}.md + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_RESOURCE_GROUP.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_RESOURCE_GROUP --yes --only-show-errors + if [ -f LIBRARY/$(library_folder)/backend-config.tfvars ]; then + git rm -q --ignore-unmatch LIBRARY/$(library_folder)/backend-config.tfvars + changed=1 fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "INSTALLATION_MEDIA_ACCOUNT.value" ) - if [ ${#variable_value} != 0 ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name INSTALLATION_MEDIA_ACCOUNT --yes --only-show-errors + if [ 1 == $changed ] ; then + git config --global user.email "$(Build.RequestedForEmail)" + git config --global user.name "$(Build.RequestedFor)" + git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" + git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push fi + echo -e "$green--- Deleting variables ---$reset" + if [ ${#VARIABLE_GROUP_ID} != 0 ]; then + echo "Deleting variables" + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id 
${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_URL_BASE --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_IDENTITY.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_IDENTITY --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_ID --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_RESOURCE_GROUP.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_RESOURCE_GROUP --yes --only-show-errors + fi + + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "INSTALLATION_MEDIA_ACCOUNT.value" ) + if [ ${#variable_value} != 0 ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name INSTALLATION_MEDIA_ACCOUNT --yes --only-show-errors + fi - fi + fi - fi + fi - exit $return_code + exit $return_code displayName: Remove control plane env: SYSTEM_ACCESSTOKEN: $(System.AccessToken) ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID) - ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID) - ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET) - ARM_TENANT_ID: $(CP_ARM_TENANT_ID) + CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID) + CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET) + CP_ARM_TENANT_ID: $(CP_ARM_TENANT_ID) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) key_vault: $(Deployer_Key_Vault) TF_IN_AUTOMATION: true TF_LOG: $(TF_LOG) - failOnStderr: false + USE_MSI: $(Use_MSI) + DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) diff --git a/deploy/pipelines/variables/12-remove-control-plane-variables.yaml b/deploy/pipelines/variables/12-remove-control-plane-variables.yaml index b39edfa5fa..9f943fad06 100644 --- a/deploy/pipelines/variables/12-remove-control-plane-variables.yaml +++ b/deploy/pipelines/variables/12-remove-control-plane-variables.yaml @@ -9,6 +9,7 @@ parameters: deployer_environment: "" library: "" use_deployer: "" + connection_name: "" variables: - group: "SDAF-General" @@ -40,3 +41,6 @@ variables: - name: this_agent value: $[lower(variables['run_on'])] + + - 
name: connection_name + value: ${{ parameters.connection_name }} diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index db45433b12..a946e3dd03 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -1,3 +1,15 @@ +function Show-Menu($data) { + Write-Host "================ $Title ================" + $i = 1 + foreach ($d in $data) { + Write-Host "($i): Select '$i' for $($d)" + $i++ + } + + Write-Host "q: Select 'q' for Exit" + +} + #region Initialize # Initialize variables from Environment variables @@ -14,7 +26,7 @@ $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName $ARM_TENANT_ID = $Env:ARM_TENANT_ID #endregion -$versionLabel="v3.10.1.0" +$versionLabel = "v3.10.1.0" az logout @@ -35,6 +47,20 @@ if (Test-Path .\start.md) { Remove-Item .\start.md } +if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { + $Title = "Select the authentication method to use" + $data = @('Service Principal', 'Managed Identity') + Show-Menu($data) + $selection = Read-Host $Title + $authenticationMethod = $data[$selection - 1] + +} +else { + $authenticationMethod = $Env:SDAF_AuthenticationMethod +} + +Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor Yellow + az config set extension.use_dynamic_install=yes_without_prompt --only-show-errors az extension add --name azure-devops --only-show-errors @@ -46,7 +72,6 @@ if ($Control_plane_subscriptionID.Length -eq 0) { $Control_plane_subscriptionID = Read-Host "Please enter your Control plane subscription ID" az account set --sub $Control_plane_subscriptionID $ControlPlaneSubscriptionName = (az account show --query name -o tsv) - exit } else { az account set --sub $Control_plane_subscriptionID @@ -63,8 +88,6 @@ if ($Workload_zone_subscriptionID.Length -eq 0) { $Workload_zone_subscriptionID = Read-Host "Please enter your Workload zone subscription ID" az account set --sub $Workload_zone_subscriptionID $Workload_zoneSubscriptionName = (az account show --query name -o tsv) - - exit } else { az account set --sub $Workload_zone_subscriptionID @@ -185,7 +208,7 @@ else { $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT - $repo_size=(az repos list --query "[?id=='$repo_id'].size | [0]") + $repo_size = (az repos list --query "[?id=='$repo_id'].size | [0]") if ($repo_size -eq 0) { Write-Host "Importing the repository from GitHub" -ForegroundColor Green @@ -502,14 +525,14 @@ if ($provideSUser -eq 'y') { } -$groups= New-Object System.Collections.Generic.List[System.Object] -$pipelines= New-Object System.Collections.Generic.List[System.Object] +$groups = New-Object System.Collections.Generic.List[System.Object] +$pipelines = New-Object System.Collections.Generic.List[System.Object] Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.6.2" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false 
Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.0" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } @@ -685,8 +708,7 @@ if ($found_appRegistration.Length -ne 0) { if ($confirmation -eq 'y') { $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) } - else - { + else { $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" } } @@ -702,102 +724,122 @@ else { } #endregion +if ($authenticationMethod -eq "Service Principal") { + #region Control plane Service Principal + $spn_name = $ControlPlanePrefix + " Deployment credential" + if ($Env:SDAF_MGMT_SPN_NAME.Length -ne 0) { + $spn_name = $Env:SDAF_MGMT_SPN_NAME + } -#region Control plane Service Principal -$spn_name = $ControlPlanePrefix + " Deployment credential" -if ($Env:SDAF_MGMT_SPN_NAME.Length -ne 0) { - $spn_name = $Env:SDAF_MGMT_SPN_NAME -} + Add-Content -Path $fname -Value ("Control Plane Service Principal: " + $spn_name) -Add-Content -Path $fname -Value ("Control Plane Service Principal: " + $spn_name) + $scopes = "/subscriptions/" + $Control_plane_subscriptionID -$scopes = "/subscriptions/" + $Control_plane_subscriptionID + Write-Host "Creating the deployment credentials for the control plane. Service Principal Name:" $spn_name -ForegroundColor Green -Write-Host "Creating the deployment credentials for the control plane. Service Principal Name:" $spn_name -ForegroundColor Green + $CP_ARM_CLIENT_ID = "" + $CP_ARM_OBJECT_ID = "" + $CP_ARM_TENANT_ID = "" + $CP_ARM_CLIENT_SECRET = "Please update" -$CP_ARM_CLIENT_ID = "" -$CP_ARM_OBJECT_ID = "" -$CP_ARM_TENANT_ID = "" -$CP_ARM_CLIENT_SECRET = "Please update" + $SPN_Created = $false + $bSkip = $true -$SPN_Created = $false -$bSkip=$true + $found_appName = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'].displayName | [0]" --only-show-errors) + if ($found_appName.Length -gt 0) { + Write-Host "Found an existing Service Principal:" $spn_name + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name']| [0]" --only-show-errors) | ConvertFrom-Json + Write-Host "Updating the variable group" -$found_appName = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'].displayName | [0]" --only-show-errors) -if ($found_appName.Length -gt 0) { - Write-Host "Found an existing Service Principal:" $spn_name - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name']| [0]" --only-show-errors) | ConvertFrom-Json - Write-Host "Updating the variable group" + $CP_ARM_CLIENT_ID = $ExistingData.appId + $CP_ARM_OBJECT_ID = $ExistingData.Id + $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $CP_ARM_CLIENT_ID = $ExistingData.appId - $CP_ARM_OBJECT_ID = $ExistingData.Id - $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId + $confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" 
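# The 'y' branch below rotates the secret in place; a sketch of the underlying call (the id is a
# placeholder), where --append adds a new credential without revoking the existing ones:
#   az ad sp credential reset --id <appId> --append --query "password" --out tsv
# Choosing 'n' keeps the current credential and simply prompts for the already-known secret.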
+ if ($confirmation -eq 'y') { - $confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" - if ($confirmation -eq 'y') { + $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") + } + else { + $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal password" + } - $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") - } - else - { - $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal password" } + else { + Write-Host "Creating the Service Principal" $spn_name -ForegroundColor Green + $SPN_Created = $true + $Control_plane_SPN_data = (az ad sp create-for-rbac --role "Contributor" --scopes $scopes --name $spn_name --only-show-errors) | ConvertFrom-Json + $CP_ARM_CLIENT_SECRET = $Control_plane_SPN_data.password + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $CP_ARM_CLIENT_ID = $ExistingData.appId + $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId + $CP_ARM_OBJECT_ID = $ExistingData.Id -} -else { - Write-Host "Creating the Service Principal" $spn_name -ForegroundColor Green - $SPN_Created = $true - $Control_plane_SPN_data = (az ad sp create-for-rbac --role "Contributor" --scopes $scopes --name $spn_name --only-show-errors) | ConvertFrom-Json - $CP_ARM_CLIENT_SECRET = $Control_plane_SPN_data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json - $CP_ARM_CLIENT_ID = $ExistingData.appId - $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $CP_ARM_OBJECT_ID = $ExistingData.Id - -} + } -az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none -az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none -az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none -az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none -$Control_plane_groupID = (az pipelines 
variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) -if ($Control_plane_groupID.Length -eq 0) { - Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) -} + if ($Control_plane_groupID.Length -eq 0) { + Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) + } -if ($CP_ARM_CLIENT_SECRET -ne "Please update") { - az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_CLIENT_SECRET" --value $CP_ARM_CLIENT_SECRET --secret true --output none --only-show-errors - az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_CLIENT_ID" --value $CP_ARM_CLIENT_ID --output none --only-show-errors - az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_OBJECT_ID" --value $CP_ARM_OBJECT_ID --output none --only-show-errors -} + if ($CP_ARM_CLIENT_SECRET -ne "Please update") { + az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_CLIENT_SECRET" --value $CP_ARM_CLIENT_SECRET --secret true --output none --only-show-errors + az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_CLIENT_ID" --value $CP_ARM_CLIENT_ID --output none --only-show-errors + az pipelines variable-group variable update --group-id $Control_plane_groupID --name "CP_ARM_OBJECT_ID" --value $CP_ARM_OBJECT_ID --output none --only-show-errors + } -$groups.Add($Control_plane_groupID) + Write-Host "Create the Service Endpoint in Azure for the control plane" -ForegroundColor Green -Write-Host "Create the Service Endpoint in Azure for the control plane" -ForegroundColor Green + $Service_Connection_Name = "Control_Plane_Service_Connection" + 
$Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $CP_ARM_CLIENT_SECRET -$Service_Connection_Name = "Control_Plane_Service_Connection" -$Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $CP_ARM_CLIENT_SECRET + $epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]") + if ($epExists.Length -eq 0) { + Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green + az devops service-endpoint azurerm create --azure-rm-service-principal-id $CP_ARM_CLIENT_ID --azure-rm-subscription-id $Control_plane_subscriptionID --azure-rm-subscription-name $ControlPlaneSubscriptionName --azure-rm-tenant-id $CP_ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + } + else { + Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint delete --id $epId --yes + az devops service-endpoint azurerm create --azure-rm-service-principal-id $CP_ARM_CLIENT_ID --azure-rm-subscription-id $Control_plane_subscriptionID --azure-rm-subscription-name $ControlPlaneSubscriptionName --azure-rm-tenant-id $CP_ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + } -$epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]") -if ($epExists.Length -eq 0) { - Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green - az devops service-endpoint azurerm create --azure-rm-service-principal-id $CP_ARM_CLIENT_ID --azure-rm-subscription-id $Control_plane_subscriptionID --azure-rm-subscription-name $ControlPlaneSubscriptionName --azure-rm-tenant-id $CP_ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors } else { - Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint delete --id $epId --yes - az devops service-endpoint azurerm create --azure-rm-service-principal-id $CP_ARM_CLIENT_ID --azure-rm-subscription-id $Control_plane_subscriptionID --azure-rm-subscription-name $ControlPlaneSubscriptionName --azure-rm-tenant-id $CP_ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) + if ($Control_plane_groupID.Length -eq 0) { + Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green + az 
pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) + } + + Write-Host + + Write-Host "" + Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'." + + $connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices" + + Start-Process $connections_url + Read-Host -Prompt "Once you have created and validated the connection, Press any key to continue" + } +$groups.Add($Control_plane_groupID) + az pipelines variable-group variable update --group-id $Control_plane_groupID --name "WEB_APP_CLIENT_SECRET" --value $WEB_APP_CLIENT_SECRET --secret true --output none --only-show-errors @@ -815,72 +857,84 @@ if ($Env:SDAF_WorkloadZone_SPN_NAME.Length -ne 0) { $workload_zone_spn_name = $Env:SDAF_WorkloadZone_SPN_NAME } -Add-Content -path $fname -value ("Workload zone Service Principal: " + $workload_zone_spn_name) +if ($authenticationMethod -eq "Service Principal") { -$SPN_Created = $false -$found_appName = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) + Add-Content -path $fname -value ("Workload zone Service Principal: " + $workload_zone_spn_name) -if ($found_appName.Length -ne 0) { - Write-Host "Found an existing Service Principal:" $workload_zone_spn_name -ForegroundColor Green - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json - $ARM_CLIENT_ID = $ExistingData.appId - $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $ARM_OBJECT_ID = $ExistingData.Id + $SPN_Created = $false + $found_appName = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) - $confirmation = Read-Host "Reset the Workload zone Service Principal password y/n?" - if ($confirmation -eq 'y') { - $ARM_CLIENT_SECRET = (az ad sp credential reset --id $ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors) + if ($found_appName.Length -ne 0) { + Write-Host "Found an existing Service Principal:" $workload_zone_spn_name -ForegroundColor Green + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ARM_CLIENT_ID = $ExistingData.appId + $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId + $ARM_OBJECT_ID = $ExistingData.Id + + $confirmation = Read-Host "Reset the Workload zone Service Principal password y/n?" 
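# When no existing SPN is found, the creation branch further below boils down to a single call that
# both creates the principal and returns its secret (a sketch; the scope value is a hypothetical
# placeholder for the workload zone subscription):
#   az ad sp create-for-rbac --role "Contributor" --scopes /subscriptions/<workload-subscription-id> --name <spn-name>
# The appId/password pair it returns is what the variable-group updates that follow persist in Azure DevOps.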
+ if ($confirmation -eq 'y') { + $ARM_CLIENT_SECRET = (az ad sp credential reset --id $ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors) + } + else { + $ARM_CLIENT_SECRET = Read-Host "Enter the Workload zone Service Principal password" + } } else { - $ARM_CLIENT_SECRET = Read-Host "Enter the Workload zone Service Principal password" + Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green + $SPN_Created = $true + $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json + $ARM_CLIENT_SECRET = $Data.password + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ARM_CLIENT_ID = $ExistingData.appId + $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId + $ARM_OBJECT_ID = $ExistingData.Id } -} -else { - Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green - $SPN_Created = $true - $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json - $ARM_CLIENT_SECRET = $Data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json - $ARM_CLIENT_ID = $ExistingData.appId - $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $ARM_OBJECT_ID = $ExistingData.Id -} -Write-Host "Assigning reader permissions to the control plane subscription" -ForegroundColor Green -az role assignment create --assignee $ARM_CLIENT_ID --role "Reader" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none -az role assignment create --assignee $ARM_CLIENT_ID --role "User Access Administrator" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none -az role assignment create --assignee $ARM_CLIENT_ID --role "Storage Account Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + Write-Host "Assigning reader permissions to the control plane subscription" -ForegroundColor Green + az role assignment create --assignee $ARM_CLIENT_ID --role "Reader" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + az role assignment create --assignee $ARM_CLIENT_ID --role "User Access Administrator" --subscription $Workload_zone_subscriptionID --scope /subscriptions/$Workload_zone_subscriptionID --output none + az role assignment create --assignee $ARM_CLIENT_ID --role "Storage Account Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + $Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection" -$Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection" + $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors ) + if ($GroupID.Length -eq 0) { + Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green + az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' 
ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true --output none --authorize true + $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors) + } -$GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors ) -if ($GroupID.Length -eq 0) { - Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green - az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true --output none --authorize true - $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors) + if ($ARM_CLIENT_SECRET -ne "Please update") { + az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --output none --only-show-errors + az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_ID" --value $ARM_CLIENT_ID --output none --only-show-errors + az pipelines variable-group variable update --group-id $GroupID --name "ARM_OBJECT_ID" --value $ARM_OBJECT_ID --output none --only-show-errors + $Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $ARM_CLIENT_SECRET + + $epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]") + if ($epExists.Length -eq 0) { + Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green + az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + } + else { + Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint delete --id $epId --yes + az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors + $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv + az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + } + } } +else { + $Service_Connection_Name = "Control_Plane_Service_Connection" -if ($ARM_CLIENT_SECRET -ne "Please update") { - az pipelines variable-group variable update 
--group-id $GroupID --name "ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --output none --only-show-errors - az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_ID" --value $ARM_CLIENT_ID --output none --only-show-errors - az pipelines variable-group variable update --group-id $GroupID --name "ARM_OBJECT_ID" --value $ARM_OBJECT_ID --output none --only-show-errors - $Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $ARM_CLIENT_SECRET - - $epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]") - if ($epExists.Length -eq 0) { - Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green - az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors - } - else { - Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint delete --id $epId --yes - az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors - $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv - az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors + $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors ) + if ($GroupID.Length -eq 0) { + Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green + az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false Use_MSI=true --output none --authorize true + $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors) } } $groups.Add($GroupID) @@ -914,7 +968,7 @@ if (!$AlreadySet -or $ResetPAT ) { if ($POOL_NAME_FOUND.Length -gt 0) { Write-Host "Agent pool" $Pool_Name "already exists" -ForegroundColor Yellow $POOL_ID = (az pipelines pool list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) - $queue_id=(az pipelines queue list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) + $queue_id = (az pipelines queue list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) } else { @@ -924,7 +978,7 @@ if (!$AlreadySet -or $ResetPAT ) { az devops invoke --area distributedtask --resource pools --http-method POST --api-version "7.1-preview" --in-file .\pool.json --query-parameters authorizePipelines=true --query id --output none --only-show-errors $POOL_ID = (az pipelines pool list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) Write-Host "Agent 
pool" $Pool_Name "created" - $queue_id=(az pipelines queue list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) + $queue_id = (az pipelines queue list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) } @@ -936,39 +990,37 @@ if (!$AlreadySet -or $ResetPAT ) { $base64AuthInfo = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes((":{0}" -f $PAT))) $bodyText = [PSCustomObject]@{ - allPipelines= @{ - authorized = $false + allPipelines = @{ + authorized = $false } - resource = @{ - id = 000 - type= "variablegroup" + resource = @{ + id = 000 + type = "variablegroup" } - pipelines = @([ordered]@{ + pipelines = @([ordered]@{ id = 000 authorized = $true }) } - foreach($group in $groups) - { - $bodyText.resource.id=$group - $pipeline_permission_url=$ADO_ORGANIZATION + "/" + $Project_ID+"/_apis/pipelines/pipelinePermissions/variablegroup/"+$group.ToString() + "?api-version=5.1-preview.1" - Write-Host "Setting permissions for variable group:" $group.ToString() -ForegroundColor Yellow - - foreach($pipeline in $pipelines) - { - $bodyText.pipelines[0].id=$pipeline - $body = $bodyText | ConvertTo-Json -Depth 10 - Write-Host " Allowing pipeline id:" $pipeline.ToString() -ForegroundColor Yellow - $response=Invoke-RestMethod -Method PATCH -Uri $pipeline_permission_url -Headers @{Authorization = "Basic $base64AuthInfo"} -Body $body -ContentType "application/json" - } + foreach ($group in $groups) { + $bodyText.resource.id = $group + $pipeline_permission_url = $ADO_ORGANIZATION + "/" + $Project_ID + "/_apis/pipelines/pipelinePermissions/variablegroup/" + $group.ToString() + "?api-version=5.1-preview.1" + Write-Host "Setting permissions for variable group:" $group.ToString() -ForegroundColor Yellow + + foreach ($pipeline in $pipelines) { + $bodyText.pipelines[0].id = $pipeline + $body = $bodyText | ConvertTo-Json -Depth 10 + Write-Host " Allowing pipeline id:" $pipeline.ToString() -ForegroundColor Yellow + $response = Invoke-RestMethod -Method PATCH -Uri $pipeline_permission_url -Headers @{Authorization = "Basic $base64AuthInfo" } -Body $body -ContentType "application/json" + } } $bodyText = [PSCustomObject]@{ - allPipelines= @{ - authorized = $false + allPipelines = @{ + authorized = $false } - pipelines = @([ordered]@{ + pipelines = @([ordered]@{ id = 000 authorized = $true }) @@ -977,14 +1029,13 @@ if (!$AlreadySet -or $ResetPAT ) { # Read-Host -Prompt "Press any key to continue" - $pipeline_permission_url=$ADO_ORGANIZATION + "/" + $Project_ID+"/_apis/pipelines/pipelinePermissions/queue/"+$queue_id.ToString() + "?api-version=5.1-preview.1" + $pipeline_permission_url = $ADO_ORGANIZATION + "/" + $Project_ID + "/_apis/pipelines/pipelinePermissions/queue/" + $queue_id.ToString() + "?api-version=5.1-preview.1" Write-Host "Setting permissions for agent pool:" $Pool_Name "(" $queue_id ")" -ForegroundColor Yellow - foreach($pipeline in $pipelines) - { - $bodyText.pipelines[0].id=$pipeline - $body = $bodyText | ConvertTo-Json -Depth 10 - Write-Host " Allowing pipeline id:" $pipeline.ToString() " access to " $Pool_Name -ForegroundColor Yellow - $response=Invoke-RestMethod -Method PATCH -Uri $pipeline_permission_url -Headers @{Authorization = "Basic $base64AuthInfo"} -Body $body -ContentType "application/json" + foreach ($pipeline in $pipelines) { + $bodyText.pipelines[0].id = $pipeline + $body = $bodyText | ConvertTo-Json -Depth 10 + Write-Host " Allowing pipeline id:" $pipeline.ToString() " access to " $Pool_Name -ForegroundColor Yellow + $response = Invoke-RestMethod -Method PATCH -Uri 
$pipeline_permission_url -Headers @{Authorization = "Basic $base64AuthInfo" } -Body $body -ContentType "application/json" } } @@ -994,6 +1045,7 @@ Write-Host "The browser will now open, Select the'" $ADO_PROJECT "Build Service' $permissions_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/repositories?_a=permissions" + Start-Process $permissions_url Read-Host -Prompt "Once you have verified the permission, Press any key to continue" diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index 6e577791df..75f84f9239 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -64,7 +64,7 @@ export local_user=$USER # if [ -z "${TF_VERSION}" ]; then - TF_VERSION="1.6.2" + TF_VERSION="1.7.0" fi diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index 34296b669d..312a6d91b1 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -39,8 +39,9 @@ source "${script_directory}/helpers/script_helpers.sh" force=0 recover=0 ado_flag="" +deploy_using_msi_only=0 -INPUT_ARGUMENTS=$(getopt -n deploy_controlplane -o d:l:s:c:p:t:a:k:ifohrv --longoptions deployer_parameter_file:,library_parameter_file:,subscription:,spn_id:,spn_secret:,tenant_id:,storageaccountname:,vault:,auto-approve,force,only_deployer,help,recover,ado -- "$@") +INPUT_ARGUMENTS=$(getopt -n deploy_controlplane -o d:l:s:c:p:t:a:k:ifohrvm --longoptions deployer_parameter_file:,library_parameter_file:,subscription:,spn_id:,spn_secret:,tenant_id:,storageaccountname:,vault:,auto-approve,force,only_deployer,help,recover,ado,msi -- "$@") VALID_ARGUMENTS=$? if [ "$VALID_ARGUMENTS" != "0" ]; then @@ -51,19 +52,20 @@ eval set -- "$INPUT_ARGUMENTS" while :; do case "$1" in + -a | --storageaccountname) REMOTE_STATE_SA="$2" ; shift 2 ;; + -c | --spn_id) client_id="$2" ; shift 2 ;; -d | --deployer_parameter_file) deployer_parameter_file="$2" ; shift 2 ;; + -k | --vault) keyvault="$2" ; shift 2 ;; -l | --library_parameter_file) library_parameter_file="$2" ; shift 2 ;; - -s | --subscription) subscription="$2" ; shift 2 ;; - -c | --spn_id) client_id="$2" ; shift 2 ;; -p | --spn_secret) spn_secret="$2" ; shift 2 ;; + -s | --subscription) subscription="$2" ; shift 2 ;; -t | --tenant_id) tenant_id="$2" ; shift 2 ;; - -a | --storageaccountname) REMOTE_STATE_SA="$2" ; shift 2 ;; - -k | --vault) keyvault="$2" ; shift 2 ;; - -v | --ado) ado_flag="--ado" ; shift ;; -f | --force) force=1 ; shift ;; + -i | --auto-approve) approve="--auto-approve" ; shift ;; + -m | --msi) deploy_using_msi_only=1 ; shift ;; -o | --only_deployer) only_deployer=1 ; shift ;; -r | --recover) recover=1 ; shift ;; - -i | --auto-approve) approve="--auto-approve" ; shift ;; + -v | --ado) ado_flag="--ado" ; shift ;; -h | --help) control_plane_showhelp exit 3 ; shift ;; --) shift; break ;; @@ -189,8 +191,12 @@ if [ -n "${subscription}" ]; then echo "# #" echo "#########################################################################################" echo "" - az account set --sub "${subscription}" - export ARM_SUBSCRIPTION_ID="${subscription}" + + if [ -n "${subscription}" ]; + then + az account set --sub "${subscription}" + export ARM_SUBSCRIPTION_ID="${subscription}" + fi kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") if [ -z "${kv_found}" ] ; then @@ -212,7 +218,13 @@ fi if [ -n "$keyvault" ]; then set_executing_user_environment_variables "none" else - 
set_executing_user_environment_variables "${spn_secret}" + if [ 0 = "${deploy_using_msi_only:-}" ]; then + echo "Using Service Principal for deployment" + set_executing_user_environment_variables "${spn_secret}" + else + echo "Using Managed Identity for deployment" + set_executing_user_environment_variables "none" + fi fi @@ -296,53 +308,10 @@ if [ 0 == $step ]; then cd "$root_dirname" || exit - echo "#########################################################################################" - echo "# #" - echo -e "# $cyan Copying the parameterfiles $resetformatting #" - echo "# #" - echo "#########################################################################################" - echo "" - load_config_vars "${deployer_config_information}" "sshsecret" load_config_vars "${deployer_config_information}" "keyvault" load_config_vars "${deployer_config_information}" "deployer_public_ip_address" - if [ -n "${deployer_public_ip_address}" ]; then - if [ "$this_ip" != "$deployer_public_ip_address" ]; then - # Only run this when not on deployer - if [ -n "${sshsecret}" ] - then - echo "#########################################################################################" - echo "# #" - echo -e "# $cyan Collecting secrets from KV $resetformatting #" - echo "# #" - echo "#########################################################################################" - echo "" - temp_file=$(mktemp) - ppk=$(az keyvault secret show --vault-name "${keyvault}" --name "${sshsecret}" --query value --output tsv) - echo "${ppk}" > "${temp_file}" - chmod 600 "${temp_file}" - - remote_deployer_dir="$HOME/Azure_SAP_Automated_Deployment/WORKSPACES/"$(dirname "$deployer_parameter_file") - remote_library_dir="$HOME/Azure_SAP_Automated_Deployment/WORKSPACES/"$(dirname "$library_parameter_file") - remote_config_dir="$CONFIG_REPO_PATH/.sap_deployment_automation" - - ssh -i "${temp_file}" -o StrictHostKeyChecking=no -o ConnectTimeout=10 azureadm@"${deployer_public_ip_address}" "mkdir -p ${remote_deployer_dir}"/.terraform 2> /dev/null - scp -i "${temp_file}" -q -o StrictHostKeyChecking=no -o ConnectTimeout=120 "$deployer_parameter_file" azureadm@"${deployer_public_ip_address}":"${remote_deployer_dir}"/. 2> /dev/null - scp -i "${temp_file}" -q -o StrictHostKeyChecking=no -o ConnectTimeout=120 "$(dirname "$deployer_parameter_file")"/.terraform/terraform.tfstate azureadm@"${deployer_public_ip_address}":"${remote_deployer_dir}"/.terraform/terraform.tfstate 2> /dev/null - scp -i "${temp_file}" -q -o StrictHostKeyChecking=no -o ConnectTimeout=120 "$(dirname "$deployer_parameter_file")"/terraform.tfstate azureadm@"${deployer_public_ip_address}":"${remote_deployer_dir}"/terraform.tfstate 2> /dev/null - - ssh -i "${temp_file}" -o StrictHostKeyChecking=no -o ConnectTimeout=10 azureadm@"${deployer_public_ip_address}" " mkdir -p ${remote_library_dir}"/.terraform 2> /dev/null - scp -i "${temp_file}" -q -o StrictHostKeyChecking=no -o ConnectTimeout=120 "$library_parameter_file" azureadm@"${deployer_public_ip_address}":"$remote_library_dir"/. 2> /dev/null - - ssh -i "${temp_file}" -o StrictHostKeyChecking=no -o ConnectTimeout=10 azureadm@"${deployer_public_ip_address}" "mkdir -p ${remote_config_dir}" 2> /dev/null - scp -i "${temp_file}" -q -o StrictHostKeyChecking=no -o ConnectTimeout=120 "${deployer_config_information}" azureadm@"${deployer_public_ip_address}":"${remote_config_dir}"/. 
2> /dev/null - - rm "${temp_file}" - fi - fi - - fi echo "##vso[task.setprogress value=20;]Progress Indicator" else echo "" @@ -375,27 +344,36 @@ if [ 1 == $step ] || [ 3 == $step ] ; then fi fi + if [ -z "$keyvault" ]; then + if [ $ado_flag != "--ado" ] ; then + read -r -p "Deployer keyvault name: " keyvault + else + exit 10 + fi + fi - secretname="${environment}"-client-id + secretname="${environment}"-subscription-id echo "" echo "#########################################################################################" echo "# #" - echo -e "# $cyan Validating keyvault access $resetformatting #" + echo -e "# $cyan Validating keyvault access to $keyvault $resetformatting #" echo "# #" echo "#########################################################################################" echo "" - if [ -z "$keyvault" ]; then - if [ $ado_flag != "--ado" ] ; then - - read -r -p "Deployer keyvault name: " keyvault - else - exit 10 - fi + kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]" --subscription "${subscription}") + if [ -z $kv_name_check ]; then + echo "" + echo "#########################################################################################" + echo "# #" + echo -e "# $cyan Retrying keyvault access $resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + sleep 60 + kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]" --subscription "${subscription}") fi - kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]") - if [ -z $kv_name_check ]; then echo "#########################################################################################" echo "# #" @@ -407,10 +385,10 @@ if [ 1 == $step ] || [ 3 == $step ] ; then exit 10 fi - access_error=$(az keyvault secret list --vault "$keyvault" --only-show-errors | grep "The user, group or application") + access_error=$(az keyvault secret list --vault "$keyvault" --subscription "${subscription}" --only-show-errors | grep "The user, group or application") if [ -z "${access_error}" ]; then - save_config_var "client_id" "${deployer_config_information}" - save_config_var "tenant_id" "${deployer_config_information}" + # save_config_var "client_id" "${deployer_config_information}" + # save_config_var "tenant_id" "${deployer_config_information}" if [ -n "$spn_secret" ]; then allParams=$(printf " -e %s -r %s -v %s --spn_secret %s " "${environment}" "${region_code}" "${keyvault}" "${spn_secret}") @@ -429,19 +407,34 @@ if [ 1 == $step ] || [ 3 == $step ] ; then exit $return_code fi else - read -p "Do you want to specify the SPN Details Y/N?" ans - answer=${ans^^} - if [ "$answer" == 'Y' ]; then - allParams=$(printf " -e %s -r %s -v %s " "${environment}" "${region_code}" "${keyvault}" ) - - #$allParams as an array (); array math can be done in shell, allowing dynamic parameter lists to be created - #"${allParams[@]}" - quotes all elements of the array - - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams - return_code=$? - if [ 0 != $return_code ]; then - exit $return_code - fi + if [ 0 = "${deploy_using_msi_only:-}" ]; then + + + read -p "Do you want to specify the SPN Details Y/N?" 
ans + answer=${ans^^} + if [ "$answer" == 'Y' ]; then + allParams=$(printf " -e %s -r %s -v %s " "${environment}" "${region_code}" "${keyvault}" ) + + #$allParams as an array (); array math can be done in shell, allowing dynamic parameter lists to be created + #"${allParams[@]}" - quotes all elements of the array + + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams + return_code=$? + if [ 0 != $return_code ]; then + exit $return_code + fi + fi + else + allParams=$(printf " -e %s -r %s -v %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${subscription}") + + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams + if [ -f secret.err ]; then + error_message=$(cat secret.err) + echo "##vso[task.logissue type=error]${error_message}" + + exit 65 + fi + fi fi @@ -471,10 +464,12 @@ if [ 1 == $step ] || [ 3 == $step ] ; then exit 65 fi + else echo "##vso[task.setprogress value=40;]Progress Indicator" fi unset TF_DATA_DIR + cd "$root_dirname" || exit if [ 1 = "${only_deployer:-}" ]; then diff --git a/deploy/scripts/deploy_utils.sh b/deploy/scripts/deploy_utils.sh index e1d2ad5e51..42e2753ad0 100755 --- a/deploy/scripts/deploy_utils.sh +++ b/deploy/scripts/deploy_utils.sh @@ -286,9 +286,9 @@ function set_executing_user_environment_variables() { set_azure_cloud_environment - az_exec_user_type=$(az account show | jq -r .user.type) - az_exec_user_name=$(az account show -o json | jq -r .user.name) - az_tenant_id=$(az account show -o json | jq -r .tenantId) + az_exec_user_type=$(az account show --query user.type --output tsv) + az_exec_user_name=$(az account show --query user.name --output tsv) + az_tenant_id=$(az account show --query tenantId --output tsv) echo -e "\t\t[set_executing_user_environment_variables]: User type: "${az_exec_user_type}"" @@ -331,13 +331,13 @@ function set_executing_user_environment_variables() { case "${az_client_id}" in "systemAssignedIdentity") - echo -e "\t[set_executing_user_environment_variables]: logged in using '${az_exec_user_type}'" + echo -e "\t[set_executing_user_environment_variables]: logged in using System Assigned Identity '${az_exec_user_type}'" echo -e "\t[set_executing_user_environment_variables]: unset ARM_CLIENT_SECRET" unset ARM_CLIENT_SECRET ;; "userAssignedIdentity") echo -e "\t[set_executing_user_environment_variables]: logged in using User Assigned Identity: '${az_exec_user_type}'" - echo -e "\t[set_executing_user_environment_variables]: logged in using User Assigned Identity: unset ARM_CLIENT_SECRET" + echo -e "\t[set_executing_user_environment_variables]: unset ARM_CLIENT_SECRET" unset ARM_CLIENT_SECRET ;; *) diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 9e3a7e4105..5d2a295cad 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -19,8 +19,9 @@ source "${script_directory}/helpers/script_helpers.sh" force=0 called_from_ado=0 +deploy_using_msi_only=0 -INPUT_ARGUMENTS=$(getopt -n install_workloadzone -o p:d:e:k:o:s:c:n:t:v:aifh --longoptions parameterfile:,deployer_tfstate_key:,deployer_environment:,subscription:,spn_id:,spn_secret:,tenant_id:,state_subscription:,keyvault:,storageaccountname:,ado,auto-approve,force,help -- "$@") +INPUT_ARGUMENTS=$(getopt -n install_workloadzone -o p:d:e:k:o:s:c:n:t:v:aifhm --longoptions 
parameterfile:,deployer_tfstate_key:,deployer_environment:,subscription:,spn_id:,spn_secret:,tenant_id:,state_subscription:,keyvault:,storageaccountname:,ado,auto-approve,force,help,msi -- "$@") VALID_ARGUMENTS=$? if [ "$VALID_ARGUMENTS" != "0" ]; then showhelp @@ -30,19 +31,21 @@ eval set -- "$INPUT_ARGUMENTS" while : do case "$1" in - -p | --parameterfile) parameterfile="$2" ; shift 2 ;; + -a | --ado) called_from_ado=1 ; shift ;; + -c | --spn_id) client_id="$2" ; shift 2 ;; -d | --deployer_tfstate_key) deployer_tfstate_key="$2" ; shift 2 ;; -e | --deployer_environment) deployer_environment="$2" ; shift 2 ;; + -f | --force) force=1 ; shift ;; + -i | --auto-approve) approve="--auto-approve" ; shift ;; -k | --state_subscription) STATE_SUBSCRIPTION="$2" ; shift 2 ;; + -m | --msi) deploy_using_msi_only=1 ; shift ;; + -n | --spn_secret) spn_secret="$2" ; shift 2 ;; -o | --storageaccountname) REMOTE_STATE_SA="$2" ; shift 2 ;; + -p | --parameterfile) parameterfile="$2" ; shift 2 ;; -s | --subscription) subscription="$2" ; shift 2 ;; - -c | --spn_id) client_id="$2" ; shift 2 ;; - -v | --keyvault) keyvault="$2" ; shift 2 ;; - -n | --spn_secret) spn_secret="$2" ; shift 2 ;; - -a | --ado) called_from_ado=1 ; shift ;; -t | --tenant_id) tenant_id="$2" ; shift 2 ;; - -f | --force) force=1 ; shift ;; - -i | --auto-approve) approve="--auto-approve" ; shift ;; + -v | --keyvault) keyvault="$2" ; shift 2 ;; + -h | --help) workload_zone_showhelp exit 3 ; shift ;; --) shift; break ;; @@ -312,41 +315,44 @@ then exit 65 fi fi +if [ 0 = "${deploy_using_msi_only:-}" ]; then + if [ -n "$client_id" ] + then + if is_valid_guid "$client_id" ; then + echo "Valid spn id format" + else + printf -v val %-40.40s "$client_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided spn_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + exit 65 + fi + fi -if [ -n "$client_id" ] -then - if is_valid_guid "$client_id" ; then - echo "Valid spn id format" - else - printf -v val %-40.40s "$client_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided spn_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - exit 65 - fi -fi - -if [ -n "$tenant_id" ] -then - if is_valid_guid "$tenant_id" ; then - echo "Valid tenant id format" - else - printf -v val %-40.40s "$tenant_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - exit 65 - fi + if [ -n "$tenant_id" ] + then + if is_valid_guid "$tenant_id" ; then + echo "Valid tenant id format" + else + printf -v val %-40.40s "$tenant_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + exit 65 + fi + fi + #setting the user environment variables + set_executing_user_environment_variables 
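
is_valid_guid, used throughout the validation above, comes from the helper scripts sourced at the top of the file. A minimal stand-in, assuming the usual 8-4-4-4-12 hexadecimal format, would be:

    # Minimal stand-in for is_valid_guid (the real helper is provided by
    # the sourced script_helpers.sh); assumes the 8-4-4-4-12 hex format.
    function is_valid_guid() {
        [[ "$1" =~ ^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$ ]]
    }
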
"${spn_secret}" +else + #setting the user environment variables + set_executing_user_environment_variables "N/A" fi -#setting the user environment variables -set_executing_user_environment_variables "${spn_secret}" - if [[ -z ${REMOTE_STATE_SA} ]]; then load_config_vars "${workload_config_information}" "REMOTE_STATE_SA" fi @@ -410,49 +416,69 @@ else fi fi -if [ -n "${keyvault}" ] -then - echo "Setting the secrets" +if [ 1 = "${deploy_using_msi_only:-}" ]; then + if [ -n "${keyvault}" ] + then + echo "Setting the secrets" - save_config_var "client_id" "${workload_config_information}" - save_config_var "tenant_id" "${workload_config_information}" + allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${subscription}" ) - if [ -n "$spn_secret" ] - then - allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret ***** --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" "${tenant_id}" ) + echo "Calling set_secrets with " "${allParams}" - echo "Calling set_secrets with " "${allParams}" + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} - allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret %s --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${spn_secret}" "${subscription}" "${client_id}" "${tenant_id}" ) + if [ -f secret.err ]; then + error_message=$(cat secret.err) + echo "##vso[task.logissue type=error]${error_message}" + rm secret.err + exit 65 + fi + fi - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} +else + if [ -n "${keyvault}" ] + then + echo "Setting the secrets" - if [ -f secret.err ]; then - error_message=$(cat secret.err) - echo "##vso[task.logissue type=error]${error_message}" + save_config_var "client_id" "${workload_config_information}" + save_config_var "tenant_id" "${workload_config_information}" - exit 65 - fi - else - read -p "Do you want to specify the Workload SPN Details Y/N?" ans - answer=${ans^^} - if [ ${answer} == 'Y' ]; then - allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --spn_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" ) + if [ -n "$spn_secret" ] + then + allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret ***** --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" "${tenant_id}" ) - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} - if [ $? -eq 255 ] - then - exit $? - fi - fi - fi + echo "Calling set_secrets with " "${allParams}" - if [ -f kv.log ] - then - rm kv.log - fi -fi + allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret %s --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${spn_secret}" "${subscription}" "${client_id}" "${tenant_id}" ) + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} + + if [ -f secret.err ]; then + error_message=$(cat secret.err) + echo "##vso[task.logissue type=error]${error_message}" + + exit 65 + fi + else + read -p "Do you want to specify the Workload SPN Details Y/N?" 
ans + answer=${ans^^} + if [ ${answer} == 'Y' ]; then + allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --spn_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" ) + + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} + if [ $? -eq 255 ] + then + exit $? + fi + fi + fi + + if [ -f kv.log ] + then + rm kv.log + fi + fi +fi if [ -z "${deployer_tfstate_key}" ] then load_config_vars "${workload_config_information}" "deployer_tfstate_key" diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 2eba6d0ced..e2511f3c42 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -1133,6 +1133,12 @@ then echo "" echo "" + if [ -n ${ARM_CLIENT_SECRET} ] ; then + az login --service-principal --username "${ARM_CLIENT_ID}" --password=$ARM_CLIENT_SECRET --tenant "${ARM_TENANT_ID}" --output none + else + az login --identity --output none + fi + az deployment group create --resource-group ${created_resource_group_name} --name "ControlPlane_Deployer_${created_resource_group_name}" --template-file "${script_directory}/templates/empty-deployment.json" --output none return_value=0 if [ 1 == $called_from_ado ] ; then @@ -1250,10 +1256,6 @@ then # fi # fi - if [ -n ${ARM_CLIENT_SECRET} ] ; then - az login --service-principal --username "${ARM_CLIENT_ID}" --password=$ARM_CLIENT_SECRET --tenant "${ARM_TENANT_ID}" --output none - fi - rg_name=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw created_resource_group_name | tr -d \") echo "" diff --git a/deploy/scripts/set_secrets.sh b/deploy/scripts/set_secrets.sh index d43185008c..2044dbc7ef 100755 --- a/deploy/scripts/set_secrets.sh +++ b/deploy/scripts/set_secrets.sh @@ -48,7 +48,9 @@ function showhelp { echo "#########################################################################################" } -INPUT_ARGUMENTS=$(getopt -n set_secrets -o e:r:v:s:c:p:t:b:hw --longoptions environment:,region:,vault:,subscription:,spn_id:,spn_secret:,tenant_id:,keyvault_subscription:,workload,help -- "$@") +deploy_using_msi_only=0 + +INPUT_ARGUMENTS=$(getopt -n set_secrets -o e:r:v:s:c:p:t:b:hwm --longoptions environment:,region:,vault:,subscription:,spn_id:,spn_secret:,tenant_id:,keyvault_subscription:,workload,help,msi -- "$@") VALID_ARGUMENTS=$? if [ "$VALID_ARGUMENTS" != "0" ]; then @@ -94,6 +96,10 @@ while :; do workload=1 shift ;; + -m | --msi) + deploy_using_msi_only=1 + shift + ;; -h | --help) showhelp exit 3 @@ -175,57 +181,93 @@ if [ -z "$keyvault" ]; then return_code=65 exit $return_code fi - fi - -if [ -z "${client_id}" ]; then - load_config_vars "${environment_config_information}" "client_id" - if [ -z "$client_id" ]; then - read -r -p "SPN App ID: " client_id - fi -else - if is_valid_guid "${client_id}" ; then - echo "Valid client_id specified" - else - printf -v val %-40.40s "$client_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided client_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - return_code=65 - echo "The provided client_id is not valid " "${val}" > secret.err - exit $return_code - fi +if [ -z "${keyvault}" ]; then + echo "Missing keyvault" + echo "No keyvault specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code fi -if [ ! 
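
For context, the installer.sh hunk above moves the az login ahead of the empty ARM deployment and adds a managed-identity fallback when no client secret is present. The same pattern with defensive quoting (the committed test leaves ${ARM_CLIENT_SECRET} unquoted, so [ -n ] evaluates true even when the variable is unset):

    # Sketch of the login fallback above, with the secret test quoted so an
    # unset ARM_CLIENT_SECRET correctly falls through to the managed identity.
    if [ -n "${ARM_CLIENT_SECRET:-}" ]; then
        az login --service-principal --username "${ARM_CLIENT_ID}" \
            --password "${ARM_CLIENT_SECRET}" --tenant "${ARM_TENANT_ID}" \
            --output none
    else
        az login --identity --output none
    fi
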
-n "$client_secret" ]; then - #do not output the secret to screen - read -rs -p " -> Kindly provide SPN Password: " client_secret - echo "********" -fi -if [ -z "${tenant_id}" ]; then - load_config_vars "${environment_config_information}" "tenant_id" - if [ -z "${tenant_id}" ]; then - read -r -p "SPN Tenant ID: " tenant_id - fi -else - if is_valid_guid "${tenant_id}" ; then - echo "Valid tenant_id specified" - else - printf -v val %-40.40s "$tenant_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - return_code=65 - echo "The provided tenant_id is not valid " "${val}" > secret.err - exit $return_code - fi -fi +if [ 0 = "${deploy_using_msi_only:-}" ]; then + if [ -z "${client_id}" ]; then + load_config_vars "${environment_config_information}" "client_id" + if [ -z "$client_id" ]; then + read -r -p "SPN App ID: " client_id + fi + else + if is_valid_guid "${client_id}" ; then + echo "Valid client_id specified" + else + printf -v val %-40.40s "$client_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided client_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + return_code=65 + echo "The provided client_id is not valid " "${val}" > secret.err + exit $return_code + fi + fi + + if [ ! -n "$client_secret" ]; then + #do not output the secret to screen + read -rs -p " -> Kindly provide SPN Password: " client_secret + echo "********" + fi + + if [ -z "${tenant_id}" ]; then + load_config_vars "${environment_config_information}" "tenant_id" + if [ -z "${tenant_id}" ]; then + read -r -p "SPN Tenant ID: " tenant_id + fi + else + if is_valid_guid "${tenant_id}" ; then + echo "Valid tenant_id specified" + else + printf -v val %-40.40s "$tenant_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + return_code=65 + echo "The provided tenant_id is not valid " "${val}" > secret.err + exit $return_code + fi + fi + if [ -z "${client_id}" ]; then + echo "Missing client_id" + echo "No client_id specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi + + if [ -z "$client_secret" ]; then + echo "Missing client_secret" + echo "No client_secret specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi + + if [ -z "${tenant_id}" ]; then + echo "Missing tenant_id" + echo "No tenant_id specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi +fi if [ -z "${subscription}" ]; then read -r -p "SPN Subscription: " subscription else @@ -244,41 +286,6 @@ else fi fi -if [ -z "${keyvault}" ]; then - echo "Missing keyvault" - echo "No keyvault specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code -fi - -if [ -z "${client_id}" ]; then - echo "Missing client_id" - 
echo "No client_id specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code -fi - -if [ -z "$client_secret" ]; then - echo "Missing client_secret" - echo "No client_secret specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code -fi - -if [ -z "${tenant_id}" ]; then - echo "Missing tenant_id" - echo "No tenant_id specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code -fi echo "#########################################################################################" echo "# #" @@ -362,65 +369,66 @@ if [ -f stdout.az ]; then fi fi -#turn off output, we do not want to show the details being uploaded to keyvault -secretname="${environment}"-client-id -deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 -fi +if [ 0 = "${deploy_using_msi_only:-}" ]; then + #turn off output, we do not want to show the details being uploaded to keyvault + secretname="${environment}"-client-id + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION + sleep 10 + fi -v="" -secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${secret}" == "${secretname}" ]; -then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${client_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none - fi -else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none -fi + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${client_id}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none + fi -secretname="${environment}"-tenant-id -deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 -fi -v="" -secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${secret}" == "${secretname}" ]; -then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${tenant_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none - fi -else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none -fi + secretname="${environment}"-tenant-id + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION + sleep 10 + fi + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${tenant_id}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none + fi -secretname="${environment}"-client-secret -deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 -fi + secretname="${environment}"-client-secret + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION + sleep 10 + fi -v="" -secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) -if [ "${secret}" == "${secretname}" ]; -then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${client_secret}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none - fi -else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${client_secret}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none + fi fi - exit $return_code diff --git a/deploy/scripts/setup_devops.ps1 b/deploy/scripts/setup_devops.ps1 index a8259a0292..24dca5349a 100644 --- a/deploy/scripts/setup_devops.ps1 +++ b/deploy/scripts/setup_devops.ps1 @@ -482,7 +482,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.6.2" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.0" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines 
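
Each secret above goes through the same three steps: recover a soft-deleted secret of the same name, read the current value, and write only when it differs. Factored into a hypothetical helper (the script itself repeats the sequence inline for client-id, tenant-id, and client-secret):

    # Hypothetical consolidation of the recover/compare/set sequence above.
    function upsert_secret() {
        local vault="$1" name="$2" value="$3" sub="$4" deleted current
        # Recover a soft-deleted secret of the same name first.
        deleted=$(az keyvault secret list-deleted --vault-name "${vault}" \
            --subscription "${sub}" \
            --query "[].{Name:name} | [? contains(Name,'${name}')] | [0]" -o tsv)
        if [ "${deleted}" == "${name}" ]; then
            az keyvault secret recover --name "${name}" \
                --vault-name "${vault}" --subscription "${sub}"
            sleep 10
        fi
        # Only write when the stored value differs (or the secret is absent).
        current=$(az keyvault secret show --name "${name}" \
            --vault-name "${vault}" --subscription "${sub}" \
            --query value -o tsv 2>/dev/null)
        if [ "${current}" != "${value}" ]; then
            az keyvault secret set --name "${name}" --vault-name "${vault}" \
                --subscription "${sub}" --value "${value}" \
                --only-show-errors --output none
        fi
    }
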
variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 495ca563e7..8571b12937 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -255,12 +255,6 @@ variable "deployer_private_ip_address" { default = [""] } -variable "add_system_assigned_identity" { - description = "Boolean flag indicating if a system assigned identity should be added to the deployer" - default = false - type = bool - } - ############################################################################### # # # Deployer authentication # @@ -373,7 +367,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.6.2" + default = "1.7.0" } variable "name_override_file" { @@ -508,3 +502,26 @@ variable "Agent_IP" { description = "IP address of the agent" default = "" } + + +############################################################################### +# # +# Identity # +# # +############################################################################### + +variable "user_assigned_identity_id" { + description = "User assigned Identity resource Id" + default = "" + } + +variable "add_system_assigned_identity" { + description = "Boolean flag indicating if a system assigned identity should be added to the deployer" + default = false + type = bool + } + +variable "use_spn" { + description = "Log in using a service principal when performing the deployment" + default = false + } diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 36fc6bd889..ae130cb930 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -184,6 +184,7 @@ locals { deployer_diagnostics_account_arm_id = var.deployer_diagnostics_account_arm_id app_service_SKU = var.app_service_SKU_name + user_assigned_identity_id = var.user_assigned_identity_id } diff --git a/deploy/terraform/bootstrap/sap_library/imports.tf b/deploy/terraform/bootstrap/sap_library/imports.tf index af7d6fa0a1..02cb5d22ba 100644 --- a/deploy/terraform/bootstrap/sap_library/imports.tf +++ b/deploy/terraform/bootstrap/sap_library/imports.tf @@ -5,7 +5,7 @@ data "terraform_remote_state" "deployer" { backend = "local" - count = length(var.deployer_statefile_foldername) > 0 || var.use_deployer ? 1 : 0 + count = length(var.deployer_statefile_foldername) > 0 || local.use_spn ? 1 : 0 config = { path = length(var.deployer_statefile_foldername) > 0 ? ( "${var.deployer_statefile_foldername}/terraform.tfstate") : ( @@ -16,34 +16,34 @@ data "terraform_remote_state" "deployer" { data "azurerm_key_vault_secret" "subscription_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-subscription-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "client_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 
1 : 0 name = format("%s-client-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "client_secret" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-client-secret", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "tenant_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-tenant-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } // Import current service principal data "azuread_service_principal" "sp" { - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 client_id = local.spn.client_id } diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 8e35b40384..d2eb883650 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -35,10 +35,10 @@ provider "azurerm" { } - subscription_id = var.use_deployer ? local.spn.subscription_id : null - client_id = var.use_deployer ? local.spn.client_id : null - client_secret = var.use_deployer ? local.spn.client_secret : null - tenant_id = var.use_deployer ? local.spn.tenant_id : null + subscription_id = local.use_spn || var.use_spn ? local.spn.subscription_id : null + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "main" skip_provider_registration = true @@ -55,9 +55,9 @@ provider "azurerm" { provider "azurerm" { features {} subscription_id = try(coalesce(var.management_dns_subscription_id, local.spn.subscription_id), null) - client_id = var.use_deployer ? local.spn.client_id : null - client_secret = var.use_deployer ? local.spn.client_secret : null - tenant_id = var.use_deployer ? local.spn.tenant_id : null + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "dnsmanagement" skip_provider_registration = true } diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index d18f6845af..ab7fed1731 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -37,6 +37,10 @@ variable "place_delete_lock_on_resources" { default = false } +variable "use_spn" { + description = "Log in using a service principal when performing the deployment" + default = true + } #######################################4#######################################8 # # diff --git a/deploy/terraform/bootstrap/sap_library/variables_local.tf b/deploy/terraform/bootstrap/sap_library/variables_local.tf index ac15bf4128..503283cb6b 100644 --- a/deploy/terraform/bootstrap/sap_library/variables_local.tf +++ b/deploy/terraform/bootstrap/sap_library/variables_local.tf @@ -7,6 +7,8 @@ locals { version_label = trimspace(file("${path.module}/../../../configs/version.txt")) deployer_prefix = module.sap_namegenerator.naming.prefix.DEPLOYER + use_spn = !var.use_deployer ? 
false : var.use_spn + // If custom names are used for deployer, providing resource_group_name and msi_name will override the naming convention deployer_rg_name = try(local.deployer.resource_group_name, format("%s%s", @@ -18,20 +20,20 @@ locals { spn_key_vault_arm_id = try(data.terraform_remote_state.deployer[0].outputs.deployer_kv_user_arm_id, "") spn = { - subscription_id = var.use_deployer ? data.azurerm_key_vault_secret.subscription_id[0].value : null, - client_id = var.use_deployer ? data.azurerm_key_vault_secret.client_id[0].value : null, - client_secret = var.use_deployer ? data.azurerm_key_vault_secret.client_secret[0].value : null, - tenant_id = var.use_deployer ? data.azurerm_key_vault_secret.tenant_id[0].value : null + subscription_id = local.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, + client_id = local.use_spn ? data.azurerm_key_vault_secret.client_id[0].value : null, + client_secret = local.use_spn ? data.azurerm_key_vault_secret.client_secret[0].value : null, + tenant_id = local.use_spn ? data.azurerm_key_vault_secret.tenant_id[0].value : null } service_principal = { subscription_id = local.spn.subscription_id, tenant_id = local.spn.tenant_id, - object_id = var.use_deployer ? data.azuread_service_principal.sp[0].id : null + object_id = local.use_spn ? data.azuread_service_principal.sp[0].id : null } account = { - subscription_id = var.use_deployer ? data.azurerm_key_vault_secret.subscription_id[0].value : null, + subscription_id = local.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, tenant_id = data.azurerm_client_config.current.tenant_id, object_id = data.azurerm_client_config.current.object_id } diff --git a/deploy/terraform/run/sap_deployer/imports.tf b/deploy/terraform/run/sap_deployer/imports.tf index 245b30f1f2..5d5a0fd92e 100644 --- a/deploy/terraform/run/sap_deployer/imports.tf +++ b/deploy/terraform/run/sap_deployer/imports.tf @@ -11,19 +11,19 @@ data "azurerm_key_vault_secret" "subscription_id" { } data "azurerm_key_vault_secret" "client_id" { - count = length(var.deployer_kv_user_arm_id) > 0 ? 1 : 0 + count = length(var.deployer_kv_user_arm_id) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-client-id", upper(local.infrastructure.environment)) key_vault_id = var.deployer_kv_user_arm_id } data "azurerm_key_vault_secret" "client_secret" { - count = length(var.deployer_kv_user_arm_id) > 0 ? 1 : 0 + count = length(var.deployer_kv_user_arm_id) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-client-secret", upper(local.infrastructure.environment)) key_vault_id = var.deployer_kv_user_arm_id } data "azurerm_key_vault_secret" "tenant_id" { - count = length(var.deployer_kv_user_arm_id) > 0 ? 1 : 0 + count = length(var.deployer_kv_user_arm_id) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-tenant-id", upper(local.infrastructure.environment)) key_vault_id = var.deployer_kv_user_arm_id } diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 5cda6aa3f0..4ce66a38ed 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -45,9 +45,9 @@ provider "azurerm" { skip_provider_registration = true subscription_id = local.spn.subscription_id - client_id = local.spn.client_id - client_secret = local.spn.client_secret - tenant_id = local.spn.tenant_id + client_id = var.use_spn ? local.spn.client_id : null + client_secret = var.use_spn ? local.spn.client_secret: null + tenant_id = var.use_spn ? 
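
The recurring Terraform pattern in these hunks: the Key Vault lookups are materialized only when SPN-based login is requested, by driving count from the use_spn flag, and every consumer guards the [0] index behind the same condition so an MSI-based run never requires the SPN secrets to exist. In isolation (names and defaults are illustrative, not the repository's actual declarations):

    # Illustrative only: a secret that exists solely for SPN-based runs.
    variable "use_spn" {
      description = "Log in using a service principal when performing the deployment"
      default     = false
    }

    variable "key_vault_id" {
      description = "Resource id of the deployer Key Vault"
    }

    data "azurerm_key_vault_secret" "client_id" {
      count        = var.use_spn ? 1 : 0
      name         = "MGMT-client-id"
      key_vault_id = var.key_vault_id
    }

    locals {
      # null when running under a managed identity - no secret is ever read.
      client_id = var.use_spn ? data.azurerm_key_vault_secret.client_id[0].value : null
    }
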
local.spn.tenant_id: null alias = "main" } @@ -55,9 +55,9 @@ provider "azurerm" { features {} alias = "dnsmanagement" subscription_id = try(var.management_dns_subscription_id, null) - client_id = local.spn.client_id - client_secret = local.spn.client_secret - tenant_id = local.spn.tenant_id + client_id = var.use_spn ? local.spn.client_id : null + client_secret = var.use_spn ? local.spn.client_secret: null + tenant_id = var.use_spn ? local.spn.tenant_id: null skip_provider_registration = true } diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 7347cbd4fe..f0fd374dd0 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -255,12 +255,6 @@ variable "deployer_private_ip_address" { default = [""] } -variable "add_system_assigned_identity" { - description = "Boolean flag indicating if a system assigned identity should be added to the deployer" - default = false - type = bool - } - ############################################################################### # # # Deployer authentication # @@ -379,7 +373,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.6.2" + default = "1.7.0" } variable "name_override_file" { @@ -523,3 +517,26 @@ variable "tfstate_resource_id" { } } + +############################################################################### +# # +# Identity # +# # +############################################################################### + +variable "user_assigned_identity_id" { + description = "User assigned Identity resource Id" + default = "" + } + +variable "add_system_assigned_identity" { + description = "Boolean flag indicating if a system assigned identity should be added to the deployer" + default = false + type = bool + } + +variable "use_spn" { + description = "Log in using a service principal when performing the deployment" + default = true + } + diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index 7c1b38f164..b2822b8441 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -184,7 +184,7 @@ locals { deployer_diagnostics_account_arm_id = var.deployer_diagnostics_account_arm_id app_service_SKU = var.app_service_SKU_name - + user_assigned_identity_id = var.user_assigned_identity_id } authentication = { diff --git a/deploy/terraform/run/sap_deployer/variables_local.tf b/deploy/terraform/run/sap_deployer/variables_local.tf index 5a435fb623..6dbb79d863 100644 --- a/deploy/terraform/run/sap_deployer/variables_local.tf +++ b/deploy/terraform/run/sap_deployer/variables_local.tf @@ -36,10 +36,10 @@ locals { ) spn = { - subscription_id = length(var.deployer_kv_user_arm_id) > 0 ? data.azurerm_key_vault_secret.subscription_id[0].value : null, - client_id = length(var.deployer_kv_user_arm_id) > 0 ? data.azurerm_key_vault_secret.client_id[0].value : null, - client_secret = length(var.deployer_kv_user_arm_id) > 0 ? data.azurerm_key_vault_secret.client_secret[0].value : null, - tenant_id = length(var.deployer_kv_user_arm_id) > 0 ? data.azurerm_key_vault_secret.tenant_id[0].value : null + subscription_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, + client_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? 
data.azurerm_key_vault_secret.client_id[0].value : null, + client_secret = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.client_secret[0].value : null, + tenant_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.tenant_id[0].value : null } } diff --git a/deploy/terraform/run/sap_landscape/imports.tf b/deploy/terraform/run/sap_landscape/imports.tf index 30e02ce83a..c825131459 100644 --- a/deploy/terraform/run/sap_landscape/imports.tf +++ b/deploy/terraform/run/sap_landscape/imports.tf @@ -42,25 +42,25 @@ data "azurerm_key_vault_secret" "tenant_id" { } data "azurerm_key_vault_secret" "cp_subscription_id" { - count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? 1 : 0 + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-subscription-id", data.terraform_remote_state.deployer[0].outputs.environment) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "cp_client_id" { - count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? 1 : 0 + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-client-id", data.terraform_remote_state.deployer[0].outputs.environment) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "cp_client_secret" { - count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? 1 : 0 + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-client-secret", data.terraform_remote_state.deployer[0].outputs.environment) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "cp_tenant_id" { - count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? 1 : 0 + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (var.use_spn ? 1 : 0) : 0 name = format("%s-tenant-id", data.terraform_remote_state.deployer[0].outputs.environment) key_vault_id = local.spn_key_vault_arm_id } diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf index 65edb75fc0..a41775a96e 100644 --- a/deploy/terraform/run/sap_landscape/output.tf +++ b/deploy/terraform/run/sap_landscape/output.tf @@ -280,6 +280,12 @@ output "controlplane_environment" { description = "Control plane environment" value = try(data.terraform_remote_state.deployer[0].outputs.environment, "") } + +output "use_spn" { + description = "Perform deployments using a service principal" + value = var.use_spn + } + ############################################################################### # # # iSCSI # diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 765a5edc49..6481d50f88 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -73,12 +73,12 @@ provider "azurerm" { provider "azurerm" { features {} - subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null - use_msi = false - client_id = var.use_spn ? local.cp_spn.client_id : null - client_secret = var.use_spn ? local.cp_spn.client_secret : null - tenant_id = var.use_spn ? 
local.cp_spn.tenant_id : null - alias = "peering" + subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null + use_msi = var.use_spn ? false : true + client_id = var.use_spn ? local.cp_spn.client_id : null + client_secret = var.use_spn ? local.cp_spn.client_secret : null + tenant_id = var.use_spn ? local.cp_spn.tenant_id : null + alias = "peering" skip_provider_registration = true } diff --git a/deploy/terraform/run/sap_library/imports.tf b/deploy/terraform/run/sap_library/imports.tf index d9cd5b78c9..663e139953 100644 --- a/deploy/terraform/run/sap_library/imports.tf +++ b/deploy/terraform/run/sap_library/imports.tf @@ -5,7 +5,7 @@ data "terraform_remote_state" "deployer" { backend = "azurerm" - count = length(var.deployer_tfstate_key) > 0 || var.use_deployer ? 1 : 0 + count = length(var.deployer_tfstate_key) > 0 || local.use_spn ? 1 : 0 config = { resource_group_name = local.saplib_resource_group_name storage_account_name = local.tfstate_storage_account_name @@ -17,34 +17,34 @@ data "terraform_remote_state" "deployer" { data "azurerm_key_vault_secret" "subscription_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-subscription-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "client_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-client-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "client_secret" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-client-secret", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } data "azurerm_key_vault_secret" "tenant_id" { provider = azurerm.deployer - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 name = format("%s-tenant-id", upper(local.infrastructure.environment)) key_vault_id = local.spn_key_vault_arm_id } // Import current service principal data "azuread_service_principal" "sp" { - count = var.use_deployer ? 1 : 0 + count = local.use_spn ? 1 : 0 client_id = local.spn.client_id } diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index 106ea43ff1..f9d445fbca 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -31,9 +31,9 @@ provider "azurerm" { } subscription_id = local.spn.subscription_id - client_id = var.use_deployer ? local.spn.client_id : null - client_secret = var.use_deployer ? local.spn.client_secret : null - tenant_id = var.use_deployer ? local.spn.tenant_id : null + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null partner_id = "140c3bc9-c937-4139-874f-88288bab08bb" alias = "main" @@ -53,9 +53,9 @@ provider "azurerm" { } alias = "dnsmanagement" subscription_id = try(coalesce(var.management_dns_subscription_id, local.spn.subscription_id), null) - client_id = var.use_deployer ? local.spn.client_id : null - client_secret = var.use_deployer ? local.spn.client_secret : null - tenant_id = var.use_deployer ? local.spn.tenant_id : null + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? 
local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null skip_provider_registration = true } diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index 255f245c31..ce2cba69bd 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -37,12 +37,17 @@ variable "place_delete_lock_on_resources" { default = false } -variable "short_named_endpoints_nics" { +variable "short_named_endpoints_nics" { description = "If defined, uses short names for private endpoints nics" default = false } +variable "use_spn" { + description = "Log in using a service principal when performing the deployment" + default = true + } + #######################################4#######################################8 # # # Resource group definitioms # diff --git a/deploy/terraform/run/sap_library/variables_local.tf b/deploy/terraform/run/sap_library/variables_local.tf index 7956c69e9b..59798b8039 100644 --- a/deploy/terraform/run/sap_library/variables_local.tf +++ b/deploy/terraform/run/sap_library/variables_local.tf @@ -1,57 +1,60 @@ ############################################################################### -# # -# Local Variables # -# # +# # +# Local Variables # +# # ############################################################################### locals { - version_label = trimspace(file("${path.module}/../../../configs/version.txt")) - deployer_prefix = module.sap_namegenerator.naming.prefix.DEPLOYER + version_label = trimspace(file("${path.module}/../../../configs/version.txt")) + deployer_prefix = module.sap_namegenerator.naming.prefix.DEPLOYER + + use_spn = !var.use_deployer ? false : var.use_spn + // If custom names are used for deployer, providing resource_group_name and msi_name will override the naming convention - deployer_rg_name = try(local.deployer.resource_group_name, format("%s%s%s", - module.sap_namegenerator.naming.resource_prefixes.deployer_rg, - local.deployer_prefix, - module.sap_namegenerator.naming.resource_suffixes.deployer_rg - )) + deployer_rg_name = try(local.deployer.resource_group_name, format("%s%s%s", + module.sap_namegenerator.naming.resource_prefixes.deployer_rg, + local.deployer_prefix, + module.sap_namegenerator.naming.resource_suffixes.deployer_rg + )) // Locate the tfstate storage account - saplib_subscription_id = split("/", var.tfstate_resource_id)[2] - saplib_resource_group_name = split("/", var.tfstate_resource_id)[4] - tfstate_storage_account_name = split("/", var.tfstate_resource_id)[8] - tfstate_container_name = module.sap_namegenerator.naming.resource_suffixes.tfstate - deployer_tfstate_key = length(var.deployer_tfstate_key) > 0 ? ( - var.deployer_tfstate_key) : ( - format("%s%s", local.deployer_rg_name, ".terraform.tfstate") - ) + saplib_subscription_id = split("/", var.tfstate_resource_id)[2] + saplib_resource_group_name = split("/", var.tfstate_resource_id)[4] + tfstate_storage_account_name = split("/", var.tfstate_resource_id)[8] + tfstate_container_name = module.sap_namegenerator.naming.resource_suffixes.tfstate + deployer_tfstate_key = length(var.deployer_tfstate_key) > 0 ? ( + var.deployer_tfstate_key) : ( + format("%s%s", local.deployer_rg_name, ".terraform.tfstate") + ) // Retrieve the arm_id of deployer's Key Vault from deployer's terraform.tfstate spn_key_vault_arm_id = try(data.terraform_remote_state.deployer[0].outputs.deployer_kv_user_arm_id, "") - spn = { - subscription_id = var.use_deployer ? 
data.azurerm_key_vault_secret.subscription_id[0].value : null, - client_id = var.use_deployer ? data.azurerm_key_vault_secret.client_id[0].value : null, - client_secret = var.use_deployer ? data.azurerm_key_vault_secret.client_secret[0].value : null, - tenant_id = var.use_deployer ? data.azurerm_key_vault_secret.tenant_id[0].value : null - } - - service_principal = { - subscription_id = local.spn.subscription_id, - tenant_id = local.spn.tenant_id, - object_id = var.use_deployer ? data.azuread_service_principal.sp[0].id : null - } - - account = { - subscription_id = var.use_deployer ? data.azurerm_key_vault_secret.subscription_id[0].value : null, - tenant_id = data.azurerm_client_config.current.tenant_id, - object_id = data.azurerm_client_config.current.object_id - } - - custom_names = length(var.name_override_file) > 0 ? ( - jsondecode(file(format("%s/%s", path.cwd, var.name_override_file)))) : ( - null - ) + spn = { + subscription_id = local.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, + client_id = local.use_spn ? data.azurerm_key_vault_secret.client_id[0].value : null, + client_secret = local.use_spn ? data.azurerm_key_vault_secret.client_secret[0].value : null, + tenant_id = local.use_spn ? data.azurerm_key_vault_secret.tenant_id[0].value : null + } + + service_principal = { + subscription_id = local.spn.subscription_id, + tenant_id = local.spn.tenant_id, + object_id = local.use_spn ? data.azuread_service_principal.sp[0].id : null + } + + account = { + subscription_id = local.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, + tenant_id = data.azurerm_client_config.current.tenant_id, + object_id = data.azurerm_client_config.current.object_id + } + + custom_names = length(var.name_override_file) > 0 ? ( + jsondecode(file(format("%s/%s", path.cwd, var.name_override_file)))) : ( + null + ) } diff --git a/deploy/terraform/run/sap_system/imports.tf b/deploy/terraform/run/sap_system/imports.tf index 932c8f6ce3..42edf47c8f 100644 --- a/deploy/terraform/run/sap_system/imports.tf +++ b/deploy/terraform/run/sap_system/imports.tf @@ -1,84 +1,86 @@ + /* Description: - Retrieve remote tfstate file(s) and current environment's SPN + Retrieve remote tfstate file of Deployer and current environment's SPN */ -data "azurerm_client_config" "current" { - } +data "azurerm_client_config" "current" {} data "terraform_remote_state" "deployer" { - backend = "azurerm" - count = length(try(var.deployer_tfstate_key, "")) > 1 ? 1 : 0 - config = { - resource_group_name = local.saplib_resource_group_name - storage_account_name = local.tfstate_storage_account_name - container_name = local.tfstate_container_name - key = try(var.deployer_tfstate_key, "") - subscription_id = local.saplib_subscription_id - } - } + backend = "azurerm" + count = length(try(var.deployer_tfstate_key, "")) > 0 ? 
1 : 0 + config = { + resource_group_name = local.saplib_resource_group_name + storage_account_name = local.tfstate_storage_account_name + container_name = local.tfstate_container_name + key = var.deployer_tfstate_key + subscription_id = local.saplib_subscription_id + } +} data "terraform_remote_state" "landscape" { - backend = "azurerm" - config = { - resource_group_name = local.saplib_resource_group_name - storage_account_name = local.tfstate_storage_account_name - container_name = "tfstate" - key = var.landscape_tfstate_key - subscription_id = local.saplib_subscription_id - } + backend = "azurerm" + config = { + resource_group_name = local.saplib_resource_group_name + storage_account_name = local.tfstate_storage_account_name + container_name = "tfstate" + key = var.landscape_tfstate_key + subscription_id = local.saplib_subscription_id + } } data "azurerm_key_vault_secret" "subscription_id" { - name = format("%s-subscription-id", local.environment) - key_vault_id = local.spn_key_vault_arm_id - } + name = format("%s-subscription-id", local.environment) + key_vault_id = local.spn_key_vault_arm_id + } data "azurerm_key_vault_secret" "client_id" { - count = var.use_spn ? 1 : 0 - name = format("%s-client-id", local.environment) - key_vault_id = local.spn_key_vault_arm_id - } + count = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0 + name = format("%s-client-id", local.environment) + key_vault_id = local.spn_key_vault_arm_id + } -data "azurerm_key_vault_secret" "client_secret" { - count = var.use_spn ? 1 : 0 - name = format("%s-client-secret", local.environment) - key_vault_id = local.spn_key_vault_arm_id - } +data "azurerm_key_vault_secret" "client_secret" { + count = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0 + name = format("%s-client-secret", local.environment) + key_vault_id = local.spn_key_vault_arm_id + } -data "azurerm_key_vault_secret" "tenant_id" { - count = var.use_spn ? 1 : 0 - name = format("%s-tenant-id", local.environment) - key_vault_id = local.spn_key_vault_arm_id - } +data "azurerm_key_vault_secret" "tenant_id" { + count = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0 + name = format("%s-tenant-id", local.environment) + key_vault_id = local.spn_key_vault_arm_id + } -// Import current service principal -data "azuread_service_principal" "sp" { - count = var.use_spn ? 1 : 0 - client_id = local.spn.client_id - } +data "azurerm_key_vault_secret" "cp_subscription_id" { + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0) : 0 + name = format("%s-subscription-id", data.terraform_remote_state.deployer[0].outputs.environment) + key_vault_id = local.spn_key_vault_arm_id + } -data "azurerm_key_vault_secret" "cp_subscription_id" { - count = length(try(data.terraform_remote_state.landscape.outputs.controlplane_environment, "")) > 0 ? 1 : 0 - name = format("%s-subscription-id", data.terraform_remote_state.landscape.outputs.controlplane_environment) - key_vault_id = local.spn_key_vault_arm_id - } +data "azurerm_key_vault_secret" "cp_client_id" { + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 
1 : 0) : 0 + name = format("%s-client-id", data.terraform_remote_state.deployer[0].outputs.environment) + key_vault_id = local.spn_key_vault_arm_id + } -data "azurerm_key_vault_secret" "cp_client_id" { - count = length(try(data.terraform_remote_state.landscape.outputs.controlplane_environment, "")) > 0 ? 1 : 0 - name = format("%s-client-id", data.terraform_remote_state.landscape.outputs.controlplane_environment) - key_vault_id = local.spn_key_vault_arm_id - } +data "azurerm_key_vault_secret" "cp_client_secret" { + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0) : 0 + name = format("%s-client-secret", data.terraform_remote_state.deployer[0].outputs.environment) + key_vault_id = local.spn_key_vault_arm_id + } + +data "azurerm_key_vault_secret" "cp_tenant_id" { + count = length(try(data.terraform_remote_state.deployer[0].outputs.environment, "")) > 0 ? (try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0) : 0 + name = format("%s-tenant-id", data.terraform_remote_state.deployer[0].outputs.environment) + key_vault_id = local.spn_key_vault_arm_id + } + +// Import current service principal +data "azuread_service_principal" "sp" { + count = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 1 : 0 + client_id = local.spn.client_id + } -data "azurerm_key_vault_secret" "cp_client_secret" { - count = length(try(data.terraform_remote_state.landscape.outputs.controlplane_environment, "")) > 0 ? 1 : 0 - name = format("%s-client-secret", data.terraform_remote_state.landscape.outputs.controlplane_environment) - key_vault_id = local.spn_key_vault_arm_id - } -data "azurerm_key_vault_secret" "cp_tenant_id" { - count = length(try(data.terraform_remote_state.landscape.outputs.controlplane_environment, "")) > 0 ? 1 : 0 - name = format("%s-tenant-id", data.terraform_remote_state.landscape.outputs.controlplane_environment) - key_vault_id = local.spn_key_vault_arm_id - } diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 7c3632ecf5..a268a8eecd 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -12,52 +12,55 @@ ~> 0.8.4 is equivalent to >= 0.8.4, < 0.9 */ +provider "azurerm" { + features {} + subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null + } + provider "azurerm" { features { resource_group { - prevent_deletion_if_contains_resources = true + prevent_deletion_if_contains_resources = true } + key_vault { + purge_soft_delete_on_destroy = !var.enable_purge_control_for_keyvaults + purge_soft_deleted_keys_on_destroy = !var.enable_purge_control_for_keyvaults + purge_soft_deleted_secrets_on_destroy = !var.enable_purge_control_for_keyvaults + purge_soft_deleted_certificates_on_destroy = !var.enable_purge_control_for_keyvaults + } } - subscription_id = local.spn.subscription_id - client_id = local.spn.client_id - client_secret = local.spn.client_secret - tenant_id = local.spn.tenant_id + subscription_id = data.azurerm_key_vault_secret.subscription_id.value + client_id = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_id : null + client_secret = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 
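
The try() wrapper above is the backwards-compatibility hinge: workload-zone state files written before the use_spn output existed make the lookup fail, and try() then falls back to true, preserving the previous SPN behaviour. Reduced to its essentials (a sketch, not the repository's exact locals):

    # Illustrative only: read an output that may be absent from older
    # landscape state files, defaulting to the previous (SPN) behaviour.
    locals {
      landscape_uses_spn = try(
        data.terraform_remote_state.landscape.outputs.use_spn,
        true
      )
      login_with_spn = local.landscape_uses_spn && var.use_spn
    }
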
diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf
index 7c3632ecf5..a268a8eecd 100644
--- a/deploy/terraform/run/sap_system/providers.tf
+++ b/deploy/terraform/run/sap_system/providers.tf
@@ -12,52 +12,55 @@
     ~>  0.8.4 is equivalent to >= 0.8.4, < 0.9
 */

+provider "azurerm" {
+  features {}
+  subscription_id            = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null
+  }
+
 provider "azurerm" {
   features {
     resource_group {
-      prevent_deletion_if_contains_resources = true
+      prevent_deletion_if_contains_resources = true
     }
+    key_vault {
+      purge_soft_delete_on_destroy               = !var.enable_purge_control_for_keyvaults
+      purge_soft_deleted_keys_on_destroy         = !var.enable_purge_control_for_keyvaults
+      purge_soft_deleted_secrets_on_destroy      = !var.enable_purge_control_for_keyvaults
+      purge_soft_deleted_certificates_on_destroy = !var.enable_purge_control_for_keyvaults
+    }
   }
-  subscription_id            = local.spn.subscription_id
-  client_id                  = local.spn.client_id
-  client_secret              = local.spn.client_secret
-  tenant_id                  = local.spn.tenant_id
+  subscription_id            = data.azurerm_key_vault_secret.subscription_id.value
+  client_id                  = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_id : null
+  client_secret              = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_secret : null
+  tenant_id                  = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.tenant_id : null
+  use_msi                    = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? false : true

-  partner_id                 = "3179cd51-f54b-4c73-ac10-8e99417efce7"
-  alias                      = "system"
-  skip_provider_registration = true
-  }
+  storage_use_azuread        = true

-provider "azurerm" {
-  features {}
-  subscription_id            = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null
+  partner_id                 = "3179cd51-f54b-4c73-ac10-8e99417efce7"
+  alias                      = "system"
   skip_provider_registration = true
   }

 provider "azurerm" {
   features {}
   alias                      = "dnsmanagement"
-  subscription_id            = length(try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, "")) > 1 ? data.terraform_remote_state.landscape.outputs.management_dns_subscription_id : length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null
-  client_id                  = local.cp_spn.client_id
-  client_secret              = local.cp_spn.client_secret
-  tenant_id                  = local.cp_spn.tenant_id
+  subscription_id            = coalesce(var.management_dns_subscription_id, length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : "")
+  client_id                  = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_id : null
+  client_secret              = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_secret : null
+  tenant_id                  = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.tenant_id : null
+  use_msi                    = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? false : true
+  storage_use_azuread        = true
   skip_provider_registration = true
   }
+
+
 provider "azuread" {
-  client_id     = local.spn.client_id
-  client_secret = local.spn.client_secret
+  client_id     = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_id : null
+  client_secret = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_secret : null
   tenant_id     = local.spn.tenant_id
   }
-
-# provider "azapi" {
-#   alias           = "api"
-#   subscription_id = local.spn.subscription_id
-#   client_id       = local.spn.client_id
-#   client_secret   = local.spn.client_secret
-#   tenant_id       = local.spn.tenant_id
-# }
-
 terraform {
   required_version = ">= 1.0"
   required_providers {
@@ -83,4 +86,3 @@
     }
   }
 }
-
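The provider changes reduce to a single switch: pass explicit service principal credentials when use_spn is in effect, otherwise hand authentication to the managed identity. A stripped-down sketch of that shape, with illustrative variable names rather than this module's locals:

    variable "demo_use_spn" { type = bool }
    variable "demo_subscription_id" { type = string }
    variable "demo_client_id" { type = string }
    variable "demo_client_secret" { type = string }
    variable "demo_tenant_id" { type = string }

    provider "azurerm" {
      features {}
      subscription_id = var.demo_subscription_id
      # Service principal path: explicit credentials.
      client_id     = var.demo_use_spn ? var.demo_client_id : null
      client_secret = var.demo_use_spn ? var.demo_client_secret : null
      tenant_id     = var.demo_use_spn ? var.demo_tenant_id : null
      # Managed identity path: null credentials plus use_msi = true.
      use_msi = var.demo_use_spn ? false : true
    }

use_msi = !var.demo_use_spn would say the same thing; the ternary simply mirrors the form used in the patch.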
diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf
index b2473c5153..3c4074a311 100644
--- a/deploy/terraform/run/sap_system/transform.tf
+++ b/deploy/terraform/run/sap_system/transform.tf
@@ -177,10 +177,11 @@ locals {
                         false) : (
                         var.application_server_use_ppg
                       ) : false
-  app_use_avset    = var.application_server_count == 0 || var.use_scalesets_for_deployment || length(var.application_server_zones) > 0 || !local.enable_app_tier_deployment ? (
+  app_use_avset    = var.application_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? (
                         false) : (
                         var.application_server_use_avset
                       )
+  avset_arm_ids    = var.application_server_vm_avset_arm_ids
   scs_server_count = local.enable_app_tier_deployment ? (
                         max(var.scs_server_count, try(var.application_tier.scs_server_count, 0))
@@ -200,7 +201,7 @@ locals {
                         false) : (
                         var.scs_server_use_ppg
                       ) : false
-  scs_use_avset    = var.scs_server_count == 0 || var.use_scalesets_for_deployment || length(var.scs_server_zones) > 0 || !local.enable_app_tier_deployment ? (
+  scs_use_avset    = var.scs_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? (
                         false) : (
                         var.scs_server_use_avset
                       )
@@ -565,7 +566,7 @@ locals {
   infrastructure = merge(local.temp_infrastructure, (
                      local.resource_group_defined ? { resource_group = local.resource_group } : null), (
-                     local.app_ppg_defined ? { ppg = local.app_ppg } : null), (
+                     local.app_ppg_defined ? { app_ppg = local.app_ppg } : null), (
                      local.ppg_defined ? { ppg = local.ppg } : null), (
                      local.deploy_anchor_vm ? { anchor_vms = local.anchor_vms } : null),
                      { vnets = local.temp_vnet }
diff --git a/deploy/terraform/run/sap_system/variables_local.tf b/deploy/terraform/run/sap_system/variables_local.tf
index a596c5760c..a0683743df 100644
--- a/deploy/terraform/run/sap_system/variables_local.tf
+++ b/deploy/terraform/run/sap_system/variables_local.tf
@@ -42,7 +42,7 @@ locals {
   }

   cp_spn = {
-    subscription_id = try(data.azurerm_key_vault_secret.cp_subscription_id[0].value, null)
+    subscription_id = local.deployer_subscription_id
     client_id       = var.use_spn ? try(coalesce(data.azurerm_key_vault_secret.cp_client_id[0].value, data.azurerm_key_vault_secret.client_id[0].value), null) : null,
     client_secret   = var.use_spn ? try(coalesce(data.azurerm_key_vault_secret.cp_client_secret[0].value, data.azurerm_key_vault_secret.client_secret[0].value), null) : null,
     tenant_id       = var.use_spn ? try(coalesce(data.azurerm_key_vault_secret.cp_tenant_id[0].value, data.azurerm_key_vault_secret.tenant_id[0].value), null) : null
@@ -50,14 +50,14 @@ locals {

   service_principal = {
     subscription_id = local.spn.subscription_id,
-    tenant_id       = local.spn.tenant_id,
+    tenant_id       = var.use_spn ? local.spn.tenant_id : null,
     object_id       = var.use_spn ? data.azuread_service_principal.sp[0].id : null
   }

   account = {
     subscription_id = data.azurerm_key_vault_secret.subscription_id.value,
-    tenant_id       = data.azurerm_client_config.current.tenant_id,
-    object_id       = data.azurerm_client_config.current.object_id
+    tenant_id       = var.use_spn ? data.azurerm_client_config.current.tenant_id : null,
+    object_id       = var.use_spn ? data.azurerm_client_config.current.object_id : null
   }

   custom_names = length(var.name_override_file) > 0 ? (
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf
index a75326fb4b..ac4f9169f2 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf
@@ -133,11 +133,11 @@ resource "azurerm_windows_web_app" "webapp" {
     # scm_use_main_ip_restriction = true
   }

-  key_vault_reference_identity_id = azurerm_user_assigned_identity.deployer.id
+  key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id

   identity {
     type         = "SystemAssigned, UserAssigned"
-    identity_ids = [azurerm_user_assigned_identity.deployer.id]
+    identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ]
   }
   connection_string {
     name  = "sa_tfstate_conn_str"
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/configure-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/configure-deployer.tf
index ef64f1a8fb..1c4afaea38 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/configure-deployer.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/configure-deployer.tf
@@ -23,7 +23,7 @@ resource "null_resource" "prepare-deployer" {
     content = templatefile(format("%s/templates/configure_deployer.sh.tmpl", path.module), {
       tfversion       = var.tf_version
       rg_name         = local.resourcegroup_name,
-      client_id       = azurerm_user_assigned_identity.deployer.client_id,
+      client_id       = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].client_id : data.azurerm_user_assigned_identity.deployer[0].client_id ,
       subscription_id = data.azurerm_subscription.primary.subscription_id,
       tenant_id       = data.azurerm_subscription.primary.tenant_id,
       local_user      = local.username,
@@ -62,7 +62,7 @@ resource "local_file" "configure_deployer" {
   content = templatefile(format("%s/templates/configure_deployer.sh.tmpl", path.module), {
     tfversion       = var.tf_version,
     rg_name         = local.resourcegroup_name,
-    client_id       = azurerm_user_assigned_identity.deployer.client_id,
+    client_id       = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].client_id : data.azurerm_user_assigned_identity.deployer[0].client_id,
     subscription_id = data.azurerm_subscription.primary.subscription_id,
     tenant_id       = data.azurerm_subscription.primary.tenant_id,
     local_user      = local.username,
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf
index 38ab790e46..7eec0d1085 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf
@@ -109,7 +109,7 @@ resource "azurerm_role_assignment" "deployer_boot_diagnostics_contributor_msi" {
   count                = var.assign_subscription_permissions ? 1 : 0
   scope                = length(var.deployer.deployer_diagnostics_account_arm_id) > 0 ? var.deployer.deployer_diagnostics_account_arm_id : azurerm_storage_account.deployer[0].id
   role_definition_name = "Storage Account Contributor"
-  principal_id         = azurerm_user_assigned_identity.deployer.principal_id
+  principal_id         = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id
 }

 resource "azurerm_role_assignment" "resource_group_contributor" {
@@ -125,7 +125,7 @@ resource "azurerm_role_assignment" "resource_group_contributor_contributor_msi" {
   count                = var.assign_subscription_permissions ? 1 : 0
   scope                = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id
   role_definition_name = "Contributor"
-  principal_id         = azurerm_user_assigned_identity.deployer.principal_id
+  principal_id         = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id
 }

 resource "azurerm_role_assignment" "resource_group_acsservice" {
@@ -141,6 +141,6 @@ resource "azurerm_role_assignment" "resource_group_acsservice_msi" {
   count                = var.assign_subscription_permissions ? 1 : 0
   scope                = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id
   role_definition_name = "Azure Center for SAP solutions administrator"
-  principal_id         = azurerm_user_assigned_identity.deployer.principal_id
+  principal_id         = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id
 }
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
index 8aacf17c4a..ecd65b2d87 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
@@ -19,7 +19,8 @@ resource "azurerm_key_vault" "kv_user" {
                                  data.azurerm_resource_group.deployer[0].location) : (
                                  azurerm_resource_group.deployer[0].location
                                )
-  tenant_id                  = azurerm_user_assigned_identity.deployer.tenant_id
+  tenant_id                  = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].tenant_id : data.azurerm_user_assigned_identity.deployer[0].tenant_id
+
   soft_delete_retention_days = 7
   purge_protection_enabled   = var.enable_purge_control_for_keyvaults

@@ -197,7 +198,7 @@ resource "azurerm_key_vault_secret" "web_pwd" {
   depends_on = [
     azurerm_key_vault_access_policy.kv_user_pre_deployer[0],
     azurerm_key_vault_access_policy.kv_user_msi,
-    azurerm_key_vault_access_policy.kv_user_systemidentity
+    azurerm_key_vault_access_policy.kv_user_systemidentity,
   ]

   name         = "WEB-PWD"
@@ -277,8 +278,9 @@ resource "azurerm_key_vault_access_policy" "kv_user_msi" {
   provider     = azurerm.main
   key_vault_id = var.key_vault.kv_exists ? data.azurerm_key_vault.kv_user[0].id : azurerm_key_vault.kv_user[0].id

-  tenant_id = azurerm_user_assigned_identity.deployer.tenant_id
-  object_id = azurerm_user_assigned_identity.deployer.principal_id
+  tenant_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].tenant_id : data.azurerm_user_assigned_identity.deployer[0].tenant_id
+  object_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id
+

   secret_permissions = [
     "Get",
@@ -317,7 +319,7 @@ resource "azurerm_key_vault_access_policy" "kv_user_pre_deployer" {
   count = var.key_vault.kv_exists && length(var.spn_id) > 0 ? 0 : 1

   key_vault_id = azurerm_key_vault.kv_user[0].id
-  tenant_id    = azurerm_user_assigned_identity.deployer.tenant_id
+  tenant_id    = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].tenant_id : data.azurerm_user_assigned_identity.deployer[0].tenant_id

   # If running as a normal user use the object ID of the user otherwise use the object_id from AAD
   object_id = coalesce(data.azurerm_client_config.deployer.object_id,
                        var.spn_id,
@@ -336,6 +338,12 @@ resource "azurerm_key_vault_access_policy" "kv_user_pre_deployer" {
     "Purge"
   ]

+  lifecycle {
+    ignore_changes = [
+      object_id
+    ]
+  }
+

 }

@@ -347,7 +355,7 @@ resource "azurerm_key_vault_access_policy" "kv_user_additional_users" {
            )
   key_vault_id = azurerm_key_vault.kv_user[0].id
-  tenant_id    = azurerm_user_assigned_identity.deployer.tenant_id
+  tenant_id    = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].tenant_id : data.azurerm_user_assigned_identity.deployer[0].tenant_id
   object_id    = var.additional_users_to_add_to_keyvault_policies[count.index]
   secret_permissions = [
     "Get",
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/output.tf b/deploy/terraform/terraform-units/modules/sap_deployer/output.tf
index f455b93789..f764b93035 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/output.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/output.tf
@@ -59,7 +59,7 @@ output "deployer_id" {
 // Details of the user assigned identity for deployer(s)
 output "deployer_uai" {
   description = "Deployer User Assigned Identity"
-  value       = azurerm_user_assigned_identity.deployer
+  value       = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0] : data.azurerm_user_assigned_identity.deployer[0]
 }

 output "deployer_public_ip_address" {
diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
index 0cd795cf1f..8e72c62bd7 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
@@ -39,8 +39,8 @@ resource "azurerm_public_ip" "deployer" {
 }

 resource "azurerm_network_interface" "deployer" {
-count = var.deployer_vm_count
-  name = format("%s%s%s%s%s",
+  count = var.deployer_vm_count
+  name  = format("%s%s%s%s%s",
           var.naming.resource_prefixes.nic,
           local.prefix,
           var.naming.separator,
@@ -81,18 +81,27 @@ count = var.deployer_vm_count

 // User defined identity for all Deployers, assign contributor to the current subscription
 resource "azurerm_user_assigned_identity" "deployer" {
+  count               = length(var.deployer.user_assigned_identity_id) == 0 ? 1 : 0
   name                = format("%s%s%s", var.naming.resource_prefixes.msi, local.prefix, var.naming.resource_suffixes.msi)
   resource_group_name = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].name : azurerm_resource_group.deployer[0].name
   location            = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].location : azurerm_resource_group.deployer[0].location
 }

+// User defined identity for all Deployers, assign contributor to the current subscription
+data "azurerm_user_assigned_identity" "deployer" {
+  count               = length(var.deployer.user_assigned_identity_id) > 0 ? 1 : 0
+  name                = split("/", var.deployer.user_assigned_identity_id)[8]
+  resource_group_name = split("/", var.deployer.user_assigned_identity_id)[4]
+}
+
+
 # // Add role to be able to deploy resources
 resource "azurerm_role_assignment" "sub_contributor" {
   provider             = azurerm.main
-  count                = var.assign_subscription_permissions ? 1 : 0
+  count                = var.assign_subscription_permissions && length(var.deployer.user_assigned_identity_id) == 0 ? 1 : 0
   scope                = data.azurerm_subscription.primary.id
   role_definition_name = "Reader"
-  principal_id         = azurerm_user_assigned_identity.deployer.principal_id
+  principal_id         = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id
 }

 // Linux Virtual Machine for Deployer
@@ -159,7 +168,7 @@ resource "azurerm_linux_virtual_machine" "deployer" {

   identity {
     type         = var.deployer.add_system_assigned_identity ? "SystemAssigned, UserAssigned" : "UserAssigned"
-    identity_ids = [azurerm_user_assigned_identity.deployer.id]
+    identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ]
   }

   dynamic "admin_ssh_key" {
@@ -227,7 +236,7 @@ resource "azurerm_virtual_machine_extension" "configure" {
     {
       tfversion       = var.tf_version,
       rg_name         = local.resourcegroup_name,
-      client_id       = azurerm_user_assigned_identity.deployer.client_id,
+      client_id       = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].client_id : data.azurerm_user_assigned_identity.deployer[0].client_id,
       subscription_id = data.azurerm_subscription.primary.subscription_id,
       tenant_id       = data.azurerm_subscription.primary.tenant_id,
       local_user      = local.username
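The new data source reads a pre-existing identity by slicing its ARM resource ID with split(); for this ID layout, segment 4 is always the resource group and segment 8 the resource name. A standalone sketch with a made-up ID:

    locals {
      # Illustrative ARM ID, not a real resource.
      demo_identity_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/DEMO-RG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/DEMO-MSI"
      demo_parts       = split("/", local.demo_identity_id)

      # The ID starts with "/", so index 0 is an empty string:
      # 1 "subscriptions", 2 <guid>, 3 "resourceGroups", 4 <rg>,
      # 5 "providers", 6 <namespace>, 7 <type>, 8 <name>.
      demo_rg_name  = local.demo_parts[4] # "DEMO-RG"
      demo_msi_name = local.demo_parts[8] # "DEMO-MSI"
    }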
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
index cc6a4c7c5d..d6b8ba9f68 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
@@ -96,7 +96,10 @@ resource "azurerm_role_assignment" "role_assignment_spn" {

 resource "azurerm_key_vault_access_policy" "kv_user" {
   provider     = azurerm.main
-  count        = (local.enable_landscape_kv && !local.user_keyvault_exist) && !var.enable_rbac_authorization_for_keyvault ? 1 : 0
+  count        = (var.key_vault.exists || var.enable_rbac_authorization_for_keyvault) ? (
+                   0) : (
+                   (var.deployer_tfstate.deployer_uai.principal_id == local.service_principal.object_id) ? 0 : 1
+                 )
   key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id
   tenant_id    = local.service_principal.tenant_id
   object_id    = local.service_principal.object_id != "" ? local.service_principal.object_id : "00000000-0000-0000-0000-000000000000"
@@ -139,7 +142,9 @@ resource "azurerm_key_vault_secret" "sid_ppk" {
   count        = !local.sid_key_exist ? 1 : 0
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
-    azurerm_role_assignment.role_assignment_spn
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = local.sid_ppk_name
@@ -159,7 +164,9 @@ resource "azurerm_key_vault_secret" "sid_pk" {
   count        = !local.sid_key_exist ? 1 : 0
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
-    azurerm_role_assignment.role_assignment_spn
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = local.sid_pk_name
@@ -185,6 +192,8 @@ resource "azurerm_key_vault_secret" "sid_username" {
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
     azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = local.sid_username_secret_name
@@ -207,7 +216,9 @@ resource "azurerm_key_vault_secret" "sid_password" {
   count        = (!local.sid_credentials_secret_exist) ? 1 : 0
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
-    azurerm_role_assignment.role_assignment_spn
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   name         = local.sid_password_secret_name
   content_type = ""
@@ -232,7 +243,9 @@ resource "azurerm_key_vault_secret" "witness_access_key" {
   count        = 1
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
-    azurerm_role_assignment.role_assignment_spn
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = replace(
@@ -263,7 +276,9 @@ resource "azurerm_key_vault_secret" "witness_name" {
   count        = 1
   depends_on   = [
     azurerm_key_vault_access_policy.kv_user,
-    azurerm_role_assignment.role_assignment_spn
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = replace(
@@ -309,9 +324,13 @@ resource "azurerm_key_vault_access_policy" "kv_user_msi" {
   object_id    = var.deployer_tfstate.deployer_uai.principal_id

   secret_permissions = [
-    "Get",
-    "List",
-    "Set"
+    "Get",
+    "List",
+    "Set",
+    "Delete",
+    "Recover",
+    "Restore",
+    "Purge"
   ]
 }

@@ -319,7 +338,10 @@ resource "azurerm_key_vault_secret" "deployer_keyvault_user_name" {
   provider   = azurerm.main
   depends_on = [
-    azurerm_key_vault_access_policy.kv_user
+    azurerm_key_vault_access_policy.kv_user,
+    azurerm_role_assignment.role_assignment_spn,
+    azurerm_role_assignment.role_assignment_msi,
+    azurerm_key_vault_access_policy.kv_user_msi
   ]
   content_type = ""
   name         = "deployer-kv-name"
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
index 8905439fbb..9c880dccea 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
@@ -561,11 +561,11 @@ locals {
   sub_iscsi_prefix = local.sub_iscsi_exists ? "" : try(var.infrastructure.vnets.sap.subnet_iscsi.prefix, "")

   // iSCSI NSG
-  var_sub_iscsi_nsg    = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg, {})
+  var_sub_iscsi_nsg    = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg, {arm_id=""})
   sub_iscsi_nsg_arm_id = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.arm_id, "")
   sub_iscsi_nsg_exists = length(local.sub_iscsi_nsg_arm_id) > 0
   sub_iscsi_nsg_name   = local.sub_iscsi_nsg_exists ? (
-                           try(split("/", local.sub_iscsi.nsg.arm_id)[8], "")) : (
+                           try(split("/", local.sub_iscsi_nsg_arm_id)[8], "")) : (
                            length(try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.name, "")) > 0 ? (
                              var.infrastructure.vnets.sap.subnet_iscsi.nsg.name
                            ) : (
                              format("%s%s%s%s",
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
index 2674a42f6d..7d33edcace 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
@@ -208,7 +208,7 @@ locals {
   #--------------------------------------+---------------------------------------8
   scs_server_count         = var.application_tier.scs_server_count * (var.application_tier.scs_high_availability ? 2 : 1)
   firewall_exists          = length(var.firewall_id) > 0
-  enable_deployment        = var.application_tier.enable_deployment && length(try(var.landscape_tfstate.vnet_sap_arm_id, "")) > 0
+  enable_deployment        = var.application_tier.enable_deployment
   scs_instance_number      = var.application_tier.scs_instance_number
   ers_instance_number      = var.application_tier.ers_instance_number
   application_server_count = var.application_tier.application_server_count
@@ -378,19 +378,13 @@ locals {
   app_zonal_deployment = length(local.app_zones) > 0 ? true : false
   app_zone_count       = length(local.app_zones)
   //If we deploy more than one server in zone put them in an availability set unless specified otherwise
-  use_app_avset = local.application_server_count > 0 && var.application_tier.app_use_avset ? (
-                    true && local.enable_deployment) : (
-                    false && local.enable_deployment
-                  )
+  use_app_avset = var.application_tier.app_use_avset

   scs_zones            = try(var.application_tier.scs_zones, [])
   scs_zonal_deployment = length(local.scs_zones) > 0 ? true : false
   scs_zone_count       = length(local.scs_zones)
   //If we deploy more than one server in zone put them in an availability set
-  use_scs_avset = local.scs_server_count > 0 && (var.application_tier.scs_use_avset) ? (
-                    !local.scs_zonal_deployment || local.scs_server_count != local.scs_zone_count) : (
-                    false
-                  )
+  use_scs_avset = var.application_tier.scs_use_avset

   web_zones            = try(var.application_tier.web_zones, [])
   web_zonal_deployment = length(local.web_zones) > 0 ? true : false
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
index 73573a3b7c..e7582b8717 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
@@ -124,16 +124,15 @@ resource "azurerm_linux_virtual_machine" "app" {
   resource_group_name = var.resource_group[0].name

   proximity_placement_group_id = var.application_tier.app_use_ppg ? (
-
     var.ppg[count.index % max(length(var.ppg), 1)]) : (
     null
   )

   //If more than one servers are deployed into a single zone put them in an availability set and not a zone
-  availability_set_id = local.use_app_avset ? (
+  availability_set_id = var.application_tier.app_use_avset ? (
     length(var.application_tier.avset_arm_ids) > 0 ? (
-      var.application_tier.avset_arm_ids[count.index % max(length(var.ppg), 1)]) : (
-      azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id
+      var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : (
+      azurerm_availability_set.app[count.index % max(length(var.application_tier.avset_arm_ids), 1)].id
     )) : (
     null
   )
diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf
index 8ea96a1bb8..2aa4edecac 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf
@@ -94,8 +94,8 @@ resource "azurerm_proximity_placement_group" "app_ppg" {
 data "azurerm_proximity_placement_group" "app_ppg" {
   provider            = azurerm.main
   count               = var.infrastructure.use_app_proximityplacementgroups ? (local.app_ppg_exists ? max(length(local.zones), 1) : 0) : 0
-  name                = split("/", var.infrastructure.app_ppg.arm_id[count.index])[8]
-  resource_group_name = split("/", var.infrastructure.app_ppg.arm_id[count.index])[4]
+  name                = split("/", var.infrastructure.app_ppg.arm_ids[count.index])[8]
+  resource_group_name = split("/", var.infrastructure.app_ppg.arm_ids[count.index])[4]
 }

 //ASG
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf
index 91e333a50c..d76c99c93a 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf
@@ -6,7 +6,7 @@
 resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" {
   provider = azurerm.main
-  count    = var.NFS_provider == "ANF" && local.use_avg ? length(var.ppg) : 0
+  count    = local.use_avg ? length(var.ppg) : 0
   name = format("%s%s%s%s%d",
            var.naming.resource_prefixes.hana_avg,
            local.prefix,
@@ -54,7 +54,7 @@ resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" {

 data "azurerm_netapp_pool" "workload_netapp_pool" {
   provider            = azurerm.main
-  count               = var.NFS_provider == "ANF" && length(local.ANF_pool_settings.pool_name) > 0 ? 1 : 0
+  count               = length(local.ANF_pool_settings.pool_name) > 0 ? 1 : 0
   resource_group_name = data.azurerm_netapp_account.workload_netapp_account[0].resource_group_name
   name                = try(local.ANF_pool_settings.pool_name, "")
   account_name        = local.ANF_pool_settings.account_name
@@ -63,7 +63,7 @@ data "azurerm_netapp_pool" "workload_netapp_pool" {

 data "azurerm_netapp_account" "workload_netapp_account" {
   provider            = azurerm.main
-  count               = var.NFS_provider == "ANF" && length(local.ANF_pool_settings.account_id) > 0 ? 1 : 0
+  count               = length(local.ANF_pool_settings.account_id) > 0 ? 1 : 0
   name                = try(split("/", local.ANF_pool_settings.account_id)[8], "")
   resource_group_name = try(split("/", local.ANF_pool_settings.account_id)[4], "")
 }
@@ -192,14 +192,10 @@ locals {
   }

   volumes_primary = [
-    var.hana_ANF_volumes.use_for_data ? local.hana_data1 : null,
-    var.hana_ANF_volumes.use_for_log ? local.hana_log1 : null,
-    var.hana_ANF_volumes.use_for_shared ? local.hana_shared1 : null
+    local.hana_data1, local.hana_log1, local.hana_shared1
   ]
   volumes_secondary = [
-    var.hana_ANF_volumes.use_for_data ? local.hana_data2 : null,
-    var.hana_ANF_volumes.use_for_log ? local.hana_log2 : null,
-    var.hana_ANF_volumes.use_for_shared ? local.hana_shared2 : null
+    local.hana_data2, local.hana_log2, local.hana_shared2
  ]
 }
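One expression in the vm-app change deserves a closer look: count.index % max(length(list), 1) deals servers out round-robin over however many availability sets exist, and max(..., 1) keeps the modulo defined when the list is empty (the surrounding code still gates on a non-empty list before indexing). A sketch under illustrative names:

    variable "demo_avset_ids" {
      type    = list(string)
      default = ["avset-z1", "avset-z2", "avset-z3"]
    }

    locals {
      demo_server_count = 5
      # Servers 0..4 map to sets 0,1,2,0,1: a round-robin spread.
      demo_assignment = [
        for i in range(local.demo_server_count) :
        var.demo_avset_ids[i % max(length(var.demo_avset_ids), 1)]
      ]
    }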
From 6d6a5e413e0ed513116cf529a0a558010b954700 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 24 Jan 2024 10:57:59 +0200
Subject: [PATCH 142/607] Remove custom app_virtual_hostname variable in
 main.yaml

---
 deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml
index dbed7ba262..1ed018df28 100644
--- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml
+++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml
@@ -33,7 +33,7 @@
     dir_params:               "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | upper }}-params"
     db_lb_virtual_host_HANA:  "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}"
     db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}"
-    app_virtual_hostname:     "{{ custom_app_virtual_hostname | default(virtual_host, true) }}"
+    app_virtual_hostname:     "{{ virtual_host }}"

 - name: "APP Install: Set BOM facts db host"
   ansible.builtin.set_fact:

From 3f98a0ec9d8c5174c67edba222ab1b4e05e54ff5 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 24 Jan 2024 20:38:20 +0200
Subject: [PATCH 143/607] Fix ARM_CLIENT_SECRET variable in installer.sh

---
 deploy/scripts/installer.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh
index e2511f3c42..041818a7b6 100755
--- a/deploy/scripts/installer.sh
+++ b/deploy/scripts/installer.sh
@@ -1133,7 +1133,7 @@ then
     echo ""
     echo ""

-    if [ -n ${ARM_CLIENT_SECRET} ] ; then
+    if [ -n "${ARM_CLIENT_SECRET}" ] ; then
       az login --service-principal --username "${ARM_CLIENT_ID}" --password=$ARM_CLIENT_SECRET --tenant "${ARM_TENANT_ID}" --output none
     else
       az login --identity --output none

From 37d489ad3e002f13b861e13f976057797c661c32 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 24 Jan 2024 20:38:46 +0200
Subject: [PATCH 144/607] Remove the login from the script

---
 deploy/scripts/New-SDAFDevopsProject.ps1 | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1
index a946e3dd03..ac4f84f2ea 100644
--- a/deploy/scripts/New-SDAFDevopsProject.ps1
+++ b/deploy/scripts/New-SDAFDevopsProject.ps1
@@ -28,16 +28,16 @@ $ARM_TENANT_ID = $Env:ARM_TENANT_ID

 $versionLabel = "v3.10.1.0"

-az logout
+# az logout

-az account clear
+# az account clear

-if ($ARM_TENANT_ID.Length -eq 0) {
-  az login --output none --only-show-errors
-}
-else {
-  az login --output none --tenant $ARM_TENANT_ID --only-show-errors
-}
+# if ($ARM_TENANT_ID.Length -eq 0) {
+#   az login --output none --only-show-errors
+# }
+# else {
+#   az login --output none --tenant $ARM_TENANT_ID --only-show-errors
+# }

 Write-Host ""
 Write-Host ""
@@ -390,6 +390,7 @@ else {
   $gh_connection_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices"
   Write-Host ""
   Write-Host "The browser will now open, please create a new Github connection, record the name of the connection."
+  Write-Host "URL: " $gh_connection_url
   Start-Process $gh_connection_url
   Read-Host "Please press enter when you have created the connection"
@@ -830,8 +831,9 @@ else {

   Write-Host ""
   Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'."
-
   $connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices"
+  Write-Host "URL: " $connections_url
+
   Start-Process $connections_url
   Read-Host -Prompt "Once you have created and validated the connection, Press any key to continue"
@@ -958,6 +960,7 @@ if (!$AlreadySet -or $ResetPAT ) {

   Write-Host ""
   Write-Host "The browser will now open, please create a Personal Access Token. Ensure that Read & manage is selected for Agent Pools, Read & write is selected for Code, Read & execute is selected for Build, and Read, create, & manage is selected for Variable Groups"
+  Write-Host "URL: " $pat_url
   Start-Process $pat_url
   $PAT = Read-Host -Prompt "Please enter the PAT "
   az pipelines variable-group variable update --group-id $Control_plane_groupID --name "PAT" --value $PAT --secret true --only-show-errors --output none
@@ -1044,7 +1047,7 @@

 Write-Host ""
 Write-Host "The browser will now open, Select the'" $ADO_PROJECT "Build Service' user and ensure that it has 'Allow' in the Contribute section."
 $permissions_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/repositories?_a=permissions"
-
+Write-Host "URL: " $permissions_url
 Start-Process $permissions_url
 Read-Host -Prompt "Once you have verified the permission, Press any key to continue"
@@ -1076,6 +1079,7 @@ else {

 $page_id = (az devops wiki page show --path 'Next steps' --wiki SDAF --query page.id )
 $wiki_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_wiki/wikis/SDAF/" + $page_id + "/Next-steps"
+Write-Host "URL: " $wiki_url
 Start-Process $wiki_url

 if (Test-Path .\start.md) {

From 9644f7b33be28444a876aac5b4a5380d2d92bf83 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 13:00:29 +0200
Subject: [PATCH 145/607] Add IP rules for sapmnt storage account

---
 .../sap_system/common_infrastructure/storage_accounts.tf | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
index 72d00479b4..879b54cd07 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
@@ -64,6 +64,11 @@ resource "azurerm_storage_account_network_rules" "sapmnt" {
                                      try(var.landscape_tfstate.subnet_mgmt_id, "")
                                    ]
                                  )
+  ip_rules = compact(
+    [
+      length(var.Agent_IP) > 0 ? var.Agent_IP : ""
+    ]
+

 }

From 9caf3814091b05e9c68aa41099f7d3b62729ee1a Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 14:17:28 +0200
Subject: [PATCH 146/607] Add IP rules to storage accounts

---
 .../modules/sap_landscape/storage_accounts.tf | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
index eb300c71be..230ce16440 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
@@ -344,6 +344,11 @@ resource "azurerm_storage_account_network_rules" "transport" {
                                    ]
                                  )

+  ip_rules = compact(
+    [
+      length(var.Agent_IP) > 0 ? var.Agent_IP : ""
+    ]
+
   lifecycle {
     ignore_changes = [virtual_network_subnet_ids]
   }
@@ -581,7 +586,11 @@ resource "azurerm_storage_account_network_rules" "install" {
                                    ]
                                  )

-  lifecycle {
+  ip_rules = compact(
+    [
+      length(var.Agent_IP) > 0 ? var.Agent_IP : ""
+    ]
+  lifecycle {
     ignore_changes = [virtual_network_subnet_ids]
   }

From 5fa55d01684b3664bbd5b86ac08c3c8baa0c231e Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 16:38:24 +0200
Subject: [PATCH 147/607] Refactor storage account IP rules

---
 .../terraform-units/modules/sap_landscape/storage_accounts.tf | 4 ----
 .../sap_system/common_infrastructure/storage_accounts.tf      | 2 +-
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
index 230ce16440..ffcd4ec184 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
@@ -344,10 +344,6 @@ resource "azurerm_storage_account_network_rules" "transport" {
                                    ]
                                  )

-  ip_rules = compact(
-    [
-      length(var.Agent_IP) > 0 ? var.Agent_IP : ""
-    ]

   lifecycle {
     ignore_changes = [virtual_network_subnet_ids]
   }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
index 879b54cd07..70d9c24c62 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
@@ -63,7 +63,7 @@ resource "azurerm_storage_account_network_rules" "sapmnt" {
                                      try(var.landscape_tfstate.web_subnet_id, ""),
                                      try(var.landscape_tfstate.subnet_mgmt_id, "")
                                    ]
-                                 )
+                                 ),
   ip_rules = compact(
     [
       length(var.Agent_IP) > 0 ? var.Agent_IP : ""

From c3f4c27ac6c73b219a5e543cf0dda11cf82ab491 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 16:45:12 +0200
Subject: [PATCH 148/607] Fix variable interpolation in deploy script

---
 deploy/pipelines/01-deploy-control-plane.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 693ead38a7..9ed3a18dff 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -211,10 +211,10 @@ stages:
               echo -e "$green--- az login ---$reset"
               az account set --subscription $ARM_SUBSCRIPTION_ID
               echo -e "$green--- Deploy the Control Plane ---$reset"
-              if [ -n $(PAT) ]; then
+              if [ -n "$(PAT)" ]; then
                 echo 'Deployer Agent PAT is defined'
               fi
-              if [ -n $(POOL) ]; then
+              if [ -n "$(POOL)" ]; then
                 echo 'Deployer Agent Pool' $(POOL)
                 POOL_NAME=$(az pipelines pool list --organization $(System.CollectionUri) --query "[?name=='$(POOL)'].name | [0]")
                 if [ ${#POOL_NAME} -eq 0 ]; then
@@ -344,7 +344,7 @@ stages:
                   az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneLocation --value ${LOCATION} --output none --only-show-errors
                 fi

-                if [ -n $deployer_random_id ] ; then
+                if [ -n "${deployer_random_id}" ] ; then
                   az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value")
                   if [ -z ${az_var} ]; then
                     az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name DEPLOYER_RANDOM_ID_SEED --value ${deployer_random_id} --output none --only-show-errors

From c6594ebe573c4fa1cd4e5c4f70a1adb154ec0b0f Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 17:03:58 +0200
Subject: [PATCH 149/607] Add condition to check if "Use WebApp" is selected

---
 deploy/pipelines/01-deploy-control-plane.yaml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 9ed3a18dff..9078b08db4 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -247,7 +247,8 @@ stages:
                 export TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); echo 'App Registration App ID' ${TF_VAR_app_registration_app_id}
                 export TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET)
                 export TF_VAR_use_webapp=true
-
+              else
+                echo "Use WebApp is not selected"
               fi

               export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log

From a1a6a26914f720f0f6411e6cb1e0b942c1c7b405 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 17:10:18 +0200
Subject: [PATCH 150/607] Fix syntax error in storage_accounts.tf

---
 .../sap_system/common_infrastructure/storage_accounts.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
index 70d9c24c62..879b54cd07 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
@@ -63,7 +63,7 @@ resource "azurerm_storage_account_network_rules" "sapmnt" {
                                      try(var.landscape_tfstate.web_subnet_id, ""),
                                      try(var.landscape_tfstate.subnet_mgmt_id, "")
                                    ]
-                                 ),
+                                 )
   ip_rules = compact(
     [
      length(var.Agent_IP) > 0 ? var.Agent_IP : ""

From 23e434fdf34576659682144a7128df4ee50cef7a Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 17:39:07 +0200
Subject: [PATCH 151/607] Add network rules for sapmnt storage account

---
 .../modules/sap_system/common_infrastructure/storage_accounts.tf | 1 +
 1 file changed, 1 insertion(+)

diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
index 879b54cd07..c5c39ca314 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf
@@ -68,6 +68,7 @@ resource "azurerm_storage_account_network_rules" "sapmnt" {
     [
       length(var.Agent_IP) > 0 ? var.Agent_IP : ""
     ]
+  )

 }
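Patches 145 through 151 converge, after a few missteps, on the shape below: compact() removes the empty string, so an unset Agent_IP yields an empty rule list rather than an invalid entry. A self-contained sketch; the storage account ID is a placeholder:

    variable "Agent_IP" {
      type    = string
      default = ""
    }

    resource "azurerm_storage_account_network_rules" "demo_sapmnt" {
      storage_account_id = "/subscriptions/.../storageAccounts/demosapmnt" # placeholder
      default_action     = "Deny"
      # compact() drops "" entries, leaving [] when no agent IP is set.
      ip_rules = compact(
        [
          length(var.Agent_IP) > 0 ? var.Agent_IP : ""
        ]
      )
    }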

From ac3650c20172e266cbdc16939f6a3e9851751685 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 19:29:40 +0200
Subject: [PATCH 152/607] Update deploy control plane pipeline and sap_deployer
 module

The web app variable checks now detect unexpanded $(...) macros (that
is, undefined pipeline variables) instead of empty strings, the
exported variables move into the task environment blocks, and the
agent and web app related module inputs are gated on use_webapp.
---
 deploy/pipelines/01-deploy-control-plane.yaml | 37 +++++++++----------
 .../bootstrap/sap_deployer/module.tf          |  6 +--
 deploy/terraform/run/sap_deployer/module.tf   |  6 +--
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 9078b08db4..81955095c3 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -235,20 +235,15 @@ stages:
               if [ $(use_webapp) == "true" ]; then
                 echo "Use WebApp is selected"

-                if [ -z ${APP_REGISTRATION_APP_ID} ]; then
+                if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then
                   echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined."
                   exit 2
                 fi

-                if [ -z ${WEB_APP_CLIENT_SECRET} ]; then
+                if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then
                   echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined."
                   exit 2
                 fi

-                export TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); echo 'App Registration App ID' ${TF_VAR_app_registration_app_id}
-                export TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET)
-                export TF_VAR_use_webapp=true
-              else
-                echo "Use WebApp is not selected"
               fi

               export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log

           displayName: Prepare control plane
           env:
-            SYSTEM_ACCESSTOKEN: $(System.AccessToken)
-            ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
             ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID)
             ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET)
+            ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
             ARM_TENANT_ID: $(CP_ARM_TENANT_ID)
-            TF_VAR_agent_ado_url: $(System.CollectionUri)
-            TF_VAR_spn_id: $(CP_ARM_OBJECT_ID)
-            TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault)
-            TF_VAR_tf_version: $(tf_version)
             AZURE_DEVOPS_EXT_PAT: $(PAT)
+            CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)
+            DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate"
             IS_PIPELINE_DEPLOYMENT: true
-            WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET)
-            APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID)
             POOL: $(POOL)
             SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }}
-            CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)
-            TF_VAR_ansible_core_version: $(ansible_core_version)
-            TF_LOG: $(TF_LOG)
+            SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+            TF_APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID)
             TF_IN_AUTOMATION: true
-            DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate"
+            TF_LOG: $(TF_LOG)
+            TF_VAR_agent_ado_url: $(System.CollectionUri)
+            TF_VAR_ansible_core_version: $(ansible_core_version)
+            TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID);
+            TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault)
+            TF_VAR_spn_id: $(CP_ARM_OBJECT_ID)
+            TF_VAR_tf_version: $(tf_version)
+            TF_VAR_use_webapp: ${{ parameters.use_webapp_param }}
+            TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET)
             USE_MSI: $(Use_MSI)
+            USE_WEBAPP: ${{ parameters.use_webapp_param }}
+            WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET)

   - stage: Deploy_controlplane
     dependsOn:
diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf
index fff98b1052..54db2dea8f 100644
--- a/deploy/terraform/bootstrap/sap_deployer/module.tf
+++ b/deploy/terraform/bootstrap/sap_deployer/module.tf
@@ -16,10 +16,10 @@ module "sap_deployer" {
   additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies
   agent_ado_url                                = var.agent_ado_url
   Agent_IP                                     = var.Agent_IP
-  agent_pat                                    = var.agent_pat
-  agent_pool                                   = var.agent_pool
+  agent_pat                                    = var.use_webapp ? var.agent_pat : ""
+  agent_pool                                   = var.use_webapp ? var.agent_pool : ""
   ansible_core_version                         = var.ansible_core_version
-  app_registration_app_id                      = var.app_registration_app_id
+  app_registration_app_id                      = var.use_webapp ? var.app_registration_app_id : ""
   app_service                                  = local.app_service
   arm_client_id                                = var.arm_client_id
   assign_subscription_permissions              = var.deployer_assign_subscription_permissions
diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf
index 2047235fb4..7bb058f551 100644
--- a/deploy/terraform/run/sap_deployer/module.tf
+++ b/deploy/terraform/run/sap_deployer/module.tf
@@ -16,10 +16,10 @@ module "sap_deployer" {
   additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies
   agent_ado_url                                = var.agent_ado_url
   Agent_IP                                     = var.Agent_IP
-  agent_pat                                    = var.agent_pat
-  agent_pool                                   = var.agent_pool
+  agent_pat                                    = var.use_webapp ? var.agent_pat : ""
+  agent_pool                                   = var.use_webapp ? var.agent_pool : ""
   ansible_core_version                         = var.ansible_core_version
-  app_registration_app_id                      = var.app_registration_app_id
+  app_registration_app_id                      = var.use_webapp ? var.app_registration_app_id : ""
   app_service                                  = local.app_service
   arm_client_id                                = var.arm_client_id
   assign_subscription_permissions              = var.deployer_assign_subscription_permissions

From 607f1606eb598cbff4a61f09f0c4aadd06bc5b94 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 20:00:09 +0200
Subject: [PATCH 153/607] Add Agent IP to storage account network rules

---
 .../modules/sap_landscape/storage_accounts.tf | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
index ffcd4ec184..611a080493 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf
@@ -165,7 +165,8 @@ resource "azurerm_storage_account_network_rules" "witness" {
   bypass = ["AzureServices", "Logging", "Metrics"]

   ip_rules = compact([
-               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : ""
+               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "",
+               length(var.Agent_IP) > 0 ? var.Agent_IP : ""
             ])
   virtual_network_subnet_ids = compact([
                                  local.database_subnet_defined ? (
@@ -321,7 +322,8 @@ resource "azurerm_storage_account_network_rules" "transport" {
   default_action = "Deny"

   ip_rules = compact([
-               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : ""
+               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "",
+               length(var.Agent_IP) > 0 ? var.Agent_IP : ""
             ])

   bypass = ["AzureServices", "Logging", "Metrics"]
@@ -560,7 +562,8 @@ resource "azurerm_storage_account_network_rules" "install" {
   default_action = "Deny"

   ip_rules = compact([
-               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : ""
+               length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "",
+               length(var.Agent_IP) > 0 ? var.Agent_IP : ""
             ])
   bypass = ["AzureServices", "Logging", "Metrics"]
   virtual_network_subnet_ids = compact(
@@ -582,10 +585,6 @@ resource "azurerm_storage_account_network_rules" "install" {
                                  ]
                                )

-  ip_rules = compact(
-    [
-      length(var.Agent_IP) > 0 ? var.Agent_IP : ""
-    ]
   lifecycle {
     ignore_changes = [virtual_network_subnet_ids]
   }
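Note the lifecycle block these network-rule resources keep: ignore_changes on virtual_network_subnet_ids stops Terraform from reverting subnet IDs that were attached to the rules outside of this configuration. A minimal sketch with placeholder IDs:

    resource "azurerm_storage_account_network_rules" "demo_install" {
      storage_account_id = "/subscriptions/.../storageAccounts/demoinstall" # placeholder
      default_action     = "Deny"

      virtual_network_subnet_ids = [
        "/subscriptions/.../subnets/demo-app-subnet" # placeholder
      ]

      lifecycle {
        # Subnets attached out-of-band are not removed on the next apply.
        ignore_changes = [virtual_network_subnet_ids]
      }
    }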

From c0f6fd35241abb712098e18317fc27c309006268 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 20:10:17 +0200
Subject: [PATCH 154/607] Update the deploy control plane task environment
 variables

---
 deploy/pipelines/01-deploy-control-plane.yaml | 61 +++++++++----------
 1 file changed, 30 insertions(+), 31 deletions(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 81955095c3..925e1578bf 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -522,21 +522,16 @@ stages:
               if [ $(use_webapp) == "true" ]; then
                 echo "Use WebApp is selected"

-                if [ -z ${APP_REGISTRATION_APP_ID} ]; then
+                if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then
                   echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined."
                   exit 2
                 fi

-                if [ -z ${WEB_APP_CLIENT_SECRET} ]; then
+                if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then
                   echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined."
                   exit 2
                 fi

-                export TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); echo 'App Registration App ID' ${TF_VAR_app_registration_app_id}
-                export TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET)
-                export TF_VAR_use_webapp=true
-
               fi
-
               bootstrapped=0

               if [ ! -f $deployer_environment_file_name ]; then

           displayName: Deploy control plane
           env:
-            SYSTEM_ACCESSTOKEN: $(System.AccessToken)
-            ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
-            CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID)
-            CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET)
-            CP_ARM_TENANT_ID: $(CP_ARM_TENANT_ID)
-            TF_VAR_spn_id: $(CP_ARM_OBJECT_ID)
-            TF_VAR_agent_pool: $(POOL)
-            TF_VAR_agent_ado_url: $(System.CollectionUri)
-            TF_VAR_tf_version: $(tf_version)
-            AZURE_DEVOPS_EXT_PAT: $(PAT)
-            IS_PIPELINE_DEPLOYMENT: true
-            WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET)
-            APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID)
-            keyvault: $(Deployer_Key_Vault)
-            POOL: $(POOL)
-            SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }}
-            CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)
-            TF_VAR_ansible_core_version: $(ansible_core_version)
-            TF_LOG: $(TF_LOG)
-            TF_IN_AUTOMATION: true
-            DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate"
-            LOGON_USING_SPN: $(Logon_Using_SPN)
-            USE_MSI: $(Use_MSI)
-            DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED)
+            ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID)
+            ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET)
+            ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID)
+            ARM_TENANT_ID: $(CP_ARM_TENANT_ID)
+            AZURE_DEVOPS_EXT_PAT: $(PAT)
+            CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)
+            DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED)
+            DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate"
+            IS_PIPELINE_DEPLOYMENT: true
+            keyvault: $(Deployer_Key_Vault)
+            LOGON_USING_SPN: $(Logon_Using_SPN)
+            POOL: $(POOL)
+            SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }}
+            SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+            TF_APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID)
+            TF_IN_AUTOMATION: true
+            TF_LOG: $(TF_LOG)
+            TF_VAR_agent_ado_url: $(System.CollectionUri)
+            TF_VAR_ansible_core_version: $(ansible_core_version)
+            TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID);
+            TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault)
+            TF_VAR_spn_id: $(CP_ARM_OBJECT_ID)
+            TF_VAR_tf_version: $(tf_version)
+            TF_VAR_use_webapp: ${{ parameters.use_webapp_param }}
+            TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET)
+            USE_MSI: $(Use_MSI)
+            USE_WEBAPP: ${{ parameters.use_webapp_param }}
+            WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET)
           failOnStderr: false

From 8933e4e42d2ad9094d3f702c48e1c3236707bb3d Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 25 Jan 2024 20:19:13 +0200
Subject: [PATCH 155/607] Refactor conditional statements in module.tf

---
 deploy/terraform/bootstrap/sap_deployer/module.tf | 8 ++++----
 deploy/terraform/run/sap_deployer/module.tf       | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf
index 54db2dea8f..d12af8bdf4 100644
--- a/deploy/terraform/bootstrap/sap_deployer/module.tf
+++ b/deploy/terraform/bootstrap/sap_deployer/module.tf
@@ -16,10 +16,10 @@ module "sap_deployer" {
   additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies
   agent_ado_url                                = var.agent_ado_url
   Agent_IP                                     = var.Agent_IP
-  agent_pat                                    = var.use_webapp ? var.agent_pat : ""
-  agent_pool                                   = var.use_webapp ? var.agent_pool : ""
+  agent_pat                                    = lower(var.use_webapp) ? var.agent_pat : ""
+  agent_pool                                   = lower(var.use_webapp) ? var.agent_pool : ""
   ansible_core_version                         = var.ansible_core_version
-  app_registration_app_id                      = var.use_webapp ? var.app_registration_app_id : ""
+  app_registration_app_id                      = lower(var.use_webapp) ? var.app_registration_app_id : ""
   app_service                                  = local.app_service
   arm_client_id                                = var.arm_client_id
   assign_subscription_permissions              = var.deployer_assign_subscription_permissions
@@ -51,7 +51,7 @@ module "sap_deployer" {
   use_custom_dns_a_registration                = var.use_custom_dns_a_registration
   use_private_endpoint                         = var.use_private_endpoint
   use_service_endpoint                         = var.use_service_endpoint
-  use_webapp                                   = var.use_webapp
+  use_webapp                                   = lower(var.use_webapp)
   webapp_client_secret                         = var.webapp_client_secret
 }
diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf
index 7bb058f551..c74be61400 100644
--- a/deploy/terraform/run/sap_deployer/module.tf
+++ b/deploy/terraform/run/sap_deployer/module.tf
@@ -16,10 +16,10 @@ module "sap_deployer" {
   additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies
   agent_ado_url                                = var.agent_ado_url
   Agent_IP                                     = var.Agent_IP
-  agent_pat                                    = var.use_webapp ? var.agent_pat : ""
-  agent_pool                                   = var.use_webapp ? var.agent_pool : ""
+  agent_pat                                    = lower(var.use_webapp) ? var.agent_pat : ""
+  agent_pool                                   = lower(var.use_webapp) ? var.agent_pool : ""
   ansible_core_version                         = var.ansible_core_version
-  app_registration_app_id                      = var.app_registration_app_id
+  app_registration_app_id                      = lower(var.use_webapp) ? var.app_registration_app_id : ""
   app_service                                  = local.app_service
   arm_client_id                                = var.arm_client_id
   assign_subscription_permissions              = var.deployer_assign_subscription_permissions
@@ -51,7 +51,7 @@ module "sap_deployer" {
   use_custom_dns_a_registration                = var.use_custom_dns_a_registration
   use_private_endpoint                         = var.use_private_endpoint
   use_service_endpoint                         = var.use_service_endpoint
-  use_webapp                                   = var.use_webapp
+  use_webapp                                   = lower(var.use_webapp)
   webapp_client_secret                         = var.webapp_client_secret
 }
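A guess worth spelling out about the lower() calls above: if use_webapp reaches Terraform as a string such as "True" or "FALSE", only the lowercased form satisfies the implicit string-to-bool conversion inside a condition. An explicit version of the same coercion, under illustrative names:

    variable "demo_use_webapp" {
      type    = string
      default = "False"
    }

    locals {
      # tobool() accepts exactly "true" or "false", hence lower() first.
      demo_use_webapp_bool = tobool(lower(var.demo_use_webapp))
      demo_agent_pat       = local.demo_use_webapp_bool ? "real-pat" : ""
    }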
var.app_registration_app_id : "" app_service = local.app_service arm_client_id = var.arm_client_id assign_subscription_permissions = var.deployer_assign_subscription_permissions @@ -51,7 +51,7 @@ module "sap_deployer" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint - use_webapp = var.use_webapp + use_webapp = lower(var.use_webapp) webapp_client_secret = var.webapp_client_secret } From 00d7d768218de9e45fe650e26987b4bc911b7973 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 25 Jan 2024 20:22:30 +0200 Subject: [PATCH 156/607] Commented out unused code for APP_REGISTRATION_APP_ID and WEB_APP_CLIENT_SECRET --- deploy/pipelines/01-deploy-control-plane.yaml | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 925e1578bf..1407e55032 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -235,15 +235,15 @@ stages: if [ $(use_webapp) == "true" ]; then echo "Use WebApp is selected" - if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then - echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." - exit 2 - fi + # if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then + # echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." + # exit 2 + # fi - if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then - echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." - exit 2 - fi + # if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then + # echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." + # exit 2 + # fi fi export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log @@ -522,15 +522,15 @@ stages: if [ $(use_webapp) == "true" ]; then echo "Use WebApp is selected" - if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then - echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." - exit 2 - fi + # if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then + # echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." + # exit 2 + # fi - if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then - echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." - exit 2 - fi + # if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then + # echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." 
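# NOTE: the guards being commented out here relied on Azure DevOps macro
# expansion: an undefined pipeline variable is not expanded, so the literal
# text '$(NAME)' reaches the script and can be compared against to detect the
# unset case. A minimal sketch of that detection pattern (the
# WEB_APP_CLIENT_SECRET variant above also combined '-z' with a string
# comparison, leaving a malformed test):
#
#   if [ "$TF_VAR_app_registration_app_id" == '$(APP_REGISTRATION_APP_ID)' ]; then
#     echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined."
#     exit 2
#   fi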
+ # exit 2 + # fi fi bootstrapped=0 From 786fb81b14e4ab2221276e12be1faa6e4a9a2c4a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 25 Jan 2024 20:27:30 +0200 Subject: [PATCH 157/607] Update USE_WEBAPP parameter to lowercase --- deploy/pipelines/01-deploy-control-plane.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 1407e55032..a8d3a57d1e 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -378,7 +378,7 @@ stages: TF_VAR_use_webapp: ${{ parameters.use_webapp_param }} TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET) USE_MSI: $(Use_MSI) - USE_WEBAPP: ${{ parameters.use_webapp_param }} + USE_WEBAPP: ${{ lower(parameters.use_webapp_param) }} WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) - stage: Deploy_controlplane @@ -907,7 +907,7 @@ stages: TF_VAR_use_webapp: ${{ parameters.use_webapp_param }} TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET) USE_MSI: $(Use_MSI) - USE_WEBAPP: ${{ parameters.use_webapp_param }} + USE_WEBAPP: ${{ lower(parameters.use_webapp_param) }} WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) failOnStderr: false From 2283f04ec3273600eceb2f1260833c265b1f2664 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 25 Jan 2024 20:30:52 +0200 Subject: [PATCH 158/607] Update TF_VAR_use_webapp to use lower case parameter value --- deploy/pipelines/01-deploy-control-plane.yaml | 4 ++-- deploy/terraform/bootstrap/sap_deployer/module.tf | 8 ++++---- deploy/terraform/run/sap_deployer/module.tf | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index a8d3a57d1e..0d3aabb0c1 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -375,7 +375,7 @@ stages: TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) TF_VAR_tf_version: $(tf_version) - TF_VAR_use_webapp: ${{ parameters.use_webapp_param }} + TF_VAR_use_webapp: ${{ lower(parameters.use_webapp_param) }} TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET) USE_MSI: $(Use_MSI) USE_WEBAPP: ${{ lower(parameters.use_webapp_param) }} @@ -904,7 +904,7 @@ stages: TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) TF_VAR_tf_version: $(tf_version) - TF_VAR_use_webapp: ${{ parameters.use_webapp_param }} + TF_VAR_use_webapp: ${{ lower(parameters.use_webapp_param) }} TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET) USE_MSI: $(Use_MSI) USE_WEBAPP: ${{ lower(parameters.use_webapp_param) }} diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index d12af8bdf4..54db2dea8f 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -16,10 +16,10 @@ module "sap_deployer" { additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url Agent_IP = var.Agent_IP - agent_pat = lower(var.use_webapp) ? var.agent_pat : "" - agent_pool = lower(var.use_webapp) ? var.agent_pool : "" + agent_pat = var.use_webapp ? var.agent_pat : "" + agent_pool = var.use_webapp ? var.agent_pool : "" ansible_core_version = var.ansible_core_version - app_registration_app_id = lower(var.use_webapp) ? 
var.app_registration_app_id : "" + app_registration_app_id = var.use_webapp ? var.app_registration_app_id : "" app_service = local.app_service arm_client_id = var.arm_client_id assign_subscription_permissions = var.deployer_assign_subscription_permissions @@ -51,7 +51,7 @@ module "sap_deployer" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint - use_webapp = lower(var.use_webapp) + use_webapp = var.use_webapp webapp_client_secret = var.webapp_client_secret } diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index c74be61400..7bb058f551 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -16,10 +16,10 @@ module "sap_deployer" { additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url Agent_IP = var.Agent_IP - agent_pat = lower(var.use_webapp) ? var.agent_pat : "" - agent_pool = lower(var.use_webapp) ? var.agent_pool : "" + agent_pat = var.use_webapp ? var.agent_pat : "" + agent_pool = var.use_webapp ? var.agent_pool : "" ansible_core_version = var.ansible_core_version - app_registration_app_id = lower(var.use_webapp) ? var.app_registration_app_id : "" + app_registration_app_id = var.use_webapp ? var.app_registration_app_id : "" app_service = local.app_service arm_client_id = var.arm_client_id assign_subscription_permissions = var.deployer_assign_subscription_permissions @@ -51,7 +51,7 @@ module "sap_deployer" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint - use_webapp = lower(var.use_webapp) + use_webapp = var.use_webapp webapp_client_secret = var.webapp_client_secret } From 146bdd937d8ea187e14a2877ac885705f9979e8b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 25 Jan 2024 21:04:25 +0200 Subject: [PATCH 159/607] Fix BOM upload and download tasks --- .../0.1-bom-validator/tasks/bom_download.yaml | 23 +++++++++++++++++++ .../tasks/bom_processor.yaml | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml index ff2d4ed2b6..8a31be737f 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml @@ -391,6 +391,29 @@ - azresult.rc != 0 - azresult.stderr is defined - azresult.stderr.find("BlobAlreadyExists") == -1 + when: + - sapbits_sas_token is defined + + - name: "BOM: {{ bom_name }} Upload File {{ bom_media_entry.archive }}" + + ansible.builtin.command: >- + az storage blob upload + --account-name {{ account }} + --container-name {{ container }} + --name {{ bom_media_entry.archive }} + --file {{ result.dest }} + --if-none-match "*" + --no-progress + delegate_to: localhost + register: azresult + ignore_errors: true + failed_when: + - azresult.rc != 0 + - azresult.stderr is defined + - azresult.stderr.find("BlobAlreadyExists") == -1 + when: + - sapbits_sas_token is undefined + # Step: 05-04-01 - END # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml index 
051a070e07..241a582f22 100644 --- a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml @@ -125,7 +125,7 @@ - name: "3.3 BoM Processing: - Download Files on {{ distribution_id }}" ansible.builtin.get_url: url: "{{ sapbits_location_base_path }}/{{ sapbits_bom_files }}/archives/{{ item.archive | string | trim }}\ - {% if sapbits_sas_token is not undefined %}?{{ sapbits_sas_token }}{% endif %}" + {% if sapbits_sas_token is defined %}?{{ sapbits_sas_token }}{% endif %}" dest: "{{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/\ {% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" mode: "{% if item.permissions is undefined %}0644{% else %}{{ item.permissions }}{% endif %}" From f101348e35a5c9b302bbcc3412ebeacdedbb9828 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 25 Jan 2024 23:51:26 +0200 Subject: [PATCH 160/607] Update user authentication logic --- deploy/pipelines/01-deploy-control-plane.yaml | 69 +++++-------------- 1 file changed, 16 insertions(+), 53 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 0d3aabb0c1..537fc35ea4 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -108,8 +108,8 @@ stages: export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") echo "$(variable_group) id: ${VARIABLE_GROUP_ID}" - echo ${{ parameters.force_reset }} - if [ ${{ parameters.force_reset }} == "True" ]; then + echo "${{ parameters.force_reset }}" + if [ "${{ parameters.force_reset }}" = "True" ]; then echo "##vso[task.logissue type=warning]Forcing a re-install" echo "running on $(this_agent)" sed -i 's/step=1/step=0/' $deployer_environment_file_name @@ -232,7 +232,7 @@ stages: unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi - if [ $(use_webapp) == "true" ]; then + if [ "$USE_WEBAPP" = "true" ]; then echo "Use WebApp is selected" # if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then @@ -249,7 +249,7 @@ stages: export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log set +eu - if [ $USE_MSI == "true" ]; then + if [ "$USE_MSI" = "true" ]; then export ARM_CLIENT_SECRET=$servicePrincipalKey $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ @@ -303,7 +303,7 @@ stages: git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip added=1 fi - if [ 1 == $added ]; then + if [ 1 = $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" @@ -313,7 +313,7 @@ stages: echo "##vso[task.uploadsummary]$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md" fi echo -e "$green--- Adding variables to the variable group:" $(variable_group) "---$reset" - if [ 0 == $return_code ]; then + if [ 0 = $return_code ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value") if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value ${file_deployer_tfstate_key} --output none 
--only-show-errors @@ -496,41 +496,8 @@ stages: export TF_VAR_ansible_core_version=2.15 fi - if [ $USE_MSI != "true" ]; then - - if [ -z ${CP_ARM_SUBSCRIPTION_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - - if [ -z ${CP_ARM_CLIENT_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." - exit 2 - fi - - if [ -z ${CP_ARM_CLIENT_SECRET} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." - exit 2 - fi - - if [ -z ${CP_ARM_TENANT_ID} ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." - exit 2 - fi - fi - - if [ $(use_webapp) == "true" ]; then + if [ "$USE_WEBAPP" = "true" ]; then echo "Use WebApp is selected" - - # if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then - # echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." - # exit 2 - # fi - - # if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then - # echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." - # exit 2 - # fi fi bootstrapped=0 @@ -600,7 +567,7 @@ stages: az extension add --name storage-blob-preview >/dev/null echo -e "$green--- az login ---$reset" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none return_code=$? if [ 0 != $return_code ]; then echo -e "$boldred--- Login failed ---$reset" @@ -608,17 +575,13 @@ stages: exit $return_code fi - az account set --subscription $CP_ARM_SUBSCRIPTION_ID + az account set --subscription $ARM_SUBSCRIPTION_ID else if [ $USE_MSI != "true" ]; then echo "Login using SPN" - export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$CP_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID export ARM_USE_MSI=false - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none return_code=$? 
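# NOTE: alongside the variable renames, this commit tightens the bracket
# tests: '[ "$USE_MSI" = "true" ]' instead of '[ $USE_MSI == "true" ]'.
# Quoting keeps the test well-formed when the variable is empty or unset,
# and '=' (unlike '==') is the portable POSIX comparison. A self-contained
# illustration:
flag=""
# Unquoted, '[ $flag = "true" ]' would expand to '[ = "true" ]' and fail with
# "unary operator expected"; the quoted form degrades to a clean false.
if [ "$flag" = "true" ]; then
  echo "flag enabled"
else
  echo "flag disabled"
fi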
if [ 0 != $return_code ]; then echo -e "$boldred--- Login failed ---$reset" @@ -645,7 +608,7 @@ stages: ip_added=0 if [ -f ${deployer_environment_file_name} ]; then - if [ 0 == $bootstrapped ]; then + if [ 0 = $bootstrapped ]; then export key_vault=$(cat ${deployer_environment_file_name} | grep key_vault | awk -F'=' '{print $2}' | xargs) ; echo "Key Vault: $key_vault" if [ -n "${key_vault}" ]; then echo 'Deployer Key Vault' ${key_vault} @@ -695,7 +658,7 @@ stages: sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh - if [ $USE_MSI == "true" ]; then + if [ "$USE_MSI" = "true" ]; then echo "Using MSI" $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ @@ -707,8 +670,8 @@ stages: $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ - --subscription $CP_ARM_SUBSCRIPTION_ID --spn_id $CP_ARM_CLIENT_ID \ - --spn_secret $CP_ARM_CLIENT_SECRET --tenant_id $CP_ARM_TENANT_ID \ + --subscription $ARM_SUBSCRIPTION_ID --spn_id $ARM_CLIENT_ID \ + --spn_secret $ARM_CLIENT_SECRET --tenant_id $ARM_TENANT_ID \ --auto-approve --ado \ ${storage_account_parameter} ${keyvault_parameter} fi @@ -812,7 +775,7 @@ stages: added=1 fi - if [ 1 == $added ]; then + if [ 1 = $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" git commit -m "Added updates from control plane deployment $(Build.DefinitionName) [skip ci]" @@ -825,7 +788,7 @@ stages: fi echo -e "$green--- Adding variables to the variable group:" $(variable_group) "---$reset" - if [ 0 == $return_code ]; then + if [ 0 = $return_code ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value") if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value ${file_REMOTE_STATE_SA} --output none --only-show-errors From 3fe7ab6dca82ba1fe16c67366326112b067165c9 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 26 Jan 2024 00:28:18 +0200 Subject: [PATCH 161/607] Add deployment variable to tfvar_variables.tf --- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 5 +++++ deploy/terraform/run/sap_library/tfvar_variables.tf | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index f0fd374dd0..bdf241741c 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -403,6 +403,11 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } +variable "deployment" { + description = "The type of deployment" + default = "update" + } + ######################################################################################### # # # DNS settings # diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index ce2cba69bd..c799ed685f 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -212,6 +212,12 @@ variable "spn_keyvault_id" { default = "" } +variable "deployment" { + 
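# NOTE: declared with a default, presumably so runs that never export
# TF_VAR_deployment keep working unchanged; a caller that does set it
# simply overrides "update".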
description = "The type of deployment" + default = "update" + } + + ######################################################################################### # # # Web App definitioms # From 9c9c3593a85ec461e9f897dac1eff788556d98bc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 26 Jan 2024 00:50:33 +0200 Subject: [PATCH 162/607] Remove unused variable "deployment" --- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 5 ----- 1 file changed, 5 deletions(-) diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index bdf241741c..f0fd374dd0 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -403,11 +403,6 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } -variable "deployment" { - description = "The type of deployment" - default = "update" - } - ######################################################################################### # # # DNS settings # From ff4182b32e487d17431d2fcb79d6272430161dae Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 26 Jan 2024 01:04:16 +0200 Subject: [PATCH 163/607] Refactor deployment script to support both Service Principal and Managed Identity --- deploy/scripts/deploy_controlplane.sh | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index 312a6d91b1..3adefc03b7 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -211,26 +211,17 @@ if [ -n "${subscription}" ]; then fi -if [ 3 == $step ]; then - spn_secret="none" -fi - -if [ -n "$keyvault" ]; then - set_executing_user_environment_variables "none" -else - if [ 0 = "${deploy_using_msi_only:-}" ]; then - echo "Using Service Principal for deployment" - set_executing_user_environment_variables "${spn_secret}" - else - echo "Using Managed Identity for deployment" - set_executing_user_environment_variables "none" - fi -fi - load_config_vars "${deployer_config_information}" "step" load_config_vars "${deployer_config_information}" "keyvault" +if [ 0 = "${deploy_using_msi_only:-}" ]; then + echo "Using Service Principal for deployment" + set_executing_user_environment_variables "${spn_secret}" +else + echo "Using Managed Identity for deployment" + set_executing_user_environment_variables "none" +fi if [ $recover == 1 ]; then if [ -n "$REMOTE_STATE_SA" ]; then From 907363965f4cbd5eb6f746a83de12627b1e12f44 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 10:48:51 +0200 Subject: [PATCH 164/607] Provide a way to control SAS token access to storage accounts (#531) * Add support for deploying storage accounts that use RBAC * Add storage_use_azuread flag to azurerm provider * Update providers.tf to conditionally set use_msi based on var.use_spn * Add flags to disable SAS tokens for SAP Library and SAP Landscape * Fix variable checks in deploy control plane pipeline * testing * Fix variable comparison in deploy script * Refactor deployment pipeline: Remove unnecessary code and comments * Update Azure login credentials * Update Azure login credentials * Remove unnecessary use_azuread_auth flag * Update storage_use_azuread based on shared_access_key_enabled variable * Add default OAuth authentication for storage accounts * Only set flag for the other accounts * Update tfvar_variables.tf: Added 'deployment' variable * Remove unused variable "deployment" * Add 
shared access key enabled variable to tfvar_variables.tf * Remove unnecessary code in imports.tf * Update azuread provider configuration * Update backend configuration in sap_library backend.tf * Remove use_msi variable from backend.tf * Add Azure AD authentication to azurerm backend * Add shared access key configuration for storage account * Add Azure AD authentication to backend.tf and imports.tf for SAP Workload zone * Update storage_use_azuread to true in providers.tf * Add Azure AD credentials to azurerm and azuread providers * Remove unnecessary provider configuration * Add Azure AD authentication to azurerm backend and update azuread provider configuration * Add use_msi and use_azuread_auth options to terraform_remote_state * Support sas-token less access * Add installation media storage account check and extract SAS token * Remove unnecessary line in main.yaml * Update account_name parameter in main.yaml * Refactor installation media storage details tasks * Refactor storage details and add parameter review debug * Fix indentation in allowSharedKeyAccess condition * Refactor validation check for allowSharedKeyAccess variable * Add --auth-mode login * Update BOM upload command to include SAS keys * Update BOM Validator tasks and pre-checks * Check the file using CLI * Refactor bom_download.yaml task * Fix storage account check in bom_download.yaml * Refactor bom_download.yaml and bom_validator.yaml * Add account information retrieval and debug task * Allow Ansible to run as MSI * Remove ARM_TENANT_ID from deployment scripts * Remove unnecessary Azure login step in pipeline * Update container name in bom_download.yaml * Fix BOM download and iSCSI key vault dependencies * Fix storage account firewall subnet configuration * Add useSAS variable to install_workloadzone.sh script * Refactor network rule addition in install_workloadzone.sh * Update authentication mode for Azure storage blob upload * Fix condition in bom_download.yaml * Fix Azure subscription issue in deployment pipeline * Update Azure subscription ID in run-ansible.yaml * Update run-ansible.yaml to source deploy_server.sh instead of using az login with identity * Add SAP OS configuration playbook: Set sapos storage account facts * Remove unused variable sapbits_sas_token * Move sas token to other task * Always enable SAS on SAP Library storage account * Refactor key_vault.tf to remove unnecessary condition * Refactor set_fact task in playbook_03_bom_processing.yaml * Fix app registration app ID formatting * Fix app registration app ID formatting in control plane deployment pipeline * Update bom_download.yaml lint fixes for additional new lines * Update pre_checks.yaml fix additional new lines lint errors --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> --- .../playbook_02_os_sap_specific_config.yaml | 10 +++- .../ansible/playbook_03_bom_processing.yaml | 7 ++- .../tasks/main.yaml | 51 +++++++++++++--- .../0.1-bom-validator/tasks/bom_download.yaml | 50 +++++++++++++++- .../tasks/bom_validator.yaml | 3 +- .../0.1-bom-validator/tasks/main.yaml | 43 ++++++++++++- .../0.1-bom-validator/tasks/pre_checks.yaml | 60 +++++++++++++++++-- deploy/pipelines/01-deploy-control-plane.yaml | 5 +- .../pipelines/04-sap-software-download.yaml | 60 +++++++++++-------- .../pipelines/05-DB-and-SAP-installation.yaml | 44 +++++++++++++- deploy/pipelines/templates/run-ansible.yaml | 34 ++++++----- deploy/scripts/configure_deployer.sh | 8 +-- deploy/scripts/install_workloadzone.sh | 22 
+++++-- deploy/scripts/installer.sh | 32 ++++++++-- deploy/scripts/sync_deployer.sh | 15 ++++- .../bootstrap/sap_deployer/tfvar_variables.tf | 9 +++ .../bootstrap/sap_deployer/transform.tf | 5 ++ .../bootstrap/sap_library/providers.tf | 4 ++ .../bootstrap/sap_library/tfvar_variables.tf | 13 ++++ .../bootstrap/sap_library/transform.tf | 2 + deploy/terraform/run/sap_deployer/backend.tf | 4 +- .../terraform/run/sap_deployer/providers.tf | 6 ++ .../run/sap_deployer/tfvar_variables.tf | 7 +++ .../terraform/run/sap_deployer/transform.tf | 1 + deploy/terraform/run/sap_landscape/backend.tf | 4 +- deploy/terraform/run/sap_landscape/imports.tf | 2 + .../terraform/run/sap_landscape/providers.tf | 19 +++--- deploy/terraform/run/sap_library/backend.tf | 4 +- deploy/terraform/run/sap_library/imports.tf | 2 + deploy/terraform/run/sap_library/providers.tf | 26 +++++--- .../run/sap_library/tfvar_variables.tf | 6 ++ deploy/terraform/run/sap_library/transform.tf | 4 ++ deploy/terraform/run/sap_system/backend.tf | 4 +- deploy/terraform/run/sap_system/imports.tf | 4 ++ deploy/terraform/run/sap_system/providers.tf | 1 + .../modules/sap_deployer/infrastructure.tf | 1 + .../templates/configure_deployer.sh.tmpl | 2 +- .../modules/sap_landscape/iscsi.tf | 36 +++++++---- .../modules/sap_library/key_vault.tf | 3 +- .../modules/sap_library/storage_accounts.tf | 3 + 40 files changed, 505 insertions(+), 111 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 83c1b8babd..99b067b0ca 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -101,7 +101,6 @@ - name: "SAP OS configuration playbook: - Set sapos storage account facts" ansible.builtin.set_fact: sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" - sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" sa_enabled: true when: - not is_run_with_infraCreate_only @@ -109,6 +108,15 @@ tags: - is_run_with_infraCreate_only + - name: "SAP OS configuration playbook: - Set sapos storage account facts" + ansible.builtin.set_fact: + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + when: + - not is_run_with_infraCreate_only + - hostvars.localhost.sapbits_sas_token is defined + tags: + - is_run_with_infraCreate_only + - name: "SAP OS configuration playbook: - Get the IP information from instance meta data service" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance/network/interface/0?api-version=2021-02-01 diff --git a/deploy/ansible/playbook_03_bom_processing.yaml b/deploy/ansible/playbook_03_bom_processing.yaml index fade38af95..928a3c53b8 100644 --- a/deploy/ansible/playbook_03_bom_processing.yaml +++ b/deploy/ansible/playbook_03_bom_processing.yaml @@ -61,9 +61,14 @@ ansible.builtin.set_fact: tier: preparation sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" - sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" sa_enabled: true + - name: Set facts + ansible.builtin.set_fact: + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + when: hostvars.localhost.sapbits_sas_token is defined + + - name: 3.3-bom-processing role for Linux become: true when: ansible_os_family != "Windows" diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 
ad2a99d7c0..4a3ba7c8ea 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -46,11 +46,11 @@ ansible.builtin.debug: msg: # Best method for formatting output with Azure Devops Logs - "kv_name: {{ kv_name }}" - - "deployer_kv_name_secret: {{ deployer_kv_name_secret }}" - - "deployer_kv_name_secret_result: {{ deployer_kv_name_secret_result }}" + - "deployer_kv_name_secret: {{ deployer_kv_name_secret }}" - "deployer_kv_name: {{ deployer_kv_name }}" verbosity: 2 + - name: "0.4 Installation Media: - Extract SAP Binaries Storage Account secrets" block: - name: "0.4 Installation Media: - Extract SAP Binaries Storage Account information" @@ -70,8 +70,37 @@ cacheable: true when: sapbits_location_base_path is not defined +- name: "0.4 Installation Media: - Save SAP Binaries Storage Account information" + ansible.builtin.set_fact: + account_name: "{{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }}" + when: sapbits_location_base_path is defined + + +- name: "0.4 Installation Media: - Check Binaries Storage Account" + ansible.builtin.command: >- + az storage account show \ + --name {{ account_name }} \ + --query allowSharedKeyAccess \ + --out tsv + changed_when: false + register: az_allowSharedKeyAccess + +- name: "0.4 Installation Media: - Extract SAS token" + ansible.builtin.set_fact: + allowSharedKeyAccess: "{{ az_allowSharedKeyAccess.stdout | bool }}" + when: az_allowSharedKeyAccess is defined + +- name: "Parameter review..." + ansible.builtin.debug: + msg: # Best method for formatting output with Azure Devops Logs + - "account_name: {{ account_name }}" + - "allowSharedKeyAccess: {{ allowSharedKeyAccess }}" + verbosity: 2 + - name: "0.4 Installation Media: - Retrieve Access Key secret" - when: sapbits_access_key is not defined + when: + - sapbits_access_key is not defined + - allowSharedKeyAccess block: - name: "0.4 Installation Media: - Check if Access Key secret exists" ansible.builtin.command: >- @@ -104,6 +133,7 @@ when: - sapbits_access_key is not defined - sapbits_sas_token is not defined or (sapbits_sas_token | string | length == 0) + - allowSharedKeyAccess block: - name: "0.4 Installation Media: - Check if SAS token secret exists" ansible.builtin.command: >- @@ -132,7 +162,13 @@ no_log: true when: keyvault_secret_show_sas_token is defined + - name: "0.4 Installation Media: - Create SAP Binaries Storage Account SAS token" + when: + - sapbits_sas_token is not defined or (sapbits_sas_token | string | length == 0) + - sapbits_access_key is defined + - allowSharedKeyAccess is defined + - allowSharedKeyAccess block: - name: "0.4 Installation Media: - SAS token" @@ -146,7 +182,7 @@ - name: "0.4 Installation Media: - Create SAP Binaries Storage Account SAS" ansible.builtin.command: >- az storage account generate-sas \ - --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} \ + --account-name {{ account_name }} \ --expiry {{ expiry.stdout }} \ --permissions crwl \ --services b \ @@ -160,11 +196,11 @@ ansible.builtin.set_fact: sapbits_sas_token: "{{ az_sapbits_sas_token.stdout }}" - when: - - sapbits_sas_token is not defined or (sapbits_sas_token | string | length == 0) - - sapbits_access_key is defined - name: "0.0 Validations - Check required variables are present and not empty" + when: + - allowSharedKeyAccess is defined + - allowSharedKeyAccess ansible.builtin.assert: that: - sapbits_sas_token is defined # Has the 
variable been defined @@ -179,6 +215,7 @@ - "URL : {{ sapbits_location_base_path }}" - name: "0.4 Installation Media: - Debug storage account details (sas)" + when: sapbits_sas_token is defined ansible.builtin.debug: msg: - "SAS : {{ sapbits_sas_token }}" diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml index 8a31be737f..720ae43411 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml @@ -35,6 +35,10 @@ # Description: # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Check Storage Account for {{ bom_media_entry.archive }}" + when: + - check_storage_account | bool + - sa_enabled + - allowSharedKeyAccess block: # -------------------------------------+---------------------------------------8 @@ -77,9 +81,52 @@ # Step: 03-03 - END # -------------------------------------+---------------------------------------8 + +- name: "{{ task_prefix }} - BOM: {{ bom_name }} Check Storage Account for {{ bom_media_entry.archive }}" when: - check_storage_account | bool - sa_enabled + - not allowSharedKeyAccess + block: + +# -------------------------------------+---------------------------------------8 +# Step: 03-01 +# Description: +# + - name: "{{ task_prefix }} - BOM: {{ bom_name }} Check is file {{ bom_media_entry.archive }} is already downloaded" + ansible.builtin.command: >- + az storage blob show + --account-name {{ account }} + --container-name {{ sapbits_location_base_path.rpartition('//')[2].split('/')[1] }} + --auth-mode login + --name {{ sapbits_bom_files }}/archives/{{ bom_media_entry.archive }} + --query name --output tsv + delegate_to: localhost + register: azresult + ignore_errors: true + + - name: "{{ task_prefix }} - Informational" + ansible.builtin.debug: + var: azresult + verbosity: 2 +# Step: 03-01 - END +# -------------------------------------+---------------------------------------8 + +# -------------------------------------+---------------------------------------8 +# Step: 03-02 - END +# -------------------------------------+---------------------------------------8 + +# -------------------------------------+---------------------------------------8 +# Step: 03-03 +# Description: +# + - name: "{{ task_prefix }} - BOM: {{ bom_name }} Set Fact {{ bom_media_entry.archive }}" + ansible.builtin.set_fact: + proceed: false + when: azresult.rc == 0 +# Step: 03-03 - END +# -------------------------------------+---------------------------------------8 + # Step: 03 - END # -------------------------------------+---------------------------------------8 @@ -373,7 +420,7 @@ # Step: 05-04-01 # Description: # - - name: "BOM: {{ bom_name }} Upload File {{ bom_media_entry.archive }}" + - name: "BOM: {{ bom_name }} Upload File {{ bom_media_entry.archive }} using SAS keys" ansible.builtin.command: >- az storage blob upload @@ -400,6 +447,7 @@ az storage blob upload --account-name {{ account }} --container-name {{ container }} + --auth-mode login --name {{ bom_media_entry.archive }} --file {{ result.dest }} --if-none-match "*" diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml index 6f1966cb8a..bc2669b60a 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml @@ -109,7 +109,7 @@ # Step: 04-02 # Description: # - - 
name: "{{ task_prefix }} - Caculate expiration date (+3 Days)" + - name: "{{ task_prefix }} - Calculate expiration date (+3 Days)" ansible.builtin.set_fact: expiry: "{{ '%Y-%m-%d' | strftime((ansible_date_time.epoch | int) + (86400 * 3)) }}" # Step: 04-02 - END @@ -148,6 +148,7 @@ task_prefix: Generate SAS token block when: - sa_enabled + - allowSharedKeyAccess # Step: 04 - END # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/main.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/main.yaml index 8caef588bb..22eccf35b3 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/main.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/main.yaml @@ -270,7 +270,7 @@ # Step: 10-01 # Description: # - - name: "0.1 BoM Validator: - delete combined BoM" + - name: "0.1 BoM Validator: - delete combined BoM using SAS Token" ansible.builtin.command: >- az storage blob delete --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} @@ -284,6 +284,24 @@ - azresult.rc != 0 - azresult.stderr is defined - azresult.stderr.find("BlobNotFound") == -1 + when: allowSharedKeyAccess + + - name: "0.1 BoM Validator: - delete combined BoM" + ansible.builtin.command: >- + az storage blob delete + --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} + --auth-mode login + --container-name {{ sapbits_location_base_path.rpartition('//')[2].split('/')[1] }}/{{ sapbits_bom_files }}/boms/{{ new_bom_name }} + --name {{ bom_base_name }}.yaml + delegate_to: localhost + register: azresult + changed_when: false + failed_when: + - azresult.rc != 0 + - azresult.stderr is defined + - azresult.stderr.find("BlobNotFound") == -1 + when: not allowSharedKeyAccess + # Step: 10-01 - END # -------------------------------------+---------------------------------------8 @@ -292,7 +310,7 @@ # Step: 10-02 # Description: # - - name: "0.1 BoM Validator: - Upload combined BoM" + - name: "0.1 BoM Validator: - Upload combined BoM using SAS Token" ansible.builtin.command: >- az storage blob upload-batch --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} @@ -309,6 +327,27 @@ - azresult.rc != 0 - azresult.stderr is defined - azresult.stderr.find("BlobAlreadyExists") == -1 + when: allowSharedKeyAccess + + - name: "0.1 BoM Validator: - Upload combined BoM" + ansible.builtin.command: >- + az storage blob upload-batch + --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} + --auth-mode login + --destination {{ sapbits_location_base_path.rpartition('//')[2].split('/')[1] }}/{{ sapbits_bom_files }}/boms/{{ new_bom_name }} + --source {{ download_directory }}/bom/{{ new_bom_name }} + --overwrite True + --if-none-match "*" + --no-progress + delegate_to: localhost + register: azresult + changed_when: false + failed_when: + - azresult.rc != 0 + - azresult.stderr is defined + - azresult.stderr.find("BlobAlreadyExists") == -1 + when: not allowSharedKeyAccess + # Step: 10-02 - END # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/pre_checks.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/pre_checks.yaml index 73a6de6951..48cd5d8e5a 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/pre_checks.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/pre_checks.yaml @@ -330,6 +330,19 @@ # Step: 08 - END # 
-------------------------------------+---------------------------------------8 +- name: "(pre_checks.yaml) - {{ task_prefix }} - Get account information" + ansible.builtin.command: >- + az account show + vars: + task_prefix: Storage Account validation + delegate_to: localhost + register: azresult + ignore_errors: true + changed_when: false + +- name: "(pre_checks.yaml) - {{ task_prefix }} - Show account information" + ansible.builtin.debug: + var: azresult # -------------------------------------+---------------------------------------8 # Step: 09 @@ -356,12 +369,12 @@ when: - "sapbits_location_base_path is defined" # Has the variable been defined - "sapbits_location_base_path | trim | length != 0" # and given a value - + - not allowSharedKeyAccess or (sapbits_sas_token is defined and (sapbits_sas_token | trim | length != 0)) # - "sapbits_access_key is defined" # Has the variable been defined # - "sapbits_access_key | trim | length != 0" # and given a value - - "sapbits_sas_token is defined" # Has the variable been defined - - "sapbits_sas_token | trim | length != 0" # and given a value + # - "sapbits_sas_token is defined" # Has the variable been defined + # - "sapbits_sas_token | trim | length != 0" # and given a value # Step: 10 - END # -------------------------------------+---------------------------------------8 @@ -370,7 +383,7 @@ # Step: 11 # Description: # -- name: "(pre_checks.yaml) - {{ task_prefix }} - Check storage account container" +- name: "(pre_checks.yaml) - {{ task_prefix }} - Check storage account container when using SAS Token" ansible.builtin.command: >- az storage blob upload --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} @@ -393,6 +406,33 @@ no_log: true # censor output of secret when: - sa_enabled + - allowSharedKeyAccess + +- name: "(pre_checks.yaml) - {{ task_prefix }} - Check storage account container" + ansible.builtin.command: >- + az storage blob upload + --account-name {{ sapbits_location_base_path.rpartition('//')[2].split('.')[0] }} + --auth-mode login + --container-name {{ sapbits_location_base_path.rpartition('//')[2].split('/')[1] }}/{{ sapbits_bom_files }}/archives + --name readme.md + --file {{ readme_file.dest }} + --if-none-match "*" + --no-progress + vars: + task_prefix: Storage Account validation + delegate_to: localhost + register: azresult + ignore_errors: true + changed_when: false + failed_when: + - azresult.rc != 0 + - azresult.stderr is defined + - azresult.stderr.find("BlobAlreadyExists") == -1 + no_log: true # censor output of secret + when: + - sa_enabled + - not allowSharedKeyAccess + # Step: 11 - END # -------------------------------------+---------------------------------------8 @@ -409,7 +449,18 @@ that: - "sapbits_location_base_path is defined" # Has the variable been defined - "sapbits_location_base_path | trim | length != 0" # and given a value + success_msg: |- + Storage Account access ENABLED + sa_enabled: {{ sa_enabled }} + fail_msg: |- + Storage Account access DISABLED + sa_enabled: {{ sa_enabled }} + failed_when: false +- name: "(pre_checks.yaml) - Informational check of Storage Account parameters" + when: allowSharedKeyAccess + ansible.builtin.assert: + that: - "sapbits_access_key is defined" # Has the variable been defined - "sapbits_access_key | trim | length != 0" # and given a value @@ -422,6 +473,7 @@ Storage Account access DISABLED sa_enabled: {{ sa_enabled }} failed_when: false + # Step: 12 - END # -------------------------------------+---------------------------------------8 diff --git 
a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 537fc35ea4..e06fe2e9d4 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -371,7 +371,7 @@ stages: TF_LOG: $(TF_LOG) TF_VAR_agent_ado_url: $(System.CollectionUri) TF_VAR_ansible_core_version: $(ansible_core_version) - TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID); + TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID) TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) TF_VAR_tf_version: $(tf_version) @@ -591,6 +591,7 @@ stages: else source /etc/profile.d/deploy_server.sh export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + unset ARM_TENANT_ID fi fi @@ -863,7 +864,7 @@ stages: TF_LOG: $(TF_LOG) TF_VAR_agent_ado_url: $(System.CollectionUri) TF_VAR_ansible_core_version: $(ansible_core_version) - TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID); + TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID) TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) TF_VAR_tf_version: $(tf_version) diff --git a/deploy/pipelines/04-sap-software-download.yaml b/deploy/pipelines/04-sap-software-download.yaml index 1d61d68a54..ce58cbbbae 100644 --- a/deploy/pipelines/04-sap-software-download.yaml +++ b/deploy/pipelines/04-sap-software-download.yaml @@ -94,13 +94,18 @@ stages: fi echo -e "$green--- az login ---$reset" + if [ "$USE_MSI" = "true" ]; then + echo "Using MSI" + source /etc/profile.d/deploy_server.sh - az login --service-principal --username $ARM_CLIENT_ID --password=${ARM_CLIENT_SECRET} --tenant $ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + else + az login --service-principal --username $ARM_CLIENT_ID --password=${ARM_CLIENT_SECRET} --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi az account set --subscription $ARM_SUBSCRIPTION_ID --output none @@ -159,6 +164,7 @@ stages: SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} SAMPLE_REPO_PATH: ${{ parameters.sample_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + USE_MSI: $(Use_MSI) failOnStderr: false - stage: Software_download @@ -195,27 +201,32 @@ stages: exit 2 fi - if [ ! -n $ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi + if [ "$USE_MSI" = "true" ]; then + echo "Using MSI" + source /etc/profile.d/deploy_server.sh - if [ ! -n $ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 - fi + else + if [ ! -n $ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi - if [ ! -n $ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 - fi + if [ ! -n $ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? 
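# NOTE: the '[ ! -n $ARM_CLIENT_ID ]' guards above are fragile: with the
# variable empty or unset, the unquoted expansion collapses the test to
# '[ ! -n ]', which negates a check on the literal string '-n' (always
# non-empty) and therefore never fires. A sketch of the robust form:
if [ -z "$ARM_CLIENT_ID" ]; then
  echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined."
  exit 2
fi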
- if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + if [ ! -n $ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." + exit 2 + fi + az login --service-principal --username $ARM_CLIENT_ID --password=${ARM_CLIENT_SECRET} --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi command="ansible-playbook '-e "download_directory=$(Agent.TempDirectory)" -e "BOM_directory=${sample_path}" -e "bom_base_name=$(BOM_NAME)" -e "deployer_kv_name=$(KV_NAME)" -e "check_storage_account=$(check_storage_account)" ' $ExtraParams ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_bom_downloader.yaml" @@ -237,6 +248,7 @@ stages: SAMPLE_REPO_PATH: ${{ parameters.sample_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) ANSIBLE_COLLECTIONS_PATHS: ~/.ansible/collections:/opt/ansible/collections + USE_MSI: $(Use_MSI) ExtraParams: ${{ parameters.ExtraParams }} failOnStderr: false diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 9d1a2c7012..0b8e6a6252 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -248,8 +248,35 @@ stages: echo -e "$green--- az login ---$reset" # If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one deployer_file=/etc/profile.d/deploy_server.sh - az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none - az account set --subscription $AZURE_SUBSCRIPTION_ID + if [ "$USE_MSI" = "true" ]; then + echo "Using MSI" + source /etc/profile.d/deploy_server.sh + az account set --subscription $ARM_SUBSCRIPTION_ID + + else + if [ ! -n $AZURE_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi + + if [ ! -n $AZURE_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi + + if [ ! -n $AZURE_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." + exit 2 + fi + az login --service-principal --username $AZURE_CLIENT_ID --password=${AZURE_CLIENT_SECRET} --tenant $AZURE_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + az account set --subscription $AZURE_SUBSCRIPTION_ID + fi return_code=$? 
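# NOTE: the shape of this change, reduced to its essentials: one code path for
# a framework-provisioned deployer with a managed identity, one for a plain
# hosted agent using a service principal. Sketch only, assuming the deployer
# profile script /etc/profile.d/deploy_server.sh exists and exports the ARM_*
# context, as elsewhere in this series:
if [ "$USE_MSI" = "true" ]; then
  source /etc/profile.d/deploy_server.sh          # managed identity context
  az account set --subscription "$ARM_SUBSCRIPTION_ID"
else
  az login --service-principal --username "$AZURE_CLIENT_ID" \
    --password="$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID" --output none
  az account set --subscription "$AZURE_SUBSCRIPTION_ID"
fi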
if [ 0 != $return_code ]; then @@ -280,6 +307,7 @@ stages: SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sap_system_configuration_name }} EXTRA_PARAMETERS: $(EXTRA_PARAMETERS) PIPELINE_EXTRA_PARAMETERS: ${{ parameters.extra_params }} + USE_MSI: $(USE_MSI) - template: templates\run-ansible.yaml parameters: displayName: "Parameter validation" @@ -296,6 +324,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.base_os_configuration, true) }}: - template: templates\run-ansible.yaml parameters: @@ -313,6 +342,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.sap_os_configuration, true) }}: - template: templates\run-ansible.yaml parameters: @@ -330,6 +360,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.bom_processing, true) }}: - template: templates\run-ansible.yaml parameters: @@ -347,6 +378,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.scs_installation, true) }}: - template: templates\run-ansible.yaml parameters: @@ -364,6 +396,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.database_install, true) }}: - template: templates\run-ansible.yaml parameters: @@ -381,6 +414,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.db_load, true) }}: - template: templates\run-ansible.yaml parameters: @@ -398,6 +432,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.high_availability_configuration, true) }}: - template: templates\run-ansible.yaml parameters: @@ -415,6 +450,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.pas_installation, true) }}: - template: templates\run-ansible.yaml parameters: @@ -432,6 +468,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.application_server_installation, true) }}: - template: templates\run-ansible.yaml parameters: @@ -449,6 +486,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.webdispatcher_installation, true) }}: - template: templates\run-ansible.yaml parameters: @@ -466,6 +504,7 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.acss_registration, true) }}: - template: templates\acss-registration.yaml parameters: @@ -485,6 +524,7 @@ stages: azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) acssEnvironment: ${{ parameters.acss_environment }} acssSapProduct: ${{ parameters.acss_sap_product }} + USE_MSI: $(USE_MSI) - template: 
templates\collect-log-files.yaml parameters: diff --git a/deploy/pipelines/templates/run-ansible.yaml b/deploy/pipelines/templates/run-ansible.yaml index 9fa299f914..8180e824b5 100644 --- a/deploy/pipelines/templates/run-ansible.yaml +++ b/deploy/pipelines/templates/run-ansible.yaml @@ -3,6 +3,7 @@ parameters: azureClientSecret: '' azureTenantId: '' azureSubscriptionId: '' + USE_MSI: '' displayName: '' ansibleFilePath: '' sidHosts: '' @@ -15,7 +16,7 @@ parameters: userNameSecretName: '' steps: -- script: | +- bash: | #!/bin/bash # Exit immediately if a command exits with a non-zero status. # Treat unset variables as an error when substituting. @@ -23,24 +24,24 @@ steps: #Stage could be executed on a different machine by default, need to login again for ansible #If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one - # deployer_file=/etc/profile.d/deploy_server.sh - # if [ -f "$deployer_file" ]; then - # echo "##[section]Running on a deployer..." - # az login --identity --allow-no-subscriptions --output none - # noAccess=$( az account show --query name | grep "N/A(tenant level account)") - # if [ -z "$noAccess" ]; then - # az account set --subscription $AZURE_SUBSCRIPTION_ID --output none - # fi - # else - # echo "##[section]Running on an Azure DevOps agent..." - # az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none - # az account set --subscription $AZURE_SUBSCRIPTION_ID --output none + deployer_file=/etc/profile.d/deploy_server.sh - # fi + if [ $USE_MSI == "true" ]; then + echo "##[section]Running on a deployer..." + source /etc/profile.d/deploy_server.sh + noAccess=$( az account show --query name | grep "N/A(tenant level account)") + + if [ -z "$noAccess" ]; then + az account set --subscription $ARM_SUBSCRIPTION_ID --output none + fi + else + echo "##[section]Running on an Azure DevOps agent..." + az login --service-principal -u $AZURE_CLIENT_ID -p=$AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID --output none + az account set --subscription $AZURE_SUBSCRIPTION_ID --output none + + fi - az login --service-principal -u "${AZURE_CLIENT_ID}" -p="$AZURE_CLIENT_SECRET" --tenant "$AZURE_TENANT_ID" --output none - az account set --subscription "$AZURE_SUBSCRIPTION_ID" --output none set -eu if [ ! 
-f $PARAMETERS_FOLDER/sshkey ]; then @@ -149,6 +150,7 @@ steps: AZURE_CLIENT_SECRET: ${{ parameters.azureClientSecret }} AZURE_TENANT_ID: ${{ parameters.azureTenantId }} AZURE_SUBSCRIPTION_ID: ${{ parameters.azureSubscriptionId }} + USE_MSI: ${{ parameters.USE_MSI }} ANSIBLE_COLLECTIONS_PATHS: ~/.ansible/collections:/opt/ansible/collections ANSIBLE_PYTHON_INTERPRETER: auto_silent ANSIBLE_DISPLAY_SKIPPED_HOSTS: false diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index 75f84f9239..46ed059e8e 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -780,10 +780,10 @@ else echo "export ARM_CLIENT_ID=${client_id}" | tee -a /tmp/deploy_server.sh fi - if [ -n "${tenant_id}" ]; then - export ARM_TENANT_ID=${tenant_id} - echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh - fi + # if [ -n "${tenant_id}" ]; then + # export ARM_TENANT_ID=${tenant_id} + # echo "export ARM_TENANT_ID=${tenant_id}" | tee -a /tmp/deploy_server.sh + # fi fi diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 5d2a295cad..0ad47b625c 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -1076,13 +1076,16 @@ fi echo "" echo "#########################################################################################" echo "# #" -echo -e "# $cyan Adding the subnets to storage account firewall $resetformatting #" +echo -e "# $cyan Adding the subnets to storage account firewalls $resetformatting #" echo "# #" echo "#########################################################################################" echo "" subnet_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw app_subnet_id | tr -d \") +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) +echo "useSAS = $useSAS" + if [ -n "${subnet_id}" ]; then echo "Adding the app subnet" az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none @@ -1108,14 +1111,25 @@ unset TF_DATA_DIR # # ################################################################################# -container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists) +if [ "$useSAS" = "true" ] ; then + container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists) +else + container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists --auth-mode login) +fi if [ "${container_exists}" == "false" ]; then + if [ "$useSAS" = "true" ] ; then az storage container create --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors + else + az storage container create --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --auth-mode login --only-show-errors + fi fi -az storage blob upload --file "${parameterfile}" --container-name tfvars/LANDSCAPE/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors 
--output none - +if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${parameterfile}" --container-name tfvars/LANDSCAPE/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +else + az storage blob upload --file "${parameterfile}" --container-name tfvars/LANDSCAPE/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --auth-mode login --only-show-errors --output none +fi exit $return_value diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 041818a7b6..9a73805471 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -1360,21 +1360,43 @@ unset TF_DATA_DIR # # ################################################################################# -az storage blob upload --file "${parameterfile}" --container-name tfvars/"${state_path}"/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) + +if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${parameterfile}" --container-name tfvars/"${state_path}"/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +else + az storage blob upload --file "${parameterfile}" --container-name tfvars/"${state_path}"/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --auth-mode login --no-progress --overwrite --only-show-errors --output none +fi if [ "${deployment_system}" == sap_system ] ; then echo "Uploading the yaml files from ${param_dirname} to the storage account" - az storage blob upload --file sap-parameters.yaml --container-name tfvars/"${state_path}"/"${key}" --name sap-parameters.yaml --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + if [ "$useSAS" = "true" ] ; then + az storage blob upload --file sap-parameters.yaml --container-name tfvars/"${state_path}"/"${key}" --name sap-parameters.yaml --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + else + az storage blob upload --file sap-parameters.yaml --container-name tfvars/"${state_path}"/"${key}" --name sap-parameters.yaml --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --auth-mode login --no-progress --overwrite --only-show-errors --output none + fi hosts_file=$(ls *_hosts.yaml) - az storage blob upload --file "${hosts_file}" --container-name tfvars/"${state_path}"/"${key}" --name "${hosts_file}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${hosts_file}" --container-name tfvars/"${state_path}"/"${key}" --name "${hosts_file}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + else + az storage blob upload --file "${hosts_file}" --container-name 
tfvars/"${state_path}"/"${key}" --name "${hosts_file}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --auth-mode login --no-progress --overwrite --only-show-errors --output none + fi fi if [ "${deployment_system}" == sap_landscape ] ; then - az storage blob upload --file "${system_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}${network_logical_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${system_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}${network_logical_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + else + az storage blob upload --file "${system_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}${network_logical_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --auth-mode login --no-progress --overwrite --only-show-errors --output none + fi fi if [ "${deployment_system}" == sap_library ] ; then deployer_config_information="${automation_config_directory}"/"${environment}""${region_code}" - az storage blob upload --file "${deployer_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${deployer_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none + else + az storage blob upload --file "${deployer_config_information}" --container-name tfvars/.sap_deployment_automation --name "${environment}${region_code}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --auth-mode login --no-progress --overwrite --only-show-errors --output none + fi fi diff --git a/deploy/scripts/sync_deployer.sh b/deploy/scripts/sync_deployer.sh index 8639be4e84..d401d56813 100755 --- a/deploy/scripts/sync_deployer.sh +++ b/deploy/scripts/sync_deployer.sh @@ -65,15 +65,24 @@ while :; do esac done -files=$(az storage blob list --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --query "[].name" -o tsv --only-show-errors --output tsv) +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --out tsv) + +if [ $useSAS = "true" ] ; then + files=$(az storage blob list --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --query "[].name" -o tsv --only-show-errors --output tsv) +else + files=$(az storage blob list --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --auth-mode login --query "[].name" -o tsv --only-show-errors --output tsv) +fi for name in $files; do if [ -n "$name" ] ; then echo "Downloading file: " "$name" dirName=$(dirname "$name") mkdir -p "$dirName" - - az storage blob download --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription 
"${STATE_SUBSCRIPTION}" --file "${name}" --name "${name}" --only-show-errors --output none --no-progress + if [ $useSAS = "true" ] ; then + az storage blob download --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --file "${name}" --name "${name}" --only-show-errors --output none --no-progress + else + az storage blob download --container-name tfvars --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --auth-mode login --file "${name}" --name "${name}" --only-show-errors --output none --no-progress + fi fi done diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 8571b12937..46621aef71 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -255,6 +255,15 @@ variable "deployer_private_ip_address" { default = [""] } + +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." + default = true + type = bool + } + + + ############################################################################### # # # Deployer authentication # diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index ae130cb930..f36779694d 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -1,5 +1,8 @@ locals { + + use_webapp = lower(var.use_webapp) + infrastructure = { environment = coalesce( var.environment, @@ -185,6 +188,8 @@ locals { deployer_diagnostics_account_arm_id = var.deployer_diagnostics_account_arm_id app_service_SKU = var.app_service_SKU_name user_assigned_identity_id = var.user_assigned_identity_id + shared_access_key_enabled = var.shared_access_key_enabled + } diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index d2eb883650..165b85580b 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -25,6 +25,7 @@ provider "azurerm" { } skip_provider_registration = true + storage_use_azuread = true } provider "azurerm" { @@ -42,6 +43,7 @@ provider "azurerm" { alias = "main" skip_provider_registration = true + storage_use_azuread = true } @@ -50,6 +52,7 @@ provider "azurerm" { } alias = "deployer" skip_provider_registration = true + storage_use_azuread = true } provider "azurerm" { @@ -60,6 +63,7 @@ provider "azurerm" { tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "dnsmanagement" skip_provider_registration = true + storage_use_azuread = true } provider "azuread" { diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index ab7fed1731..4313df950e 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -200,6 +200,13 @@ variable "short_named_endpoints_nics" { default = false } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." 
+ default = true + type = bool + } + + ######################################################################################### # # @@ -212,6 +219,12 @@ variable "spn_keyvault_id" { default = "" } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." + default = true + type = bool + } + ######################################################################################### # # diff --git a/deploy/terraform/bootstrap/sap_library/transform.tf b/deploy/terraform/bootstrap/sap_library/transform.tf index da5e1591bc..b4c38e67a9 100644 --- a/deploy/terraform/bootstrap/sap_library/transform.tf +++ b/deploy/terraform/bootstrap/sap_library/transform.tf @@ -65,6 +65,7 @@ locals { ) ) } + shared_access_key_enabled = var.shared_access_key_enabled } storage_account_tfstate = { arm_id = try( @@ -112,6 +113,7 @@ locals { try(var.storage_account_tfstate.ansible_blob_container.name, "ansible") ) } + shared_access_key_enabled = var.shared_access_key_enabled } } diff --git a/deploy/terraform/run/sap_deployer/backend.tf b/deploy/terraform/run/sap_deployer/backend.tf index 706e3eade0..b9335f77a9 100644 --- a/deploy/terraform/run/sap_deployer/backend.tf +++ b/deploy/terraform/run/sap_deployer/backend.tf @@ -4,5 +4,7 @@ Description: To use remote backend to deploy deployer(s). */ terraform { - backend "azurerm" {} + backend "azurerm" { + use_azuread_auth = true + } } diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 4ce66a38ed..772f4a02d4 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -27,6 +27,8 @@ provider "azurerm" { } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" skip_provider_registration = true + storage_use_azuread = true + use_msi = var.use_spn ? false : true } provider "azurerm" { @@ -48,7 +50,9 @@ provider "azurerm" { client_id = var.use_spn ? local.spn.client_id : null client_secret = var.use_spn ? local.spn.client_secret: null tenant_id = var.use_spn ? local.spn.tenant_id: null + use_msi = var.use_spn ? false : true alias = "main" + storage_use_azuread = true } provider "azurerm" { @@ -59,6 +63,8 @@ provider "azurerm" { client_secret = var.use_spn ? local.spn.client_secret: null tenant_id = var.use_spn ? local.spn.tenant_id: null skip_provider_registration = true + storage_use_azuread = true + use_msi = var.use_spn ? false : true } terraform { diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index f0fd374dd0..1b0ac83e0e 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -403,6 +403,13 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." 
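With use_azuread_auth set on the azurerm backend, Terraform reads and writes the state blob with an Entra ID token instead of an account key, so the identity running the deployment needs a data-plane role on the state storage account. A minimal sketch of that prerequisite, assuming the SPN behind ARM_CLIENT_ID is the deploying identity:

    # Without a blob data role, backend initialization typically fails with a 403.
    scope=$(az storage account show --name "${REMOTE_STATE_SA}" \
        --subscription "${STATE_SUBSCRIPTION}" --query id --output tsv)
    az role assignment create --assignee "${ARM_CLIENT_ID}" \
        --role "Storage Blob Data Contributor" --scope "${scope}"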
+ default = true + type = bool + } + + ######################################################################################### # # # DNS settings # diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index b2822b8441..d77878fe60 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -185,6 +185,7 @@ locals { deployer_diagnostics_account_arm_id = var.deployer_diagnostics_account_arm_id app_service_SKU = var.app_service_SKU_name user_assigned_identity_id = var.user_assigned_identity_id + shared_access_key_enabled = var.shared_access_key_enabled } authentication = { diff --git a/deploy/terraform/run/sap_landscape/backend.tf b/deploy/terraform/run/sap_landscape/backend.tf index 7be6350c59..a32e75c6fe 100644 --- a/deploy/terraform/run/sap_landscape/backend.tf +++ b/deploy/terraform/run/sap_landscape/backend.tf @@ -5,5 +5,7 @@ Description: */ terraform { - backend "azurerm" {} + backend "azurerm" { + use_azuread_auth = true + } } diff --git a/deploy/terraform/run/sap_landscape/imports.tf b/deploy/terraform/run/sap_landscape/imports.tf index c825131459..6b2010c8f1 100644 --- a/deploy/terraform/run/sap_landscape/imports.tf +++ b/deploy/terraform/run/sap_landscape/imports.tf @@ -15,6 +15,8 @@ data "terraform_remote_state" "deployer" { container_name = local.tfstate_container_name key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id + use_msi = var.use_spn ? false : true + use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 6481d50f88..2c5b8a9165 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -15,6 +15,8 @@ provider "azurerm" { features {} subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null + use_msi = var.use_spn ? false : true + storage_use_azuread = true } provider "azurerm" { @@ -29,11 +31,11 @@ provider "azurerm" { purge_soft_deleted_certificates_on_destroy = !var.enable_purge_control_for_keyvaults } } - subscription_id = data.azurerm_key_vault_secret.subscription_id.value - client_id = var.use_spn ? local.spn.client_id : null - client_secret = var.use_spn ? local.spn.client_secret : null - tenant_id = var.use_spn ? local.spn.tenant_id : null - use_msi = var.use_spn ? false : true + subscription_id = data.azurerm_key_vault_secret.subscription_id.value + client_id = var.use_spn ? local.spn.client_id : null + client_secret = var.use_spn ? local.spn.client_secret : null + tenant_id = var.use_spn ? local.spn.tenant_id : null + use_msi = var.use_spn ? false : true storage_use_azuread = true @@ -83,9 +85,10 @@ provider "azurerm" { } provider "azuread" { - client_id = var.use_spn ? local.spn.client_id : null - client_secret = var.use_spn ? local.spn.client_secret : null - tenant_id = local.spn.tenant_id + client_id = var.use_spn ? local.spn.client_id : null + client_secret = var.use_spn ? local.spn.client_secret : null + tenant_id = local.spn.tenant_id + use_msi = var.use_spn ? 
false : true } terraform { required_version = ">= 1.0" diff --git a/deploy/terraform/run/sap_library/backend.tf b/deploy/terraform/run/sap_library/backend.tf index 41d19d1881..59c1fd0b6b 100644 --- a/deploy/terraform/run/sap_library/backend.tf +++ b/deploy/terraform/run/sap_library/backend.tf @@ -4,5 +4,7 @@ */ terraform { - backend "azurerm" {} + backend "azurerm" { + use_azuread_auth = true + } } diff --git a/deploy/terraform/run/sap_library/imports.tf b/deploy/terraform/run/sap_library/imports.tf index 663e139953..2c67219195 100644 --- a/deploy/terraform/run/sap_library/imports.tf +++ b/deploy/terraform/run/sap_library/imports.tf @@ -12,6 +12,8 @@ data "terraform_remote_state" "deployer" { container_name = local.tfstate_container_name key = local.deployer_tfstate_key subscription_id = local.saplib_subscription_id + use_msi = var.use_spn ? false : true + use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index f9d445fbca..ac3d6bae92 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -21,6 +21,9 @@ provider "azurerm" { features { } skip_provider_registration = true + storage_use_azuread = true + use_msi = var.use_spn ? false : true + } provider "azurerm" { @@ -30,11 +33,13 @@ provider "azurerm" { } } - subscription_id = local.spn.subscription_id - client_id = local.use_spn ? local.spn.client_id : null - client_secret = local.use_spn ? local.spn.client_secret : null - tenant_id = local.use_spn ? local.spn.tenant_id : null - partner_id = "140c3bc9-c937-4139-874f-88288bab08bb" + subscription_id = local.spn.subscription_id + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null + partner_id = "140c3bc9-c937-4139-874f-88288bab08bb" + storage_use_azuread = true + use_msi = var.use_spn ? false : true alias = "main" skip_provider_registration = true @@ -45,6 +50,8 @@ provider "azurerm" { } skip_provider_registration = true alias = "deployer" + storage_use_azuread = true + use_msi = var.use_spn ? false : true } @@ -57,12 +64,15 @@ provider "azurerm" { client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null skip_provider_registration = true + storage_use_azuread = true + use_msi = var.use_spn ? false : true } provider "azuread" { - client_id = local.spn.client_id - client_secret = local.spn.client_secret - tenant_id = local.spn.tenant_id + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.spn.tenant_id + use_msi = var.use_spn ? false : true } terraform { diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index c799ed685f..aaed70aab2 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -217,6 +217,12 @@ variable "deployment" { default = "update" } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." 
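The recurring use_msi = var.use_spn ? false : true expression makes the managed identity the fallback whenever SPN credentials are not supplied. On a framework-built deployer this is wired up by sourcing /etc/profile.d/deploy_server.sh; an interactive equivalent — a sketch, assuming the VM has a managed identity assigned — looks like:

    # Log the deployer VM in with its managed identity and let azurerm pick it up.
    az login --identity --allow-no-subscriptions --output none
    az account set --subscription "${ARM_SUBSCRIPTION_ID}" --output none
    export ARM_USE_MSI=true
    # Leftover SPN variables would otherwise compete with the managed identity.
    unset ARM_CLIENT_SECRET ARM_TENANT_ID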
+ default = true + type = bool + } + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_library/transform.tf b/deploy/terraform/run/sap_library/transform.tf index da5e1591bc..6e3e13a878 100644 --- a/deploy/terraform/run/sap_library/transform.tf +++ b/deploy/terraform/run/sap_library/transform.tf @@ -65,7 +65,9 @@ locals { ) ) } + shared_access_key_enabled = var.shared_access_key_enabled } + storage_account_tfstate = { arm_id = try( coalesce( @@ -112,6 +114,8 @@ locals { try(var.storage_account_tfstate.ansible_blob_container.name, "ansible") ) } + + shared_access_key_enabled = var.shared_access_key_enabled } } diff --git a/deploy/terraform/run/sap_system/backend.tf b/deploy/terraform/run/sap_system/backend.tf index cd6ec3f21a..7c88d101e6 100644 --- a/deploy/terraform/run/sap_system/backend.tf +++ b/deploy/terraform/run/sap_system/backend.tf @@ -4,5 +4,7 @@ */ terraform { - backend "azurerm" {} + backend "azurerm" { + use_azuread_auth = true + } } diff --git a/deploy/terraform/run/sap_system/imports.tf b/deploy/terraform/run/sap_system/imports.tf index 42edf47c8f..0171babd7e 100644 --- a/deploy/terraform/run/sap_system/imports.tf +++ b/deploy/terraform/run/sap_system/imports.tf @@ -16,6 +16,8 @@ data "terraform_remote_state" "deployer" { container_name = local.tfstate_container_name key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id + use_msi = var.use_spn ? false : true + use_azuread_auth = true } } @@ -27,6 +29,8 @@ data "terraform_remote_state" "landscape" { container_name = "tfstate" key = var.landscape_tfstate_key subscription_id = local.saplib_subscription_id + use_msi = var.use_spn ? false : true + use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index a268a8eecd..21f61e5dec 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -60,6 +60,7 @@ provider "azuread" { client_id = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_id : null client_secret = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.client_secret : null tenant_id = local.spn.tenant_id + use_msi = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 
false : true } terraform { required_version = ">= 1.0" diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 7eec0d1085..03ef7bca79 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -87,6 +87,7 @@ resource "azurerm_storage_account" "deployer" { enable_https_traffic_only = local.enable_secure_transfer min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false + shared_access_key_enabled = var.deployer.shared_access_key_enabled } data "azurerm_storage_account" "deployer" { diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 89e7301f81..44ff5224c6 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -705,7 +705,7 @@ else (echo export ARM_CLIENT_ID="$${client_id}" | sudo tee -a /etc/profile.d/deploy_server.sh) > /dev/null 2>&1 (echo export ARM_SUBSCRIPTION_ID="$${subscription_id}" | sudo tee -a /etc/profile.d/deploy_server.sh) > /dev/null 2>&1 - (echo export ARM_TENANT_ID="$${tenant_id}" | sudo tee -a /etc/profile.d/deploy_server.sh) > /dev/null 2>&1 +# (echo export ARM_TENANT_ID="$${tenant_id}" | sudo tee -a /etc/profile.d/deploy_server.sh) > /dev/null 2>&1 # if [[ -n $${TOKEN} ]]; then # echo export AZURE_DEVOPS_EXT_PAT=$${TOKEN} | sudo tee -a /etc/profile.d/deploy_server.sh diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 71891afeae..8fa86ab21f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -206,7 +206,10 @@ resource "azurerm_key_vault_secret" "iscsi_ppk" { provider = azurerm.main count = (local.enable_landscape_kv && local.enable_iscsi_auth_key && !local.iscsi_key_exist) ? 1 : 0 depends_on = [ - azurerm_key_vault_access_policy.kv_user + azurerm_key_vault_access_policy.kv_user, + azurerm_role_assignment.role_assignment_spn, + azurerm_role_assignment.role_assignment_msi, + azurerm_key_vault_access_policy.kv_user_msi ] content_type = "" name = local.iscsi_ppk_name @@ -218,7 +221,10 @@ resource "azurerm_key_vault_secret" "iscsi_pk" { provider = azurerm.main count = (local.enable_landscape_kv && local.enable_iscsi_auth_key && !local.iscsi_key_exist) ? 1 : 0 depends_on = [ - azurerm_key_vault_access_policy.kv_user + azurerm_key_vault_access_policy.kv_user, + azurerm_role_assignment.role_assignment_spn, + azurerm_role_assignment.role_assignment_msi, + azurerm_key_vault_access_policy.kv_user_msi ] content_type = "" name = local.iscsi_pk_name @@ -230,7 +236,10 @@ resource "azurerm_key_vault_secret" "iscsi_username" { provider = azurerm.main count = (local.enable_landscape_kv && local.enable_iscsi && !local.iscsi_username_exist) ? 
1 : 0 depends_on = [ - azurerm_key_vault_access_policy.kv_user + azurerm_key_vault_access_policy.kv_user, + azurerm_role_assignment.role_assignment_spn, + azurerm_role_assignment.role_assignment_msi, + azurerm_key_vault_access_policy.kv_user_msi ] content_type = "" name = local.iscsi_username_name @@ -242,7 +251,10 @@ resource "azurerm_key_vault_secret" "iscsi_password" { provider = azurerm.main count = (local.enable_landscape_kv && local.enable_iscsi_auth_password && !local.iscsi_pwd_exist) ? 1 : 0 depends_on = [ - azurerm_key_vault_access_policy.kv_user + azurerm_key_vault_access_policy.kv_user, + azurerm_role_assignment.role_assignment_spn, + azurerm_role_assignment.role_assignment_msi, + azurerm_key_vault_access_policy.kv_user_msi ] content_type = "" name = local.iscsi_pwd_name @@ -292,13 +304,13 @@ data "azurerm_key_vault_secret" "iscsi_username" { // Using TF tls to generate SSH key pair for iscsi devices and store in user KV resource "tls_private_key" "iscsi" { - count = ( - local.enable_landscape_kv - && local.enable_iscsi_auth_key - && !local.iscsi_key_exist - && try(file(var.authentication.path_to_public_key), null) == null - ) ? 1 : 0 - algorithm = "RSA" - rsa_bits = 2048 + count = ( + local.enable_landscape_kv + && local.enable_iscsi_auth_key + && !local.iscsi_key_exist + && try(file(var.authentication.path_to_public_key), null) == null + ) ? 1 : 0 + algorithm = "RSA" + rsa_bits = 2048 } diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 2f9492f312..0e1534c3a0 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -13,7 +13,8 @@ resource "time_offset" "secret_expiry_date" { resource "azurerm_key_vault_secret" "saplibrary_access_key" { provider = azurerm.deployer - count = length(var.key_vault.kv_spn_id) > 0 ? 1 : 0 + + count = length(var.key_vault.kv_spn_id) > 0 ? 1 : 0 depends_on = [azurerm_private_endpoint.kv_user] name = "sapbits-access-key" value = local.sa_sapbits_exists ? 
( diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 62f884d495..5ea0e76220 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -33,6 +33,9 @@ resource "azurerm_storage_account" "storage_tfstate" { ) enable_https_traffic_only = true + + shared_access_key_enabled = var.storage_account_sapbits.shared_access_key_enabled + blob_properties { delete_retention_policy { days = 7 From 2b749290a6bca5da8919e870a7d9ec948cf94647 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 20:22:09 +0200 Subject: [PATCH 165/607] Update DEPLOYER_RANDOM_ID_SEED in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index e06fe2e9d4..b526583dae 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -228,7 +228,7 @@ stages: if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) - pass=$(echo $deployer_random_id | sed 's/-//g') + pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi @@ -361,6 +361,7 @@ stages: ARM_TENANT_ID: $(CP_ARM_TENANT_ID) AZURE_DEVOPS_EXT_PAT: $(PAT) CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" IS_PIPELINE_DEPLOYMENT: true POOL: $(POOL) From 8b0b058ecd6f2f206e763a2b0cc44adbba2e57ed Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 20:41:22 +0200 Subject: [PATCH 166/607] Update ARM_SUBSCRIPTION_ID usage in deploy script --- deploy/pipelines/01-deploy-control-plane.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index b526583dae..cdc5778bbb 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -591,7 +591,7 @@ stages: fi else source /etc/profile.d/deploy_server.sh - export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID unset ARM_TENANT_ID fi fi @@ -665,14 +665,14 @@ stages: $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ - --subscription $ARM_SUBSCRIPTION_ID \ + --subscription $STATE_SUBSCRIPTION \ --auto-approve --ado --msi \ ${storage_account_parameter} ${keyvault_parameter} else $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ - --subscription $ARM_SUBSCRIPTION_ID --spn_id $ARM_CLIENT_ID \ + --subscription $STATE_SUBSCRIPTION --spn_id 
$ARM_CLIENT_ID \ --spn_secret $ARM_CLIENT_SECRET --tenant_id $ARM_TENANT_ID \ --auto-approve --ado \ ${storage_account_parameter} ${keyvault_parameter} From b39372a243823ccfaa4919844983eb03ffaf2059 Mon Sep 17 00:00:00 2001 From: Harm Jan Stam Date: Mon, 29 Jan 2024 21:00:13 +0100 Subject: [PATCH 167/607] Several DB2 HADR improvements (#532) * Bugfix sub_iscsi_nsg_arm_id variable Error: A local value with the name "sub_iscsi" has not been declared * Add boolean to configure pacemaker for Azure scheduled events By default the boolean is set to true. * Add custom pacemaker NFS filesystem and SAP monitor timeouts When using a custom NFS solution different time-outs may be required. Refactored the cluster-Suse tasks so that it utilizes the calculated filesystem timeout and SAP resource monitor timeout from the 5.6.1-set_runtime_facts task * Use virtual_host names for DB2 HADR local host name configuration * Remove tags from ignore_changes for Linux dbserver * Add SSL certificate generation and distribution for virtual_hostname * Encrypt communication between HADR Primary and Standby Instances --- .../tasks/4.2.1.4-db2_haparameters.yaml | 14 ++-- .../tasks/4.2.1.7-sap-profile-changes.yaml | 2 +- .../4.2.1.9-db2_generate_distribute_ssl.yml | 82 +++++++++++++++++++ .../4.2.1-db2-hainstall/tasks/main.yml | 10 ++- .../tasks/1.17.2.0-cluster-RedHat.yml | 13 +-- .../tasks/5.6.1-set_runtime_facts.yml | 4 +- .../tasks/5.6.4.0-cluster-Suse.yml | 30 +------ deploy/ansible/vars/ansible-input-api.yaml | 7 +- .../modules/sap_system/anydb_node/vm-anydb.tf | 1 - 9 files changed, 117 insertions(+), 46 deletions(-) create mode 100644 deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml index 4b41c70f71..c1d7e9582d 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml @@ -5,8 +5,8 @@ block: - name: " DB2 Primary DB - Set Fact for hadr local host and remote host " ansible.builtin.set_fact: - db_hadr_local_host: "{{ primary_instance_name }}.{{ sap_fqdn }}" - db_hadr_remote_host: "{{ secondary_instance_name }}.{{ sap_fqdn }}" + db_hadr_local_host: "{{ hostvars[primary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" + db_hadr_remote_host: "{{ hostvars[secondary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" - name: "DB2 Primary DB- Switch user to db2" ansible.builtin.shell: whoami @@ -54,6 +54,7 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_2 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate + db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 60 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -80,6 +81,7 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_2 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate + db2 update db cfg for {{ 
db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 45 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -106,8 +108,8 @@ block: - name: " DB2 Secondary DB - Set Fact for hadr local host and remote host " ansible.builtin.set_fact: - db_hadr_local_host: "{{ secondary_instance_name }}.{{ sap_fqdn }}" - db_hadr_remote_host: "{{ primary_instance_name }}.{{ sap_fqdn }}" + db_hadr_local_host: "{{ hostvars[secondary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" + db_hadr_remote_host: "{{ hostvars[primary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" - name: "DB2 Secondary DB- Switch user to db2" ansible.builtin.shell: whoami @@ -127,7 +129,7 @@ failed_when: false changed_when: false - - name: "DB2 Secondary: Print return information from the Secondary db2status" + - name: "DB2 Secondary: Print return information from the Secondary db2status" ansible.builtin.debug: msg: "Result: {{ secdb2status.stdout }}" @@ -155,6 +157,7 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_1 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate + db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 60 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -181,6 +184,7 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_1 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate + db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 45 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.7-sap-profile-changes.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.7-sap-profile-changes.yaml index 4ae9ceaf47..74910f4ad5 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.7-sap-profile-changes.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.7-sap-profile-changes.yaml @@ -46,7 +46,7 @@ - name: "4.2.1.7 - SAP db2cli.ini profile changes " ansible.builtin.lineinfile: path: /sapmnt/{{ sap_sid | upper }}/global/db6/db2cli.ini - line: Hostname = {{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }} + line: Hostname={{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }} insertafter: '#Hostname' tags: - virtdbhostpara diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml new file mode 100644 index 0000000000..e28d2056b6 --- /dev/null +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml @@ -0,0 +1,82 @@ +--- +- name: "DB2: variables for SSL certificate" + ansible.builtin.set_fact: + db2_ssl_cn: "{{ custom_db_virtual_hostname | 
default(db_virtual_hostname, true) }}.{{ sap_fqdn }}" + db2_ssl_keydb_file: sapdb2{{ db_sid | lower }}_ssl_comm.kdb + db2_ssl_stash_file: sapdb2{{ db_sid | lower }}_ssl_comm.sth + db2_ssl_label: sap_db2_{{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}_ssl_comm_000 + +- name: "DB2 Primary DB: Generate SSL" + when: ansible_hostname == primary_instance_name + become: true + become_user: db2{{ db_sid | lower }} + block: + - name: "DB2 Primary DB - Create SSL Certificate" + ansible.builtin.shell: gsk8capicmd_64 -cert -create -db {{ db2_ssl_keydb_file }} -pw {{ main_password }} -label {{ db2_ssl_label }} -dn 'CN={{ db2_ssl_cn }}' -expire 3650 -size 4096 + args: + executable: /bin/csh + chdir: /db2/db2{{ db_sid | lower }}/keystore + environment: + PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" + LD_LIBRARY_PATH: /db2/db2{{ db_sid | lower }}/sqllib/lib64:/db2/db2{{ db_sid | lower }}/sqllib/lib64/gskit:/db2/db2{{ db_sid | lower }}/sqllib/lib + + - name: "DB2 Primary DB - Extract SSL Certificate" + ansible.builtin.shell: gsk8capicmd_64 -cert -extract -db {{ db2_ssl_keydb_file }} -pw {{ main_password }} -label {{ db2_ssl_label }} -target {{ db2_ssl_label }}.arm -format ascii -fips + args: + executable: /bin/csh + chdir: /db2/db2{{ db_sid | lower }}/keystore + environment: + PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" + LD_LIBRARY_PATH: /db2/db2{{ db_sid | lower }}/sqllib/lib64:/db2/db2{{ db_sid | lower }}/sqllib/lib64/gskit:/db2/db2{{ db_sid | lower }}/sqllib/lib + +- name: "DB2 Primary DB - Copy SSL Certificate and Keystore files" + when: ansible_hostname == primary_instance_name + block: + - name: "DB2 Primary DB - Copy SSL certificate to SSL_client directory" + ansible.builtin.copy: + src: /db2/db2{{ db_sid | lower }}/keystore/{{ db2_ssl_label }}.arm + dest: /usr/sap/{{ db_sid | upper }}/SYS/global/SSL_client/ + remote_src: true + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0640 + + - name: "DB2 Primary DB: Fetch keystore files to Controller" + ansible.builtin.fetch: + src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" + dest: /tmp/keystore_files/ + flat: true + loop: + - "{{ db2_ssl_keydb_file }}" + - "{{ db2_ssl_stash_file }}" + + - name: "DB2 Primary DB: Update SSL certificate in db2cli.ini" + ansible.builtin.lineinfile: + path: /sapmnt/{{ sap_sid | upper }}/global/db6/db2cli.ini + regexp: '^SSLServerCertificate=' + line: SSLServerCertificate=/usr/sap/{{ db_sid | upper }}/SYS/global/SSL_client/{{ db2_ssl_label }}.arm + +- name: "DB2: Copy keystore files from Controller to Secondary node" + when: ansible_hostname == secondary_instance_name + ansible.builtin.copy: + src: /tmp/keystore_files/{{ item }} + dest: /db2/db2{{ db_sid | lower }}/keystore/ + mode: 0600 + owner: db2{{ db_sid | lower }} + group: db{{ db_sid | lower }}adm + loop: + - "{{ db2_ssl_keydb_file }}" + - "{{ db2_ssl_stash_file }}" + +- name: "DB2 DB - Set SSL parameters" + become: true + become_user: db2{{ db_sid | lower }} + ansible.builtin.shell: | + db2 update dbm cfg using SSL_SVR_LABEL {{ db2_ssl_label }} + db2 update dbm cfg using SSL_VERSIONS TLSV13 + register: db2_update + failed_when: db2_update.rc not in [0,2] + args: + executable: /bin/csh + environment: + PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml index d06c5cd1fb..3e7504e7ab 100644 --- 
a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml @@ -26,8 +26,11 @@ - name: "DB2 - Take offline backup of Primary DB" ansible.builtin.import_tasks: 4.2.1.1-db2_primary_backup.yml - - name: "DB2 - Keystore Setup, Part 1" + - name: "DB2 - Keystore Setup on Primary node" ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml + + - name: "DB2 - Generate SSL on Primary node" + ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml always: - name: "DB2 Primary System Install: result" ansible.builtin.debug: @@ -50,9 +53,12 @@ - name: "DB2 Secondary System Install" ansible.builtin.import_tasks: 4.2.1.2-db2_ha_install_secondary.yml - - name: "DB2 - Keystore Setup, Part 2" + - name: "DB2 - Keystore Setup on Secondary node" ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml + - name: "DB2 - Distribute SSL certificate to Secondary node" + ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml + - name: "DB2 - Restore Secondary with backup of Primary DB" ansible.builtin.import_tasks: 4.2.1.3-db2_restore_secondary.yml always: diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index a8aa2c3db3..f89f6f1257 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -225,17 +225,18 @@ is_rhel_84_or_newer: "{{ ansible_distribution_version is version('8.4', '>=') }}" when: ansible_distribution_major_version in ["8", "9"] -- name: "1.17 Generic Pacemaker - Ensure Azure scheduled events is configured" - when: - - inventory_hostname == primary_instance_name - - is_rhel_84_or_newer - block: # After configuring the Pacemaker resources for azure-events agent, # when you place the cluster in or out of maintenance mode, you may get warning messages like: # WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname' # WARNING: cib-bootstrap-options: unknown attribute 'azure-events_globalPullState' # WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname' # These warning messages can be ignored. 
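The new cluster_use_scheduled_events_agent switch gates whether the azure-events resource agent is configured at all. The agent polls the instance metadata service for scheduled events; querying the same endpoint by hand from a cluster node — a quick sanity check, nothing more — shows what the agent would react to:

    # Pending redeploy/reboot/freeze events for this VM, as seen by IMDS:
    curl -s -H "Metadata:true" \
        ""
    # A reply of {"DocumentIncarnation":<n>,"Events":[]} means nothing is pending.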
+- name: "1.17 Generic Pacemaker - Ensure Azure scheduled events is configured" + when: + - cluster_use_scheduled_events_agent + - inventory_hostname == primary_instance_name + - is_rhel_84_or_newer + block: - name: "1.17 Generic Pacemaker - Ensure maintenance mode is set" ansible.builtin.command: pcs property set maintenance-mode=true @@ -286,7 +287,7 @@ # /*---------------------------------------------------------------------------8 # | | -# | Azure scheduled events - BEGIN | +# | Azure scheduled events - END | # | | # +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index c587ccce48..45a2cf1e81 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -87,7 +87,7 @@ {%- else -%} {%- set _timeoutvalue = 40 -%} {%- endif -%} - {{- _timeoutvalue -}} + {{- custom_cluster_fs_mon_timeout | default(_timeoutvalue, true) -}} when: - scs_high_availability @@ -104,7 +104,7 @@ {%- else -%} {%- set _timeoutvalue = 60 -%} {%- endif -%} - {{- _timeoutvalue -}} + {{- custom_cluster_sap_mon_timeout | default(_timeoutvalue, true) -}} when: - scs_high_availability diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml index 7f6f0e5bc8..d2750e1771 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml @@ -27,22 +27,9 @@ directory='{{ profile_directory }}' fstype='nfs' options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval="20s" timeout="40s" + op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} register: ascs_fs_resource failed_when: ascs_fs_resource.rc > 1 - when: NFS_version == "NFSv3" - - - name: "5.6 SCSERS - SUSE - SCS - Configure File system resources" - ansible.builtin.command: > - crm configure primitive fs_{{ sap_sid | upper }}_{{ instance_type | upper }} Filesystem \ - device='{{ ascs_filesystem_device }}' \ - directory='{{ profile_directory }}' fstype='nfs' options='sec=sys,vers=4.1' \ - op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ - op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval="20s" timeout="105s" - register: ascs_fs_resource - failed_when: ascs_fs_resource.rc > 1 - when: NFS_version == "NFSv4.1" - name: "5.6 SCSERS - SUSE - SCS - Create ASCS VIP - This is LB frontend ASCS/SCS IP" ansible.builtin.command: > @@ -185,22 +172,9 @@ directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval="20s" timeout="40s" - register: ers_fs_resource - failed_when: ers_fs_resource.rc > 1 - when: NFS_version == "NFSv3" - - - name: "5.6 SCSERS - SUSE - ERS - Configure File system resources" - ansible.builtin.command: > - crm configure primitive fs_{{ sap_sid | upper }}_ERS Filesystem \ - device='{{ ers_filesystem_device }}' \ - 
directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' options='sec=sys,vers=4.1' \ - op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ - op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval="20s" timeout="105s" + op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} register: ers_fs_resource failed_when: ers_fs_resource.rc > 1 - when: NFS_version == "NFSv4.1" - name: "5.6 SCSERS - SUSE - ERS - Create ERS VIP - This is LB frontend ERS IP" ansible.builtin.command: > diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 216b62e5a7..d4be5dbc84 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -211,6 +211,11 @@ use_simple_mount: false database_cluster_type: "AFA" # scs_high_availability: false scs_cluster_type: "AFA" +# Configure pacemaker for Azure scheduled events +cluster_use_scheduled_events_agent: true +# Custom pacemaker NFS filesystem and SAP monitor timeouts +custom_cluster_fs_mon_timeout: "" +custom_cluster_sap_mon_timeout: "" # ------------------- Begin - SAP SWAP settings variables --------------------8 sap_swap: @@ -227,4 +232,4 @@ sap_swap: - { tier: "oracle-multi-sid", swap_size_mb: "20480" } - { tier: "observer", swap_size_mb: "2048" } - { tier: 'sqlserver', swap_size_mb: '20480' } - # --------------------- End - SAP SWAP settings variables --------------------8 +# --------------------- End - SAP SWAP settings variables --------------------8 diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index a8437deb0d..8aac7f124c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -233,7 +233,6 @@ resource "azurerm_linux_virtual_machine" "dbserver" { lifecycle { ignore_changes = [ // Ignore changes to computername - tags, computer_name ] } From 2b222b85c42c3012fb45d9b10e3c3c40f7dd0d72 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 22:54:28 +0200 Subject: [PATCH 168/607] Refactor deployment pipeline and control plane script --- deploy/pipelines/01-deploy-control-plane.yaml | 35 +++++-------------- deploy/scripts/deploy_controlplane.sh | 8 ++--- 2 files changed, 13 insertions(+), 30 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index cdc5778bbb..cbf3b0291d 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -125,14 +125,6 @@ stages: key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} fi - # az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - # return_code=$? - # if [ 0 != $return_code ]; then - # echo -e "$boldred--- Login failed ---$reset" - # echo "##vso[task.logissue type=error]az login failed." 
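Several of the pipeline lookups move from piping JSON through tr -d \" to asking the CLI for TSV directly. The practical difference, side by side (a sketch reusing the same variable-group query as the hunk above):

    # JSON output quotes strings, so the quotes had to be stripped afterwards:
    az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" \
        --query "Deployer_Key_Vault.value" | tr -d \")

    # TSV emits the bare value, so no post-processing is needed:
    az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" \
        --query "Deployer_Key_Vault.value" --output tsv)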
- # exit $return_code - # fi - key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) export TF_VAR_deployer_kv_user_arm_id=${key_vault_id} if [ -n "${key_vault_id}" ]; then @@ -208,9 +200,7 @@ stages: dos2unix -q ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) echo -e "$green--- Configuring variables ---$reset" deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}$LOCATION - echo -e "$green--- az login ---$reset" - az account set --subscription $ARM_SUBSCRIPTION_ID - echo -e "$green--- Deploy the Control Plane ---$reset" + echo -e "$green--- Deploy the Control Plane ---$reset" if [ -n "$(PAT)" ]; then echo 'Deployer Agent PAT is defined' fi @@ -234,16 +224,6 @@ stages: if [ "$USE_WEBAPP" = "true" ]; then echo "Use WebApp is selected" - - # if [ $TF_VAR_app_registration_app_id == '$(APP_REGISTRATION_APP_ID)' ]; then - # echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." - # exit 2 - # fi - - # if [ -z $TF_VAR_webapp_client_secret == '$(WEB_APP_CLIENT_SECRET)' ]; then - # echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." - # exit 2 - # fi fi export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log @@ -251,6 +231,7 @@ stages: if [ "$USE_MSI" = "true" ]; then export ARM_CLIENT_SECRET=$servicePrincipalKey + export ARM_SUBSCRIPTION_ID=$servicePrincipalKey $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ @@ -485,10 +466,9 @@ stages: fi keyvault_parameter="" - if [ -n "${keyvault}" ]; then - if [ "${keyvault}" != "$(Deployer_Key_Vault)" ]; then - keyvault_parameter=" --vault ${keyvault} " - fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") + if [ -n "${az_var}" ]; then + keyvault_parameter=" --vault ${az_var} " fi echo -e "$green--- Validations ---$reset" @@ -508,13 +488,14 @@ stages: echo "REMOTE_STATE_SA="${az_var} echo "REMOTE_STATE_SA="${az_var} | tee -a $deployer_environment_file_name > /dev/null echo "STATE_SUBSCRIPTION="$ARM_SUBSCRIPTION_ID | tee -a $deployer_environment_file_name > /dev/null - echo "step=3" | tee -a $deployer_environment_file_name > /dev/null fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value") if [[ ${#az_var} -ne 0 ]]; then echo "REMOTE_STATE_RG="${az_var} echo "REMOTE_STATE_RG="${az_var} | tee -a $deployer_environment_file_name > /dev/null + echo "step=3" | tee -a $deployer_environment_file_name > /dev/null + fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value") @@ -583,12 +564,14 @@ stages: echo "Login using SPN" export ARM_USE_MSI=false az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + return_code=$? if [ 0 != $return_code ]; then echo -e "$boldred--- Login failed ---$reset" echo "##vso[task.logissue type=error]az login failed." 
exit $return_code fi + az account set --subscription $ARM_SUBSCRIPTION_ID else source /etc/profile.d/deploy_server.sh # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index 3adefc03b7..daf6c3c14a 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -131,7 +131,6 @@ init "${automation_config_directory}" "${generic_config_information}" "${deploye if [ -n "${subscription}" ]; then ARM_SUBSCRIPTION_ID="${subscription}" - export ARM_SUBSCRIPTION_ID=$subscription fi # Check that the exports ARM_SUBSCRIPTION_ID and SAP_AUTOMATION_REPO_PATH are defined validate_exports @@ -194,9 +193,11 @@ if [ -n "${subscription}" ]; then if [ -n "${subscription}" ]; then - az account set --sub "${subscription}" - export ARM_SUBSCRIPTION_ID="${subscription}" + az account set --subscription "${subscription}" fi + + load_config_vars "${deployer_config_information}" "keyvault" + kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") if [ -z "${kv_found}" ] ; then @@ -213,7 +214,6 @@ fi load_config_vars "${deployer_config_information}" "step" -load_config_vars "${deployer_config_information}" "keyvault" if [ 0 = "${deploy_using_msi_only:-}" ]; then echo "Using Service Principal for deployment" From e3f83d86d77be02eade24465e8458b57f2fbd67b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 23:06:20 +0200 Subject: [PATCH 169/607] Fix Azure login and set ARM_USE_MSI to true --- deploy/pipelines/01-deploy-control-plane.yaml | 4 ++++ deploy/scripts/deploy_controlplane.sh | 5 ----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index cbf3b0291d..27038833b1 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -574,8 +574,12 @@ stages: az account set --subscription $ARM_SUBSCRIPTION_ID else source /etc/profile.d/deploy_server.sh + az logout + az login --identity --output none + az account set --subscription $ARM_SUBSCRIPTION_ID # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID unset ARM_TENANT_ID + export ARM_USE_MSI=true fi fi diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index daf6c3c14a..c8c1a22581 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -191,11 +191,6 @@ if [ -n "${subscription}" ]; then echo "#########################################################################################" echo "" - if [ -n "${subscription}" ]; - then - az account set --subscription "${subscription}" - fi - load_config_vars "${deployer_config_information}" "keyvault" kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") From 3041de27e5783c2e92c16d88b74b6916f4751f14 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 29 Jan 2024 23:15:56 +0200 Subject: [PATCH 170/607] Add console output for using SPN and MSI --- deploy/pipelines/01-deploy-control-plane.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 27038833b1..def734ddf7 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -87,6 +87,7 @@ stages: green="\e[1;32m" reset="\e[0m" boldred="\e[1;31m" + 
cyan="\e[1;36m" set -eu @@ -561,7 +562,7 @@ stages: else if [ $USE_MSI != "true" ]; then - echo "Login using SPN" + echo -e "$cyan--- Using SPN ---$reset" export ARM_USE_MSI=false az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none @@ -573,10 +574,8 @@ stages: fi az account set --subscription $ARM_SUBSCRIPTION_ID else + echo -e "$cyan--- Using MSI ---$reset" source /etc/profile.d/deploy_server.sh - az logout - az login --identity --output none - az account set --subscription $ARM_SUBSCRIPTION_ID # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID unset ARM_TENANT_ID export ARM_USE_MSI=true @@ -648,7 +647,7 @@ stages: sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh if [ "$USE_MSI" = "true" ]; then - echo "Using MSI" + echo -e "$cyan--- Using MSI ---$reset" $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ From 769fe0c270048c4cb684e803b1fc800819630e87 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 01:51:42 +0200 Subject: [PATCH 171/607] Deployer msi tests (#533) * Add cyan color to shell variables * Update variable retrieval in deploy control plane pipeline * Refactor deploy_controlplane.sh script * Remove shared_access_key_enabled variable * Refactor variable handling in deploy control plane pipeline * Add Azure account list command to deploy_controlplane.sh script * Add check for subscription access * Fix ARM_SUBSCRIPTION_ID assignment in deploy control plane pipeline --------- Co-authored-by: Kimmo Forss --- deploy/pipelines/01-deploy-control-plane.yaml | 31 ++++++------ deploy/scripts/deploy_controlplane.sh | 49 ++++++++++++++----- .../bootstrap/sap_library/tfvar_variables.tf | 6 --- 3 files changed, 53 insertions(+), 33 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index def734ddf7..7a3942e94d 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -118,7 +118,7 @@ stages: sed -i 's/step=3/step=0/' $deployer_environment_file_name export FORCE_RESET=true - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --output tsv) if [ -n "${az_var}" ]; then key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} else @@ -232,7 +232,7 @@ stages: if [ "$USE_MSI" = "true" ]; then export ARM_CLIENT_SECRET=$servicePrincipalKey - export ARM_SUBSCRIPTION_ID=$servicePrincipalKey + export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ @@ -397,6 +397,7 @@ stages: green="\e[1;32m" reset="\e[0m" boldred="\e[1;31m" + cyan="\e[1;36m" ENVIRONMENT=$(echo $(deployerfolder) | awk -F'-' '{print $1}' | xargs) ; echo Environment ${ENVIRONMENT} LOCATION=$(echo $(deployerfolder) | awk -F'-' '{print $2}' | xargs) ; echo Location ${LOCATION} @@ -422,16 +423,18 @@ stages: exit 2 fi echo -e "$green--- Variables ---$reset" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query 
"Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --output tsv) if [ -n "${az_var}" ]; then - key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} + key_vault="${az_var}" + echo -e "$cyan 'Deployer Key Vault' ${key_vault} $reset" else if [ -f ${deployer_environment_file_name} ] ; then - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) + echo -e "$cyan 'Deployer Key Vault' ${key_vault} $reset" fi fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --output tsv) if [ -n "${az_var}" ]; then STATE_SUBSCRIPTION="${az_var}" ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION else @@ -440,7 +443,7 @@ stages: fi fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" --output tsv) if [ -n "${az_var}" ]; then deployer_random_id="${az_var}" else @@ -449,7 +452,7 @@ stages: fi fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --output tsv) if [ -n "${az_var}" ]; then REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA else @@ -467,9 +470,8 @@ stages: fi keyvault_parameter="" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") - if [ -n "${az_var}" ]; then - keyvault_parameter=" --vault ${az_var} " + if [ -n "${key_vault}" ]; then + keyvault_parameter=" --vault ${key_vault} " fi echo -e "$green--- Validations ---$reset" @@ -686,13 +688,13 @@ stages: echo 'Deployer State File' $file_deployer_tfstate_key file_key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) - echo 'Deployer Key Vault' ${file_key_vault} + echo '(File) Deployer Key Vault' ${file_key_vault} file_REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) - echo 'Terraform state file storage account' $file_REMOTE_STATE_SA + echo '(File) Terraform state file storage account' $file_REMOTE_STATE_SA file_REMOTE_STATE_RG=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_RG | awk -F'=' '{print $2}' | xargs) - echo 'Terraform state file resource group' $file_REMOTE_STATE_RG + echo '(File) Terraform state file resource group' $file_REMOTE_STATE_RG fi echo -e "$green--- Update repo ---$reset" @@ -841,7 +843,6 @@ stages: DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" IS_PIPELINE_DEPLOYMENT: true - keyvault: $(Deployer_Key_Vault) LOGON_USING_SPN: $(Logon_Using_SPN) POOL: $(POOL) SAP_AUTOMATION_REPO_PATH: ${{ 
parameters.sap_automation_repo_path }} diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index c8c1a22581..58c9204c7b 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -129,9 +129,6 @@ fi init "${automation_config_directory}" "${generic_config_information}" "${deployer_config_information}" -if [ -n "${subscription}" ]; then - ARM_SUBSCRIPTION_ID="${subscription}" -fi # Check that the exports ARM_SUBSCRIPTION_ID and SAP_AUTOMATION_REPO_PATH are defined validate_exports return_code=$? @@ -167,6 +164,20 @@ echo -e "# $cyan Starting the control plane deployment $resetf echo "# #" echo "#########################################################################################" +noAccess=$( az account show --query name | grep "N/A(tenant level account)") + +if [ -n "$noAccess" ]; then + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred The provided credentials do not have access to the subscription!!! $resetformatting #" + echo "# #" + echo "#########################################################################################" + + az account show --output table + + exit 65 +fi +az account list --query "[].{Name:name,Id:id}" --output table #setting the user environment variables if [ -n "${subscription}" ]; then if is_valid_guid "$subscription"; then @@ -191,20 +202,34 @@ if [ -n "${subscription}" ]; then echo "#########################################################################################" echo "" - load_config_vars "${deployer_config_information}" "keyvault" + if [ -z $keyvault ] ; then + load_config_vars "${deployer_config_information}" "keyvault" + fi - kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") + if [ -n $keyvault ] ; then + + + kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") + + if [ -z "${kv_found}" ] ; then + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred Detected a failed deployment $resetformatting #" + echo "# #" + echo -e "# $cyan Trying to recover $resetformatting #" + echo "# #" + echo "#########################################################################################" + step=0 + save_config_var "step" "${deployer_config_information}" + fi + else + step=0 + save_config_var "step" "${deployer_config_information}" - if [ -z "${kv_found}" ] ; then - echo "#########################################################################################" - echo "# #" - echo -e "# $boldred Detected a failed deployment $resetformatting #" - echo "# #" - echo "#########################################################################################" - step=0 fi + fi diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 4313df950e..02532b4dde 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -200,12 +200,6 @@ variable "short_named_endpoints_nics" { default = false } -variable "shared_access_key_enabled" { - description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." 
- default = true - type = bool - } - ######################################################################################### From 3e50c906cf676ffcba08ee1a7b30b5e55bc7870c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 09:35:56 +0200 Subject: [PATCH 172/607] Update backend and provider configurations --- deploy/terraform/run/sap_deployer/backend.tf | 2 +- deploy/terraform/run/sap_deployer/providers.tf | 6 +++--- deploy/terraform/run/sap_library/backend.tf | 2 +- deploy/terraform/run/sap_library/providers.tf | 8 ++++---- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/terraform/run/sap_deployer/backend.tf b/deploy/terraform/run/sap_deployer/backend.tf index b9335f77a9..14e7303d96 100644 --- a/deploy/terraform/run/sap_deployer/backend.tf +++ b/deploy/terraform/run/sap_deployer/backend.tf @@ -5,6 +5,6 @@ Description: */ terraform { backend "azurerm" { - use_azuread_auth = true + use_azuread_auth = !var.shared_access_key_enabled } } diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 772f4a02d4..07199ae105 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -27,7 +27,7 @@ provider "azurerm" { } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" skip_provider_registration = true - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } @@ -52,7 +52,7 @@ provider "azurerm" { tenant_id = var.use_spn ? local.spn.tenant_id: null use_msi = var.use_spn ? false : true alias = "main" - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled } provider "azurerm" { @@ -63,7 +63,7 @@ provider "azurerm" { client_secret = var.use_spn ? local.spn.client_secret: null tenant_id = var.use_spn ? local.spn.tenant_id: null skip_provider_registration = true - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } diff --git a/deploy/terraform/run/sap_library/backend.tf b/deploy/terraform/run/sap_library/backend.tf index 59c1fd0b6b..4c1ab08b3b 100644 --- a/deploy/terraform/run/sap_library/backend.tf +++ b/deploy/terraform/run/sap_library/backend.tf @@ -5,6 +5,6 @@ terraform { backend "azurerm" { - use_azuread_auth = true + use_azuread_auth = !var.shared_access_key_enabled } } diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index ac3d6bae92..cbc8b786d8 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -21,8 +21,8 @@ provider "azurerm" { features { } skip_provider_registration = true - storage_use_azuread = true use_msi = var.use_spn ? false : true + storage_use_azuread = !var.shared_access_key_enabled } @@ -38,7 +38,7 @@ provider "azurerm" { client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null partner_id = "140c3bc9-c937-4139-874f-88288bab08bb" - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true alias = "main" @@ -50,7 +50,7 @@ provider "azurerm" { } skip_provider_registration = true alias = "deployer" - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } @@ -64,7 +64,7 @@ provider "azurerm" { client_secret = local.use_spn ? 
local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null skip_provider_registration = true - storage_use_azuread = true + storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } From 305bd3659b52058cfca0371c53decbfafc0bca77 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 09:53:45 +0200 Subject: [PATCH 173/607] Update Azure AD authentication in backend.tf files --- deploy/scripts/installer.sh | 9 +++++++++ deploy/terraform/run/sap_deployer/backend.tf | 1 - deploy/terraform/run/sap_landscape/backend.tf | 2 +- deploy/terraform/run/sap_landscape/imports.tf | 1 - deploy/terraform/run/sap_landscape/providers.tf | 5 ----- deploy/terraform/run/sap_library/backend.tf | 1 - 6 files changed, 10 insertions(+), 9 deletions(-) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 9a73805471..178802fa95 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -250,6 +250,15 @@ else fi +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) + +if [ "$useSAS" = "true" ] ; then + export ARM_USE_AZUREAD=false +else + export ARM_USE_AZUREAD=true +fi + + landscape_tfstate_key_parameter='' if [[ -z $landscape_tfstate_key ]]; diff --git a/deploy/terraform/run/sap_deployer/backend.tf b/deploy/terraform/run/sap_deployer/backend.tf index 14e7303d96..ba7c160165 100644 --- a/deploy/terraform/run/sap_deployer/backend.tf +++ b/deploy/terraform/run/sap_deployer/backend.tf @@ -5,6 +5,5 @@ Description: */ terraform { backend "azurerm" { - use_azuread_auth = !var.shared_access_key_enabled } } diff --git a/deploy/terraform/run/sap_landscape/backend.tf b/deploy/terraform/run/sap_landscape/backend.tf index a32e75c6fe..0bc3091869 100644 --- a/deploy/terraform/run/sap_landscape/backend.tf +++ b/deploy/terraform/run/sap_landscape/backend.tf @@ -6,6 +6,6 @@ Description: terraform { backend "azurerm" { - use_azuread_auth = true + } } diff --git a/deploy/terraform/run/sap_landscape/imports.tf b/deploy/terraform/run/sap_landscape/imports.tf index 6b2010c8f1..74284361c4 100644 --- a/deploy/terraform/run/sap_landscape/imports.tf +++ b/deploy/terraform/run/sap_landscape/imports.tf @@ -16,7 +16,6 @@ data "terraform_remote_state" "deployer" { key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true - use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 2c5b8a9165..0edbd14d22 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -16,7 +16,6 @@ provider "azurerm" { features {} subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null use_msi = var.use_spn ? false : true - storage_use_azuread = true } provider "azurerm" { @@ -37,8 +36,6 @@ provider "azurerm" { tenant_id = var.use_spn ? local.spn.tenant_id : null use_msi = var.use_spn ? false : true - storage_use_azuread = true - partner_id = "25c87b5f-716a-4067-bcd8-116956916dd6" alias = "workload" skip_provider_registration = true @@ -52,7 +49,6 @@ provider "azurerm" { client_secret = var.use_spn ? local.cp_spn.client_secret : null tenant_id = var.use_spn ? local.cp_spn.tenant_id : null use_msi = var.use_spn ? 
false : true - storage_use_azuread = true skip_provider_registration = true } @@ -69,7 +65,6 @@ provider "azurerm" { client_secret = var.use_spn ? local.cp_spn.client_secret : null tenant_id = var.use_spn ? local.cp_spn.tenant_id : null use_msi = var.use_spn ? false : true - storage_use_azuread = true skip_provider_registration = true } diff --git a/deploy/terraform/run/sap_library/backend.tf b/deploy/terraform/run/sap_library/backend.tf index 4c1ab08b3b..bb0c7be191 100644 --- a/deploy/terraform/run/sap_library/backend.tf +++ b/deploy/terraform/run/sap_library/backend.tf @@ -5,6 +5,5 @@ terraform { backend "azurerm" { - use_azuread_auth = !var.shared_access_key_enabled } } From 2d9137e0a1bc6ef573d5a65dd0923b82c65a5b7d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 10:02:30 +0200 Subject: [PATCH 174/607] Remove use_azuread_auth flag from terraform_remote_state --- deploy/terraform/run/sap_library/imports.tf | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/terraform/run/sap_library/imports.tf b/deploy/terraform/run/sap_library/imports.tf index 2c67219195..14630d7983 100644 --- a/deploy/terraform/run/sap_library/imports.tf +++ b/deploy/terraform/run/sap_library/imports.tf @@ -13,7 +13,6 @@ data "terraform_remote_state" "deployer" { key = local.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true - use_azuread_auth = true } } From 4acdca44f40f1db9733dc1c5e8fb017898628f4c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 10:32:59 +0200 Subject: [PATCH 175/607] Remove use_azuread_auth from backend and imports.tf files --- deploy/terraform/run/sap_system/backend.tf | 1 - deploy/terraform/run/sap_system/imports.tf | 2 -- deploy/terraform/run/sap_system/providers.tf | 3 --- 3 files changed, 6 deletions(-) diff --git a/deploy/terraform/run/sap_system/backend.tf b/deploy/terraform/run/sap_system/backend.tf index 7c88d101e6..70dddc1207 100644 --- a/deploy/terraform/run/sap_system/backend.tf +++ b/deploy/terraform/run/sap_system/backend.tf @@ -5,6 +5,5 @@ terraform { backend "azurerm" { - use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_system/imports.tf b/deploy/terraform/run/sap_system/imports.tf index 0171babd7e..09f0753132 100644 --- a/deploy/terraform/run/sap_system/imports.tf +++ b/deploy/terraform/run/sap_system/imports.tf @@ -17,7 +17,6 @@ data "terraform_remote_state" "deployer" { key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true - use_azuread_auth = true } } @@ -30,7 +29,6 @@ data "terraform_remote_state" "landscape" { key = var.landscape_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true - use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 21f61e5dec..7d19f9ab51 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -35,8 +35,6 @@ provider "azurerm" { tenant_id = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.spn.tenant_id : null use_msi = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? 
false : true - storage_use_azuread = true - partner_id = "3179cd51-f54b-4c73-ac10-8e99417efce7" alias = "system" skip_provider_registration = true @@ -50,7 +48,6 @@ provider "azurerm" { client_secret = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_secret : null tenant_id = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.tenant_id : null use_msi = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? false : true - storage_use_azuread = true skip_provider_registration = true } From 3d77370abf72adc4309380be535f0f9b383ac5cd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 30 Jan 2024 12:44:04 +0200 Subject: [PATCH 176/607] Refactor bom_download.yaml to simplify when conditions --- .../roles-sap/0.1-bom-validator/tasks/bom_download.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml index 720ae43411..3ea6aea0e6 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml @@ -395,14 +395,12 @@ # -------------------------------------+---------------------------------------8 when: - - bom_media_entry.checksum | bool + - create_checksums is defined # Step: 05-03-03 - END # -------------------------------------+---------------------------------------8 when: - # - bom_media_entry.checksum is not defined - # - create_checksums is defined - - (bom_media_entry.checksum is not defined and create_checksums is defined) or (bom_media_entry.checksum | bool) + - bom_media_entry.checksum is not defined # Step: 05-03 - END # -------------------------------------+---------------------------------------8 From dcef2d5ff65e6242d0d63565dd74edf8441bd07f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 31 Jan 2024 13:52:35 +0200 Subject: [PATCH 177/607] Update scs_server_count property to allow null values --- Webapp/SDAF/Models/SystemModel.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index d7e0d9de12..1b1c336bbb 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -353,7 +353,7 @@ public bool IsValid() | | +------------------------------------4--------------------------------------*/ - public int scs_server_count { get; set; } = 1; + public int? 
scs_server_count { get; set; } = 1; public string scs_server_sku { get; set; } From 02130a5a8347756a728d7cd476131ab95290a964 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 31 Jan 2024 13:52:43 +0200 Subject: [PATCH 178/607] Update database high availability configuration --- .../terraform-units/modules/sap_system/anydb_node/vm-anydb.tf | 4 ++-- .../terraform-units/modules/sap_system/hdb_node/vm-hdb.tf | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 8aac7f124c..5243586592 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -590,7 +590,7 @@ resource "azurerm_role_assignment" "role_assignment_msi" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database_server_count > 1 + var.database.high_availability ) ? ( var.database_server_count ) : ( @@ -606,7 +606,7 @@ resource "azurerm_role_assignment" "role_assignment_msi_ha" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database_server_count > 1 + var.database.high_availability ) ? ( var.database_server_count ) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index ac359c9f85..e5e1c1ad69 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -300,7 +300,7 @@ resource "azurerm_role_assignment" "role_assignment_msi" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database_server_count > 1 + var.database.high_availability ) ? ( var.database_server_count ) : ( @@ -316,7 +316,7 @@ resource "azurerm_role_assignment" "role_assignment_msi_ha" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database_server_count > 1 + var.database.high_availability ) ? ( var.database_server_count ) : ( From b7f4e78b831dc10565c0c1beadda9a06aeb70c6e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 31 Jan 2024 15:24:02 +0200 Subject: [PATCH 179/607] Update ANF volume count based on database server count --- .../modules/sap_system/hdb_node/anf.tf | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index c3f4e246a7..fb0c542754 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -10,7 +10,7 @@ resource "azurerm_netapp_volume" "hanadata" { var.hana_ANF_volumes.use_existing_data_volume ? ( 0 ) : ( - var.database.high_availability ? 2 : 1 + var.database_server_count )) : ( 0 ) @@ -59,7 +59,7 @@ data "azurerm_netapp_volume" "hanadata" { count = var.hana_ANF_volumes.use_for_data ? ( var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - var.database.high_availability ? 2 : 1 + var.database_server_count ) : ( 0 )) : ( @@ -89,7 +89,7 @@ resource "azurerm_netapp_volume" "hanalog" { var.hana_ANF_volumes.use_existing_log_volume ? ( 0 ) : ( - var.database.high_availability ? 
2 : 1 + var.database_server_count )) : ( 0 ) @@ -135,7 +135,7 @@ data "azurerm_netapp_volume" "hanalog" { count = var.hana_ANF_volumes.use_for_log ? ( var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - var.database.high_availability ? 2 : 1 + var.database_server_count ) : ( 0 )) : ( @@ -164,7 +164,7 @@ resource "azurerm_netapp_volume" "hanashared" { var.hana_ANF_volumes.use_existing_shared_volume ? ( 0 ) : ( - var.database.high_availability ? 2 : 1 + var.database_server_count )) : ( 0 ) @@ -212,7 +212,7 @@ data "azurerm_netapp_volume" "hanashared" { count = var.hana_ANF_volumes.use_for_shared ? ( var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - var.database.high_availability ? 2 : 1 + var.database_server_count ) : ( 0 )) : ( From e05f866cfcec462446e4c5966ac4b2d1fc1b95c4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 31 Jan 2024 17:15:31 +0200 Subject: [PATCH 180/607] Update ANF volume count based on database server count --- deploy/terraform/run/sap_system/module.tf | 2 +- .../modules/sap_system/hdb_node/anf.tf | 12 +++++----- .../modules/sap_system/hdb_node/outputs.tf | 24 +++---------------- 3 files changed, 10 insertions(+), 28 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index ec318847ed..347db4049e 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -401,7 +401,7 @@ module "output_files" { shared_home = var.shared_home hana_data = [module.hdb_node.hana_data_primary, module.hdb_node.hana_data_secondary] hana_log = [module.hdb_node.hana_log_primary, module.hdb_node.hana_log_secondary] - hana_shared = [module.hdb_node.hana_shared_primary, module.hdb_node.hana_shared_secondary] + hana_shared = [module.hdb_node.hana_shared_primary] usr_sap = module.common_infrastructure.usrsap_path ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index fb0c542754..7d234576f2 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -10,7 +10,7 @@ resource "azurerm_netapp_volume" "hanadata" { var.hana_ANF_volumes.use_existing_data_volume ? ( 0 ) : ( - var.database_server_count + var.database_server_count > 1 ? 2 : 1 )) : ( 0 ) @@ -59,7 +59,7 @@ data "azurerm_netapp_volume" "hanadata" { count = var.hana_ANF_volumes.use_for_data ? ( var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - var.database_server_count + var.database_server_count > 1 ? 2 : 1 ) : ( 0 )) : ( @@ -89,7 +89,7 @@ resource "azurerm_netapp_volume" "hanalog" { var.hana_ANF_volumes.use_existing_log_volume ? ( 0 ) : ( - var.database_server_count + var.database_server_count > 1 ? 2 : 1 )) : ( 0 ) @@ -135,7 +135,7 @@ data "azurerm_netapp_volume" "hanalog" { count = var.hana_ANF_volumes.use_for_log ? ( var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - var.database_server_count + var.database_server_count > 1 ? 2 : 1 ) : ( 0 )) : ( @@ -164,7 +164,7 @@ resource "azurerm_netapp_volume" "hanashared" { var.hana_ANF_volumes.use_existing_shared_volume ? ( 0 ) : ( - var.database_server_count + 1 )) : ( 0 ) @@ -212,7 +212,7 @@ data "azurerm_netapp_volume" "hanashared" { count = var.hana_ANF_volumes.use_for_shared ? 
( var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - var.database_server_count + 1 ) : ( 0 )) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index f7511c3271..e7ee9e4f4c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -141,7 +141,7 @@ output "hana_data_primary" { output "hana_data_secondary" { description = "HANA Data Secondary volume" - value = try(var.hana_ANF_volumes.use_for_data && var.database.high_availability ? ( + value = try(var.hana_ANF_volumes.use_for_data && var.database_server_count > 1 ? ( format("%s:/%s", var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( data.azurerm_netapp_volume.hanadata[1].mount_ip_addresses[0]) : ( @@ -157,7 +157,7 @@ output "hana_data_secondary" { ), "") } -# output "hana_data" { + # output "hana_data" { # value = var.hana_ANF_volumes.use_for_data ? ( # var.database.high_availability ? ( # [format("%s:/%s", @@ -214,7 +214,7 @@ output "hana_log_primary" { output "hana_log_secondary" { description = "HANA Log secondary volume" - value = try(var.hana_ANF_volumes.use_for_log && var.database.high_availability ? ( + value = try(var.hana_ANF_volumes.use_for_log && var.database_server_count > 1 ? ( format("%s:/%s", var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( data.azurerm_netapp_volume.hanalog[1].mount_ip_addresses[0]) : ( @@ -248,24 +248,6 @@ output "hana_shared_primary" { ), "") } -output "hana_shared_secondary" { - description = "HANA Shared secondary volume" - value = try(var.hana_ANF_volumes.use_for_shared && var.database.high_availability ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]) : ( - try(azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0], "") - ), - var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? 
( - data.azurerm_netapp_volume.hanashared[1].volume_path) : ( - try(azurerm_netapp_volume.hanashared[1].volume_path, "") - ) - ) - ) : ( - "" - ), "") - } - output "application_volume_group" { description = "Application volume group" From 4afd8b738d3bc38fd720582fc91db0bde74bddf8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 31 Jan 2024 20:04:48 +0200 Subject: [PATCH 181/607] feng shui --- .../0.1-bom-validator/tasks/bom_download.yaml | 302 ++++++++---------- .../tasks/bom_validator.yaml | 72 ++--- 2 files changed, 176 insertions(+), 198 deletions(-) diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml index 3ea6aea0e6..0376468f93 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_download.yaml @@ -13,7 +13,7 @@ # -------------------------------------+---------------------------------------8 -# -------------------------------------+---------------------------------------8 +# -------------------------------------+-----------------------------------------8 # Step: 02 # Description: Informational # @@ -41,10 +41,10 @@ - allowSharedKeyAccess block: -# -------------------------------------+---------------------------------------8 -# Step: 03-01 -# Description: -# + # -----------------------------------+-----------------------------------------8 + # Step: 03-01 + # Description: + # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Check is file {{ bom_media_entry.archive }} is already downloaded" ansible.builtin.uri: url: "{{ sapbits_location_base_path }}/{{ sapbits_bom_files }}/archives/{{ bom_media_entry.archive }}{% if sapbits_sas_token is not undefined %}?{{ sapbits_sas_token }}{% endif %}" @@ -56,24 +56,24 @@ ansible.builtin.debug: var: blob_exists verbosity: 1 -# Step: 03-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 03-01 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 03-02 -# Description: Validate the url status. Fail if authentication fails -# + # -------------------------------+---------------------------------------------8 + # Step: 03-02 + # Description: Validate the url status. 
Fail if authentication fails + # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Authentication error {{ bom_media_entry.archive }}" ansible.builtin.fail: msg: "Authentication error, please check the SAS token" when: blob_exists.status == 403 -# Step: 03-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 03-02 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 03-03 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 03-03 + # Description: + # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Set Fact {{ bom_media_entry.archive }}" ansible.builtin.set_fact: proceed: false @@ -89,10 +89,10 @@ - not allowSharedKeyAccess block: -# -------------------------------------+---------------------------------------8 -# Step: 03-01 -# Description: -# + # -----------------------------------+-----------------------------------------8 + # Step: 03-01 + # Description: + # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Check is file {{ bom_media_entry.archive }} is already downloaded" ansible.builtin.command: >- az storage blob show @@ -109,28 +109,28 @@ ansible.builtin.debug: var: azresult verbosity: 2 -# Step: 03-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 03-01 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 03-02 - END -# -------------------------------------+---------------------------------------8 + # -------------------------------+---------------------------------------------8 + # Step: 03-02 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 03-03 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 03-03 + # Description: + # - name: "{{ task_prefix }} - BOM: {{ bom_name }} Set Fact {{ bom_media_entry.archive }}" ansible.builtin.set_fact: proceed: false when: azresult.rc == 0 -# Step: 03-03 - END -# -------------------------------------+---------------------------------------8 + # Step: 03-03 - END + # -------------------------------+---------------------------------------------8 # Step: 03 - END -# -------------------------------------+---------------------------------------8 +# -------------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 +# -------------------------------------+---------------------------------------------8 # Step: 04 # Description: Informational check of the proceed parameter. 
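 # (proceed is set to false in Step 03 when the archive already exists in the
 # storage account, so the download/upload block in Step 05 only runs while it
 # remains true.)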
# @@ -150,24 +150,24 @@ proceed: {{ proceed }} failed_when: false # Step: 04 - END -# -------------------------------------+---------------------------------------8 +# -----------------------------------+-----------------------------------------8 -# -------------------------------------+---------------------------------------8 +# -----------------------------------+-----------------------------------------8 # Step: 05 # Description: # - name: "BOM: Download File {{ bom_media_entry.archive }}" block: -# -------------------------------------+---------------------------------------8 -# Step: 05-01 -# Description: -# + # -----------------------------------+-----------------------------------------8 + # Step: 05-01 + # Description: + # - name: "BOM: {{ bom_name }} Download File" block: -# -------------------------------------+---------------------------------------8 -# Step: 05-01-01 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 05-01-01 + # Description: + # - name: "BOM: {{ bom_name }} Download File {{ bom_media_entry.archive }}" ansible.builtin.get_url: url: "{{ bom_media_entry.url | string | trim }}" @@ -186,31 +186,31 @@ delay: 1 no_log: false rescue: -# -------------------------------------+---------------------------------------8 -# Step: 05-01-01-on-failure-01 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 05-01-01-on-failure-01 + # Description: + # - name: "BOM: Ensure URL is correct" ansible.builtin.set_fact: file_url: "{{ bom_media_entry.url | lower | urlsplit('scheme') }}://{{ bom_media_entry.url | lower | urlsplit('hostname') }}/{{ bom_media_entry.url | lower | urlsplit('path') | replace('\"', '') }}" -# Step: 05-01-01-on-failure-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-01-01-on-failure-01 - END + # ---------------------------+-------------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-01-01-on-failure-02 -# Description: -# + # -----------------------------------+-----------------------------------------8 + # Step: 05-01-01-on-failure-02 + # Description: + # - name: "BOM: Ensure URL is correct" ansible.builtin.debug: msg: - "file_url: '{{ file_url }}" -# Step: 05-01-01-on-failure-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-01-01-on-failure-02 - END + # ---------------------------+-------------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-01-01-on-failure-03 -# Description: -# + # ---------------------------+-------------------------------------------------8 + # Step: 05-01-01-on-failure-03 + # Description: + # - name: "BOM: {{ bom_name }} Download File {{ bom_media_entry.archive }}" ansible.builtin.get_url: url: "{{ file_url | trim }}" @@ -228,57 +228,36 @@ no_log: false # failed_when: false -# INCOMMING attempt attempt error handling - Collision with dynamic BOM - Unresolved - # - name: "BOM: {{ bom_name }} Download File {{ item.archive }}" - # ansible.builtin.debug: - # var: result - # verbosity: 2 + # Step: 05-01-01-on-failure-03 - END + # ---------------------------+-------------------------------------------------8 - # - name: "ErrorHandling" - # ansible.builtin.fail: - # msg: "DOWNLOAD:0001:Download failed, please check the URL ({{ item.url }})" - # when: 
result.status_code == 400 + # Step: 05-01-01 - END + # ---------------------------+-------------------------------------------------8 - # - name: "ErrorHandling" - # ansible.builtin.fail: - # msg: "DOWNLOAD:0002:Download failed, please check the S-user credentials" - # when: result.status_code == 401 + # Step: 05-01 - END + # -----------------------------------+-----------------------------------------8 - # - name: "ErrorHandling" - # ansible.builtin.fail: - # msg: "DOWNLOAD:0003:Download from SAP failed, URL ({{ item.url }})" - # when: result.status_code != 200 - -# Step: 05-01-01-on-failure-03 - END -# -------------------------------------+---------------------------------------8 - -# Step: 05-01-01 - END -# -------------------------------------+---------------------------------------8 - -# Step: 05-01 - END -# -------------------------------------+---------------------------------------8 - -# -------------------------------------+---------------------------------------8 -# Step: 05-02 -# Description: -# + # -----------------------------------+-----------------------------------------8 + # Step: 05-02 + # Description: + # - name: "BOM: {{ bom_name }} Download File {{ bom_media_entry.archive }}" ansible.builtin.debug: var: result verbosity: 1 -# Step: 05-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-02 - END + # -----------------------------------+-----------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-03 -# Description: Checksum block -# + # -----------------------------------+-----------------------------------------8 + # Step: 05-03 + # Description: Checksum block + # - name: "BOM: Create checksums" block: -# -------------------------------------+---------------------------------------8 -# Step: 05-03-01 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 05-03-01 + # Description: + # - name: "BOM: Verify Files" ansible.builtin.stat: path: "{{ result.dest }}" @@ -294,20 +273,20 @@ ansible.builtin.debug: var: create_checksums verbosity: 1 -# Step: 05-03-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-01 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-03-02 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 05-03-02 + # Description: + # - name: "block" block: -# -------------------------------------+---------------------------------------8 -# Step: 05-03-02-01 -# Description: -# + # ---------------------------+-------------------------------------------------8 + # Step: 05-03-02-01 + # Description: + # # # MKD - Interesting change to task, but ultimately leaves room for error. 
# # It can identify an incorrectly indentented line and insert a line @@ -336,13 +315,13 @@ - fs_check is defined - create_checksums is defined - bom_file is defined -# Step: 05-03-02-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-02-01 - END + # ---------------------------+-------------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-03-02-02 -# Description: -# + # ---------------------------+-------------------------------------------------8 + # Step: 05-03-02-02 + # Description: + # - name: "BOM: Remove marker" ansible.builtin.lineinfile: path: "{{ bom_file }}" @@ -354,70 +333,69 @@ path: "{{ bom_file }}" regexp: '# END ANSIBLE MANAGED BLOCK {{ bom_media_entry.archive }}' state: absent -# Step: 05-03-02-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-02-02 - END + # ---------------------------+-------------------------------------------------8 when: - bom_media_entry.checksum is defined - bom_media_entry.checksum | bool is not true -# Step: 05-03-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-02 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-03-03 -# Description: Update in memory BOM -# + # -------------------------------+---------------------------------------------8 + # Step: 05-03-03 + # Description: Update in memory BOM + # - name: "block" block: -# -------------------------------------+---------------------------------------8 -# Step: 05-03-03-01 -# Description: -# + # ---------------------------+-------------------------------------------------8 + # Step: 05-03-03-01 + # Description: + # - name: "Update BOM" ansible.builtin.set_fact: bom: "{{ bom_update }}" vars: bom_update: "{#- -#}{% set _ = bom.materials.media[bom_media_index].update({'checksum': fs_check.stat.checksum}) -%} {{ bom }}" - # Step: 05-03-03-01 - END - -# -------------------------------------+---------------------------------------8 + # Step: 05-03-03-01 - END + # ---------------------------+-------------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-03-03-02 -# Description: -# + # ---------------------------+-------------------------------------------------8 + # Step: 05-03-03-02 + # Description: + # - name: "BOM: Show" ansible.builtin.debug: var: bom.materials.media[bom_media_index] verbosity: 1 -# Step: 05-03-03-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-03-02 - END + # ---------------------------+-------------------------------------------------8 when: - create_checksums is defined -# Step: 05-03-03 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03-03 - END + # -------------------------------+---------------------------------------------8 when: - bom_media_entry.checksum is not defined -# Step: 05-03 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-03 - END + # -----------------------------------+-----------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-04 -# Description: -# + # 
-----------------------------------+-----------------------------------------8 + # Step: 05-04 + # Description: + # - name: "Upload file" block: - # 03) Upload files to Azure - # TODO - put this in a block -# -------------------------------------+---------------------------------------8 -# Step: 05-04-01 -# Description: -# + # 03) Upload files to Azure + # TODO - put this in a block + # -------------------------------+---------------------------------------------8 + # Step: 05-04-01 + # Description: + # - name: "BOM: {{ bom_name }} Upload File {{ bom_media_entry.archive }} using SAS keys" ansible.builtin.command: >- @@ -460,25 +438,25 @@ when: - sapbits_sas_token is undefined -# Step: 05-04-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-04-01 - END + # -------------------------------+---------------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 05-04-02 -# Description: -# + # -------------------------------+---------------------------------------------8 + # Step: 05-04-02 + # Description: + # - name: "BOM: {{ bom_name }} Remove File {{ bom_media_entry.archive }}" # become: true ansible.builtin.file: dest: "{{ download_directory }}/files/{{ bom_media_entry.archive }}" state: absent -# Step: 05-04-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-04-02 - END + # -------------------------------+---------------------------------------------8 when: - sa_enabled -# Step: 05-04 - END -# -------------------------------------+---------------------------------------8 + # Step: 05-04 - END + # -----------------------------------+-----------------------------------------8 when: proceed # Step: 05 - END diff --git a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml index bc2669b60a..820d298aa0 100644 --- a/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml +++ b/deploy/ansible/roles-sap/0.1-bom-validator/tasks/bom_validator.yaml @@ -93,32 +93,32 @@ - name: "Generate SAS token block" block: -# -------------------------------------+---------------------------------------8 -# Step: 04-01 -# Description: -# + # -----------------------------------+---------------------------------------8 + # Step: 04-01 + # Description: + # - name: "{{ task_prefix }} - Informational" ansible.builtin.debug: msg: |- Entering SAS token block... 
verbosity: 1 -# Step: 04-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 04-01 - END + # -----------------------------------+---------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 04-02 -# Description: -# + # -----------------------------------+---------------------------------------8 + # Step: 04-02 + # Description: + # - name: "{{ task_prefix }} - Calculate expiration date (+3 Days)" ansible.builtin.set_fact: expiry: "{{ '%Y-%m-%d' | strftime((ansible_date_time.epoch | int) + (86400 * 3)) }}" -# Step: 04-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 04-02 - END + # -----------------------------------+---------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 04-03 -# Description: -# + # -----------------------------------+---------------------------------------8 + # Step: 04-03 + # Description: + # - name: "{{ task_prefix }} - Create SAP Binaries Storage Account SAS" ansible.builtin.command: >- az storage account generate-sas \ @@ -131,18 +131,18 @@ --output tsv changed_when: false register: az_sapbits_sas_token -# Step: 04-03 - END -# -------------------------------------+---------------------------------------8 + # Step: 04-03 - END + # -----------------------------------+---------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 04-04 -# Description: -# + # -----------------------------------+---------------------------------------8 + # Step: 04-04 + # Description: + # - name: "{{ task_prefix }} - Extract SAP Binaries Storage Account SAS (temp)" ansible.builtin.set_fact: sapbits_sas_token: "{{ az_sapbits_sas_token.stdout }}" -# Step: 04-04 - END -# -------------------------------------+---------------------------------------8 + # Step: 04-04 - END + # -------------------------------------+---------------------------------------8 vars: task_prefix: Generate SAS token block @@ -174,27 +174,27 @@ - name: "Combine dependent BoMs block" block: -# -------------------------------------+---------------------------------------8 -# Step: 06-01 -# Description: -# + # -------------------------------------+---------------------------------------8 + # Step: 06-01 + # Description: + # - name: "{{ task_prefix }} - Combine dependent BoMs" ansible.builtin.set_fact: root_media_list: "{{ root_media_list + bom.materials.media | flatten(levels=1) }}" -# Step: 06-01 - END -# -------------------------------------+---------------------------------------8 + # Step: 06-01 - END + # -------------------------------------+---------------------------------------8 -# -------------------------------------+---------------------------------------8 -# Step: 06-02 -# Description: -# + # -------------------------------------+---------------------------------------8 + # Step: 06-02 + # Description: + # - name: "{{ task_prefix }} - Informational" ansible.builtin.debug: msg: |- {{ root_media_list }} verbosity: 1 -# Step: 06-02 - END -# -------------------------------------+---------------------------------------8 + # Step: 06-02 - END + # -------------------------------------+---------------------------------------8 vars: task_prefix: Combine dependent BoMs block From f88e7a3c1beca0871499fe14a36db4346741860d Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Mon, 5 Feb 2024 16:29:04 +0530 
Subject: [PATCH 182/607] Feature : HANA Scale out with worker/stand by node using shared ANF storage (#536) * Add files via upload * Test run for debugging HANA Scale out deployment * Test run for debugging HANA Scale out deployment * Test run for debugging HANA Scale out deployment-2 * Test run for debugging HANA Scale out deployment-3 * Test run for debugging HANA Scale out deployment-4 * add correct ANF permissions for scale out - ANF scenario * Merge changes for Scale out ANF into playbook_04_00_db_install for seamless integration * Suppress root password display and mark to verbose * temporary changes to scale-out mounting * just role name * add probe threshold to scs and hana db only. * Add condition to skip mounting for database scale-out * Update database server count condition in vm-hdb.tf * Update ANF volume count based on database server count * Commented out the mountpoint comparision lines in ANF mounts scaleout tasks for testing * Commented out comparision of the log mount and numbers of hosts for scale out without HSR/pacemaker for testing * Commented out HANA Scale-Out mounts in db_install playbook * Update HANA installation configuration Add listen_interface and internal_network values to HANA_2_00_install_scaleout_anf.rsp template. Set restrict_max_mem to 'n' in HANA_2_00_install_scaleout_anf.rsp template. Refactor playbook_04_00_00_db_install.yaml to simplify conditionals and improve readability. * Fix ANF Mounts for Scale Out * Update async_write_submit_active value in main.yaml * debug subnet prefix from instance metadata * Refactor database configuration and installation tasks * Fix SAP system start and stop commands * Update environment variables and paths for HDB installation and SAP mounts --------- Co-authored-by: hdamecharla --- .../ansible/playbook_04_00_00_db_install.yaml | 86 +- .../4.0.3-hdb-install-scaleout/readme.md | 0 .../tasks/main.yaml | 591 +++++++++++ .../HANA_2_00_055_v1_install.rsp.xml.j2 | 14 + .../HANA_2_00_install_scaleout_anf.rsp | 305 ++++++ .../templates/template.cfg | 140 +++ .../tasks/2.6.1.2-anf-mounts-scaleout.yaml | 988 ++++++++++++++++++ .../tasks/2.6.8-anf-mounts-simplemount.yaml | 7 + .../2.6-sap-mounts/tasks/main.yaml | 12 + deploy/ansible/vars/ansible-input-api.yaml | 1 + .../sap_system/app_tier/infrastructure.tf | 3 + .../sap_system/hdb_node/infrastructure.tf | 1 + 12 files changed, 2147 insertions(+), 1 deletion(-) create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_055_v1_install.rsp.xml.j2 create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/template.cfg create mode 100644 deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 11a62d6595..750608ef3a 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -77,12 +77,72 @@ tags: - kv-sap-installation-media-storage-details + - name: "Database Installation Playbook: - Generate root password" + ansible.builtin.set_fact: + root_password: "{{ lookup('ansible.builtin.password', '/dev/null', seed=inventory_hostname) }}" + when: + - 
db_scale_out is defined + - db_scale_out + - not db_high_availability + register: root_password_generated + + - name: "Database Installation Playbook: - Show root password" + ansible.builtin.debug: + msg: "{{ hostvars.localhost.root_password }}" + verbosity: 4 + when: + - hostvars.localhost.root_password is defined + + # /*----------------------------------------------------------------------------8 # | | # | Playbook for HANA DB Install | # | | # +------------------------------------4--------------------------------------*/ +# +------------------------Scale out HANA configuration only -----------------*/ + +# This configures root account on HANA nodes for scale out + anf configuration only +- hosts: "{{ sap_sid | upper }}_DB" + name: DB Installation - login configuration + remote_user: "{{ orchestration_ansible_user }}" + gather_facts: true + any_errors_fatal: true + vars_files: + - vars/ansible-input-api.yaml + tasks: + - name: "SAP HANA: Configure root credential for Scale-Out" + block: + - name: Reset root password + become: true + ansible.builtin.user: + name: root + update_password: always + password: "{{ hostvars.localhost.root_password | password_hash('sha512') }}" + + - name: Enable {{ item.key }} in /etc/ssh/sshd_config + become: true + lineinfile: + path: "/etc/ssh/sshd_config" + regex: "^(# *)?{{ item.key }}" + line: "{{ item.key }} {{ item.value }}" + state: present + loop: + - { key: "PermitRootLogin", value: "yes" } + - { key: "PasswordAuthentication", value: "yes" } + - { key: "ChallengeResponseAuthentication", value: "yes" } + + - name: "Restart SSHD on {{ ansible_hostname }}" + become: true + service: + name: sshd + state: restarted + + when: + - not db_high_availability + - db_scale_out | default(false) == true + - hostvars.localhost.root_password is defined + - hosts: "{{ sap_sid | upper }}_DB" name: DB Installation - HANA @@ -107,7 +167,8 @@ - name: "Database Installation Playbook: - Install HANA" become: true - when: node_tier == 'hana' + when: + - node_tier == 'hana' block: - name: "Database Installation Playbook: - Setting the DB facts" ansible.builtin.set_fact: @@ -115,9 +176,18 @@ main_password: "{{ hostvars.localhost.sap_password }}" sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + # Only applicable for scale out with HSR tags: - always + - name: "Database Installation Playbook: - Setting the DB Scale Out facts" + ansible.builtin.set_fact: + root_password: "{{ hostvars.localhost.root_password }}" + when: + - db_scale_out is defined + - db_scale_out + - not db_high_availability + - name: "Database Installation Playbook: - Show SAP password" ansible.builtin.debug: msg: "{{ hostvars.localhost.sap_password }}" @@ -128,6 +198,20 @@ - name: "Database Installation Playbook: - run HANA installation" ansible.builtin.include_role: name: roles-db/4.0.0-hdb-install + when: + - not db_scale_out + + # - name: "Database installation Playbook: - run HANA Scale-Out mounts" + # ansible.builtin.include_role: + # name: roles-sap-os/2.6-sap-mounts + # when: + # - db_scale_out | default(false) == true + + - name: "Database Installation Playbook: - run HANA Scale-Out installation" + ansible.builtin.include_role: + name: roles-db/4.0.3-hdb-install-scaleout + when: + - db_scale_out - name: "Database Installation Playbook: - Create db-install-done flag" delegate_to: localhost diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md 
b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml
new file mode 100644
index 0000000000..287923480f
--- /dev/null
+++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml
@@ -0,0 +1,591 @@
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |     Perform the SAP DB Instance installation for scale out                 |
+# |     SAP: Register BOM                                                      |
+# |     create .params directory                                               |
+# |     deploy db install template                                             |
+# |     deploy hdblcm password file                                            |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+---
+
+# +------------------------------------4--------------------------------------*/
+- name:                                "SAP HANA: Set BOM facts"
+  ansible.builtin.set_fact:
+    sap_inifile:                       "hdbserver_{{ virtual_host }}_{{ sap_sid }}_install.rsp"
+    dir_params:                        "{{ tmp_directory }}/.params"
+
+- name:                                "SAP HANA: Create list of all db hosts"
+  ansible.builtin.set_fact:
+    db_hosts:                          "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
+
+# 0x) Create hidden directory for parameter files
+- name:                                "SAP HANA: Create directories"
+  ansible.builtin.file:
+    path:                              "{{ item.path }}"
+    state:                             directory
+    mode:                              '{{ item.mode }}'
+  loop:
+    - { mode: '0755', path: '{{ dir_params }}' }
+    - { mode: '0755', path: '{{ tmp_directory }}/{{ db_sid | upper }}' }
+    - { mode: '0755', path: '/etc/sap_deployment_automation/{{ db_sid | upper }}' }
+
+- name:                                "SAP HANA: Install reset"
+  ansible.builtin.file:
+    path:                              "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt"
+    state:                             absent
+  when:                                reinstall
+
+- name:                                "SAP HANA: check if installed"
+  ansible.builtin.stat:
+    path:                              "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt"
+  register:                            hana_installed
+
+- name:                                "SAP HANA: check media exists"
+  ansible.builtin.stat:
+    path:                              "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE/hdblcm"
+  register:                            hdblcm_found
+
+- name:                                "ErrorHandling"
+  ansible.builtin.fail:
+    msg:                               "INSTALL:0001:Unable to find hdblcm, please check that the installation media is mounted"
+  when:                                not hdblcm_found.stat.exists
+
+- name:                                "SAP HANA - Retrieve Subscription ID and Resource Group Name"
+  ansible.builtin.uri:
+    url:                               http://169.254.169.254/metadata/instance?api-version=2021-02-01
+    use_proxy:                         false
+    headers:
+      Metadata:                        true
+  register:                            azure_metadata
+
+- name:                                "SAP HANA - Show IMDS results"
+  ansible.builtin.debug:
+    var:                               azure_metadata.json
+    verbosity:                         2
+
+- name:                                "SAP HANA - Extract details"
+  ansible.builtin.set_fact:
+    subnet_address:                    "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].address }}"
+    subnet_prefix:                     "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix }}"
+
+- name:                                "SAP HANA - Show the subnet details"
+  ansible.builtin.debug:
+    msg:
+      - "Subnet Address: {{ subnet_address }}"
+      - "Subnet Prefix: {{ subnet_prefix }}"
+      - "Subnet CIDR: {{ (subnet_address + '/' + subnet_prefix) }}"
+# Scale out ANF only runs on primary node or the first node in the SID_DB list. This is mandatory.
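+# For illustration only (hypothetical values): the subnet fields extracted above can
+# also be read straight from IMDS on the VM, and the resulting CIDR is what feeds
+# _rsp_internal_network in the response file template below, e.g.
+#   curl -s -H "Metadata:true" \
+#     "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/subnet/0?api-version=2021-02-01"
+#   => {"address":"10.10.2.0","prefix":"24"}  ->  internal_network = 10.10.2.0/24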
+- name: "HANA Install - Scale Out - ANF" + block: + + - name: "SAP HANA: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout_anf.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. + _rsp_root_password: "{{ root_password }}" + # Note: Last node in the DB list is marked as standby, while everything else except first node is marked as worker node + # This is the way !!! + _rsp_additional_hosts: "{% for item in db_hosts[1:] %} + {% if loop.index == db_hosts | length -1 %} + {{ item }}:role=standby:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "SAP HANA: installation" + block: + - name: "SAP HANA: Execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ + - name: "SAP HANA: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "SAP HANA: Successful installation" + block: + + - name: "SAP HANA: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + # - name: "SAP HANA: remove install response file" + # ansible.builtin.file: + # path: "{{ dir_params }}/{{ sap_inifile }}" + # state: absent + + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "SAP HANA: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "SAP HANA: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + # - name: "Prepare global.ini for internal network netmask." + # become_user: root + # become: true + # community.general.ini_file: + # path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + # section: communication + # state: present + # mode: 0644 + # option: internal_network + # value: "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ipaddr('network/prefix') }}" + + - name: "Prepare global.ini for public hostname resolution." 
+ become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "public_hostname_resolution" + state: present + mode: 0644 + option: "map_{{ hostvars[item].virtual_host }}" + value: "{{ hostvars[item].ansible_host }}" + with_items: + - "{{ groups[(sap_sid | upper)~'_DB' ] }}" + + - name: "Prepare global.ini for site hosts name resolution (Primary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ groups[(sap_sid | upper)~'_DB' ] }}" + + - name: "Prepare global.ini for NetApp storage optimizations" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "{{ item.section }}" + mode: 0644 + state: present + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - { section: "fileio", option: "async_read_submit", value: "on" } + - { section: "fileio", option: "max_parallel_io_requests", value: "128" } + - { section: "fileio", option: "async_write_submit_active", value: "on" } + - { section: "fileio", option: "async_write_submit_blocks", value: "all" } + - { section: "persistence", option: "datavolume_striping", value: "true" } + - { section: "persistence", option: "datavolume_striping_size_gb", value: "15000" } + + + - name: "SAP HANA: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true + + #ToDo: Check if we can interrogate the HANA DB to see if it is stopped. + - name: "Wait 5 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 300 + + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true + + - name: "Wait 5 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 300 + + when: + - not hana_installed.stat.exists + - db_high_availability | default(false) == false + # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm. + - ansible_hostname == db_hosts[0] + - db_scale_out is defined + - db_scale_out + +# TODO: add block for Scale out with HSR support here, same as regular installation. 
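+# A possible refinement for the fixed five-minute waits above (untested sketch that
+# addresses the ToDo): poll sapcontrol until every process reports GRAY, relying on
+# its documented exit code 4 ("all processes stopped"). The task would need the same
+# environment settings as the Stop/Start tasks above.
+# - name: "Wait until HANA is stopped"
+#   become_user: "{{ db_sid | lower }}adm"
+#   become: true
+#   ansible.builtin.shell: sapcontrol -nr {{ db_instance_number }} -function GetProcessList
+#   register: hana_proc_list
+#   until: hana_proc_list.rc == 4
+#   retries: 30
+#   delay: 10
+#   changed_when: false
+#   failed_when: false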
+- name: "HANA Install - Scale Out - HSR" + block: + + - name: "SAP HANA: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "SAP HANA: installation" + block: + - name: "SAP HANA: Execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ + - name: "SAP HANA: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "SAP HANA: Successful installation" + block: + + - name: "SAP HANA: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "SAP HANA: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + when: + - not hana_installed.stat.exists + - db_high_availability is defined + - db_high_availability == true + - db_scale_out is defined + - db_scale_out == true + + +- name: "HANA Install status" + block: + + - name: "HANA Install status" + ansible.builtin.debug: + msg: "HANA is already installed" + + - name: "HANA: - return value" + ansible.builtin.set_fact: + hana_already_installed: true + + - name: "SAP HANA: check if ARM Deployment done" + ansible.builtin.stat: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + register: hana_arm_deployment_done + + - name: "SAP HANA: Successful installation" + block: + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id_tmp: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name_tmp: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id_tmp }}" + - "Resource Group Name: {{ resource_group_name_tmp }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + 
ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscription_id: "{{ subscription_id_tmp }}" + resource_group_name: "{{ resource_group_name_tmp }}" + + - name: "SAP HANA: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + when: + - not hana_arm_deployment_done.stat.exists + + + - name: "SAP HANA: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + when: + - hana_installed.stat.exists + +... +# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_055_v1_install.rsp.xml.j2 b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_055_v1_install.rsp.xml.j2 new file mode 100644 index 0000000000..c07fad2e1c --- /dev/null +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_055_v1_install.rsp.xml.j2 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp new file mode 100644 index 0000000000..bc31db0ffe --- /dev/null +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp @@ -0,0 +1,305 @@ +[General] + +# Location of Installation Medium +component_medium= + +# Comma separated list of component directories +component_dirs= + +# Use single master password for all users, created during installation ( Default: n ) +use_master_password= {{ use_master_password }} + +# Directory root to search for components +component_root={{ _rsp_component_root }} + +# Skip all SAP Host Agent calls ( Default: n ) +skip_hostagent_calls=n + +# Remote Execution ( Default: ssh; Valid values: ssh | saphostagent ) +remote_execution=ssh + +# Verify the authenticity of SAP HANA components ( Default: n ) +verify_signature=n + +# Components ( Valid values: all | client | es | ets | lcapps | server | smartda | streaming | rdsync | xs | studio | afl | sca | sop | eml | rme | rtl | trp ) +components={{ _rsp_components }} + +# Install Execution Mode ( Default: standard; Valid values: standard | optimized ) +install_execution_mode=standard + +# Ignore failing prerequisite checks +ignore= + +# Do not Modify '/etc/sudoers' File ( Default: n ) +skip_modify_sudoers=n + +[Server] + +# Enable usage of persistent memory ( Default: n ) +use_pmem=n + +# Enable the installation or upgrade of the SAP Host Agent ( Default: y ) +install_hostagent=n + +# Database Isolation ( Default: low; Valid values: low | high ) +db_isolation=low + +# Create initial tenant database ( Default: y ) +create_initial_tenant=y + +# Non-standard Shared File System +checkmnt= + +# Installation Path ( Default: /hana/shared ) +sapmnt={{ _rsp_sapmnt }} + +# Local Host Name ( Default: x00dhdb00l0b3a ) +hostname={{ _rsp_hostname }} + +# Install SSH Key ( Default: y ) +install_ssh_key=y + +# Root User Name For Remote Hosts ( Default: root ) +root_user=root + +# Root User Password For Remote Hosts +root_password={{ _rsp_root_password }} + +# SAP Host Agent User (sapadm) Password +sapadm_password={{ password_copy }} + +# Directory containing a 
storage configuration +storage_cfg= + +# Listen Interface ( Valid values: global | internal | local ) +listen_interface=global + +# Internal Network Address +internal_network= {{ _rsp_internal_network }} + +# SAP HANA System ID +sid={{ _rsp_sid }} + +# Instance Number +number={{ _rsp_number }} + +# Local Host Worker Group ( Default: default ) +workergroup=default + +# System Usage ( Default: custom; Valid values: production | test | development | custom ) +system_usage={{ _rsp_system_usage }} + +# Instruct the Local Secure Store (LSS) to trust an unsigned SAP HANA Database ( Default: n ) +lss_trust_unsigned_server=n + +# Do you want to enable data and log volume encryption? ( Default: n ) +volume_encryption=n + +# Location of Data Volumes ( Default: /hana/data/${sid} ) +datapath=/hana/data/${sid} + +# Location of Log Volumes ( Default: /hana/log/${sid} ) +logpath=/hana/log/${sid} + +# Location of Persistent Memory Volumes ( Default: /hana/pmem/${sid} ) +pmempath=/hana/pmem/${sid} + +# Directory containing custom configurations +custom_cfg= + +# SAP HANA Database secure store ( Default: ssfs; Valid values: ssfs | localsecurestore ) +secure_store=ssfs + +# Restrict maximum memory allocation? +restrict_max_mem=n + +# Maximum Memory Allocation in MB +max_mem= + +# Certificate Host Names +certificates_hostmap= + +# Master Password +master_password={{ main_password }} + +# System Administrator Password +password={{ password_copy }} + +# System Administrator Home Directory ( Default: /usr/sap/${sid}/home ) +home=/usr/sap/${sid}/home + +# System Administrator Login Shell ( Default: /bin/sh ) +shell=/bin/sh + +# System Administrator User ID +userid={{ hdbadm_uid }} + +# ID of User Group (sapsys) +groupid={{ sapsys_gid }} + +# Database User (SYSTEM) Password +system_user_password={{ password_copy }} + +# Restart system after machine reboot? ( Default: n ) +autostart=n + +# Enable HANA repository ( Default: y ) +repository=y + +# Inter Service Communication Mode ( Valid values: standard | ssl ) +isc_mode= + +[Action] + +# Action ( Default: exit; Valid values: install | update | extract_components ) +action=install + +[AddHosts] + +# Auto Initialize Services ( Default: y ) +auto_initialize_services=y + +# Additional Hosts +addhosts={{ _rsp_additional_hosts }} + +# Additional Local Host Roles ( Valid values: extended_storage_worker | extended_storage_standby | ets_worker | ets_standby | streaming | xs_worker | xs_standby ) +add_local_roles= + +# Automatically assign XS Advanced Runtime roles to the hosts with database roles (y/n) ( Default: y ) +autoadd_xs_roles=y + +# Import initial content of XS Advanced Runtime ( Default: y ) +import_xs_content=y + +[Client] + +# SAP HANA Database Client Installation Path ( Default: ${sapmnt}/${sid}/hdbclient ) +client_path=/hana/shared/${sid}/hdbclient + +[Studio] + +# SAP HANA Studio Installation Path ( Default: ${sapmnt}/${sid}/hdbstudio ) +studio_path=/hana/shared/${sid}/hdbstudio + +# Enables copying of SAP HANA Studio repository ( Default: y ) +studio_repository=y + +# Target path to which SAP HANA Studio repository should be copied +copy_repository= + +# Java Runtime ( Default: ) +vm= + +[Reference_Data] + +# Installation Path for Address Directories and Reference Data +reference_data_path= + +[XS_Advanced] + +# Install XS Advanced in the default tenant database? 
(y/n) ( Default: n ) +xs_use_default_tenant=n + +# XS Advanced App Working Path +xs_app_working_path= + +# Organization Name For Space "SAP" ( Default: orgname ) +org_name=orgname + +# XS Advanced Admin User ( Default: XSA_ADMIN ) +org_manager_user=XSA_ADMIN + +# XS Advanced Admin User Password +org_manager_password= + +# Customer Space Name ( Default: PROD ) +prod_space_name=PROD + +# Routing Mode ( Default: ports; Valid values: ports | hostnames ) +xs_routing_mode=ports + +# XS Advanced Domain Name (see SAP Note 2245631) +xs_domain_name= + +# Run Applications in SAP Space with Separate OS User (y/n) ( Default: y ) +xs_sap_space_isolation=y + +# Run Applications in Customer Space with Separate OS User (y/n) ( Default: y ) +xs_customer_space_isolation=y + +# XS Advanced SAP Space OS User ID +xs_sap_space_user_id= + +# XS Advanced Customer Space OS User ID +xs_customer_space_user_id= + +# XS Advanced Components +xs_components= + +# Do not start the selected XS Advanced components after installation ( Default: none ) +xs_components_nostart=none + +# XS Advanced Components Configurations +xs_components_cfg= + +# XS Advanced Certificate +xs_cert_pem= + +# XS Advanced Certificate Key +xs_cert_key= + +# XS Advanced Trust Certificate +xs_trust_pem= + +[lss] + +# Installation Path for Local Secure Store ( Default: /lss/shared ) +lss_inst_path=/lss/shared + +# Local Secure Store User Password +lss_user_password= + +# Local Secure Store User ID +lss_userid= + +# Local Secure Store User Group ID +lss_groupid= + +# Local Secure Store User Home Directory ( Default: /usr/sap/${sid}/lss/home ) +lss_user_home=/usr/sap/${sid}/lss/home + +# Local Secure Store User Login Shell ( Default: /bin/sh ) +lss_user_shell=/bin/sh + +# Local Secure Store Auto Backup Password +lss_backup_password= + +[streaming] + +# Streaming Cluster Manager Password +streaming_cluster_manager_password= + +# Location of Streaming logstores and runtime information ( Default: /hana/data_streaming/${sid} ) +basepath_streaming=/hana/data_streaming/${sid} + +[es] + +# Location of Dynamic Tiering Data Volumes ( Default: /hana/data_es/${sid} ) +es_datapath=/hana/data_es/${sid} + +# Location of Dynamic Tiering Log Volumes ( Default: /hana/log_es/${sid} ) +es_logpath=/hana/log_es/${sid} + +[ets] + +# Location of Data Volumes of the Accelerator for SAP ASE ( Default: /hana/data_ase/${sid} ) +ase_datapath=/hana/data_ase/${sid} + +# Location of Log Volumes of the Accelerator for SAP ASE ( Default: /hana/log_ase/${sid} ) +ase_logpath=/hana/log_ase/${sid} + +# SAP ASE Administrator User ( Default: sa ) +ase_user=sa + +# SAP ASE Administrator Password +ase_user_password= diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/template.cfg b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/template.cfg new file mode 100644 index 0000000000..ccbfca006a --- /dev/null +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/template.cfg @@ -0,0 +1,140 @@ +[Action] + +# Execution scenario +action=install + +[General] + +# Use Simplified Installation UI +simplified_ui=n + +# Skip all SAP Host Agent calls +skip_hostagent_calls=n + +# Remote Execution +remote_execution=ssh + +# Use single master password for all users, created during installation +use_master_password=n + +# Verify the authenticity of SAP HANA components +verify_signature=n + +# Components +components=server + +# Install Execution Mode +install_execution_mode=standard + +# Do not Modify '/etc/sudoers' File +skip_modify_sudoers=n + +[Server] + +# Enable 
usage of persistent memory +use_pmem=n + +# Enable the installation or upgrade of the SAP Host Agent +install_hostagent=y + +# Database Mode +db_mode=multidb + +# Database Isolation +db_isolation=low + +# Create initial tenant database +create_initial_tenant=y + +# Installation Path +sapmnt=/hana/shared + +# Local Host Name +hostname=hana-01 + +# Install SSH Key +install_ssh_key=y + +# Root User Name For Remote Hosts +root_user=root + +# Register the SAP HANA System with systemd +use_systemd=n + +# Listen Interface +listen_interface=global + +# Internal Network Address +internal_network=none + +# SAP HANA System ID +sid=XDB + +# Instance Number +number=00 + +# Configure Python version +configure_python=python3 + +# Local Host Worker Group +workergroup=default + +# System Usage +system_usage=test + +# Do you want to enable data and log volume encryption? +volume_encryption=n + +# Location of Data Volumes +datapath=/hana/data/XDB + +# Location of Log Volumes +logpath=/hana/log/XDB + +# Restrict maximum memory allocation? +restrict_max_mem=n + +# Maximum Memory Allocation in MB +max_mem=0 + +# Apply System Size Dependent Resource Limits? (SAP Note 3014176) +apply_system_size_dependent_parameters=y + +# Certificate Host Names +certificates_hostmap=hana-01=hana-01 +certificates_hostmap=hana-02=hana-02 + +# System Administrator Home Directory +home=/usr/sap/XDB/home + +# System Administrator Login Shell +shell=/bin/sh + +# System Administrator User ID +userid=1001 + +# ID of User Group (sapsys) +groupid=2000 + +# Do not start the instance after installation +nostart=n + +# Restart system after machine reboot? +autostart=n + +# Enable HANA repository +repository=y + +# Inter Service Communication Mode +isc_mode=standard + +[AddHosts] + +# Auto Initialize Services +auto_initialize_services=y + +# Additional Hosts +addhosts=hana-02:role=standby:group=default:workergroup=default + +# Tenant Database User Name +tenantdb_user=SYSTEM \ No newline at end of file diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml new file mode 100644 index 0000000000..18b423d504 --- /dev/null +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml @@ -0,0 +1,988 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Perform the ANF system mounts for Scale out systems only | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- name: "ANF Mount: Set the NFS Service name" + ansible.builtin.set_fact: + nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}" + +- name: "ANF Mount: Set the NFSmount options" + ansible.builtin.set_fact: + mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' + when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + +- name: "ANF Mount: Set the NFSmount options" + ansible.builtin.set_fact: + mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' + when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 
'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + +- name: "ANF Mount: Define this SID" + ansible.builtin.set_fact: + this_sid: + { + 'sid': '{{ sap_sid | upper }}', + 'dbsid_uid': '{{ hdbadm_uid }}', + 'sidadm_uid': '{{ sidadm_uid }}', + 'ascs_inst_no': '{{ scs_instance_number }}', + 'pas_inst_no': '{{ pas_instance_number }}', + 'app_inst_no': '{{ app_instance_number }}' + } + +- name: "ANF Mount: Create list of all_sap_mounts to support " + ansible.builtin.set_fact: + all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" + db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +- name: "ANF Mount: Ensure the NFS service is stopped" + ansible.builtin.systemd: + name: "{{ nfs_service }}" + state: stopped + when: + - "'scs' in supported_tiers" + - sap_mnt is not defined + - sap_trans is not defined + +# /*---------------------------------------------------------------------------8 +# | | +# | Mount the ANF Volumes | +# | Make sure to set the NFS domain in /etc/idmapd.conf on the VM to match the | +# | default domain configuration on Azure NetApp Files: defaultv4iddomain.com. | +# | and the mapping is set to nobody | +# | We use tier in tasks as well, to treat any special scenarios that may arise| +# +------------------------------------4--------------------------------------*/ +# For additional information refer to the below URLs +# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-suse#mount-the-azure-netapp-files-volume +# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-red-hat#mount-the-azure-netapp-files-volume +- name: "ANF Mount: NFS Domain Setting (ANF)" + block: + - name: "ANF Mount: Domain is configured as + the default Azure NetApp Files domain" + ansible.builtin.lineinfile: + path: /etc/idmapd.conf + regexp: '^[ #]*Domain = ' + line: 'Domain = defaultv4iddomain.com' + insertafter: '[General]' + when: + - tier == 'sapos' + register: id_mapping_changed + + - name: "ANF Mount: Make sure that user + mapping is set to 'nobody'" + ansible.builtin.lineinfile: + path: /etc/idmapd.conf + regexp: '^[ #]*Nobody-User = ' + line: 'Nobody-User = nobody' + insertafter: '^[ #]*Nobody-User = ' + when: + - tier == 'sapos' + register: id_mapping_changed + + - name: "ANF Mount: Make sure that group + mapping is set to 'nobody'" + ansible.builtin.lineinfile: + path: /etc/idmapd.conf + regexp: '^[ #]*Nobody-Group = ' + line: 'Nobody-Group = nobody' + insertafter: '^[ #]*Nobody-Group = ' + when: + - tier == 'sapos' + register: id_mapping_changed + when: + - tier == 'sapos' + +- name: "ANF Mount: Set nfs4_disable_idmapping to Y" + ansible.builtin.lineinfile: + path: /etc/modprobe.d/nfs.conf + line: 'options nfs nfs4_disable_idmapping=Y' + create: true + mode: 0644 + when: + - tier == 'sapos' + +- name: "ANF Mount: Ensure the services are restarted" + block: + - name: "AF Mount: Ensure the rpcbind service is restarted" + ansible.builtin.systemd: + name: rpcbind + state: restarted + - name: "ANF Mount: Ensure the NFS ID Map service is restarted" + ansible.builtin.systemd: + name: "nfs-idmapd" + daemon-reload: true + state: restarted + - name: "ANF Mount: Pause for 5 seconds" + ansible.builtin.pause: + seconds: 5 + - name: "ANF Mount: Ensure the NFS service is restarted" + ansible.builtin.systemd: + name: "{{ nfs_service }}" + state: restarted + when: + - id_mapping_changed is changed + 
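+# After the three lineinfile edits above, /etc/idmapd.conf should contain a fragment
+# like the following (illustrative; the sections already exist in the
+# distribution-provided file):
+#   [General]
+#   Domain = defaultv4iddomain.com
+#   [Mapping]
+#   Nobody-User = nobody
+#   Nobody-Group = nobody
+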
+# /*---------------------------------------------------------------------------8 +# | | +# | Prepare for the /usr/sap mounts | +# | Create temporary directory structure | +# | Mount the share, create the directory structure on share | +# | Unmount and clean up temporary directory structure | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "ANF Mount: install:Get the Server name list" + ansible.builtin.set_fact: + first_app_server_temp: "{{ first_app_server_temp | default([]) + [item] }}" + with_items: + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +- name: "ANF Mount: usr/sap" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'usrsap', + 'temppath': 'tmpusersap', + 'mount': '{{ usr_sap_mountpoint }}', + 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'path': '/usr/sap', + 'set_chattr_on_dir': false, + 'target_nodes': ['app','pas'], + 'create_temp_folders': false + } + vars: + primary_host: "{{ first_app_server_temp | first }}" + when: + - tier == 'sapos' + - usr_sap_mountpoint is defined + +# /*---------------------------------------------------------------------------8 +# | | +# | Prepare for the sap_mnt mounts | +# | Create temporary directory structure | +# | Mount the share, create the directory structure on share | +# | Unmount and clean up temporary directory structure | +# | | +# +------------------------------------4--------------------------------------*/ +- name: "ANF Mount: (sapmnt)" + block: + - name: "ANF Mount: Create /saptmp" + ansible.builtin.file: + path: "/saptmp" + state: directory + mode: 0755 + group: sapsys + + - name: "ANF Mount: (sapmnt)" + block: + - name: "ANF Mount: Filesystems on ANF (sapmnt)" + ansible.posix.mount: + src: "{{ sap_mnt }}" + path: "/saptmp" + fstype: "nfs4" + opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" + state: mounted + rescue: + - name: "ANF Mount: Clear the cache of the nfsidmap daemon (ANF)" + ansible.builtin.shell: | + nfsidmap -c + - name: "ANF Mount: Ensure the rpcbind service is restarted" + ansible.builtin.systemd: + name: rpcbind + daemon-reload: true + state: restarted + + - name: "ANF Mount: Create SAP Directories (spmnt & usrsap)" + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + mode: 0755 + loop: + - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } + + - name: "ANF Mount: Create SAP Directories (ANF)" + ansible.builtin.file: + path: "/saptmp/sapmnt{{ item.sid | upper }}" + state: directory + mode: 0755 + loop: "{{ MULTI_SIDS }}" + when: MULTI_SIDS is defined + + - name: "ANF Mount: Unmount file systems (sap_mnt)" + ansible.posix.mount: + src: "{{ sap_mnt }}" + path: "/saptmp" + state: unmounted + + - name: "ANF Mount: Delete locally created SAP Directories" + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: + - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } + - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' 
} + + - name: "ANF Mount: Remove SAP Directories (ANF)" + ansible.builtin.file: + path: "/saptmp/sapmnt{{ item.sid | upper }}" + state: absent + loop: "{{ MULTI_SIDS }}" + when: MULTI_SIDS is defined + + - name: "ANF Mount: Cleanup fstab and directory (sap_mnt)" + ansible.posix.mount: + src: "{{ sap_mnt }}" + path: "/saptmp" + fstype: "nfs4" + opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" + state: absent + + when: + - tier == 'sapos' + - "'scs' in supported_tiers" + - sap_mnt is defined + +# /*---------------------------------------------------------------------------8 +# | | +# | Perform the sap_mnt mounts | +# | Create directories and make them immutable | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "ANF Mount: Create SAP Directories (sapmnt)" + ansible.builtin.file: + owner: "{{ item.sidadm_uid }}" + group: sapsys + mode: 0755 + path: "/sapmnt/{{ item.sid }}" + state: directory + register: is_created_now + loop: "{{ all_sap_mounts }}" + when: + - tier == 'sapos' + - node_tier in ['app','scs','ers', 'pas'] or 'scs' in supported_tiers + - sap_mnt is defined + +- name: "ANF Mount: Change attribute only when we create SAP Directories (sap_mnt)" + ansible.builtin.file: + path: "{{ item.item.path }}" + state: directory + mode: 0755 + attr: i+ + loop: "{{ is_created_now.results }}" + when: + - tier == 'sapos' + - item.item is changed + register: set_immutable_attribute + +- name: "ANF Mount: Create SAP Directories (scs & ers)" + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + owner: '{{ sidadm_uid }}' + group: sapsys + mode: 0755 + loop: + - { path: '/usr/sap/{{ sap_sid | upper }}' } + - { path: '/usr/sap/{{ sap_sid | upper }}/SYS' } + - { path: '/usr/sap/{{ sap_sid | upper }}/{{ instance_type | upper }}{{ scs_instance_number }}' } + - { path: '/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' } + when: + - tier == 'sapos' + - node_tier in ['scs','ers'] or 'scs' in supported_tiers + - sap_mnt is defined + - MULTI_SIDS is undefined + register: is_created_now3 + +- name: "ANF Mount: Change attribute only when we create SAP Directories (scs & ers)" + ansible.builtin.file: + path: "{{ item.item.path }}" + state: directory + mode: 0755 + attr: i+ + loop: "{{ is_created_now3.results }}" + when: + - tier == 'sapos' + - item.item is changed + register: set_immutable_attribute + +- name: "ANF Mount: Debug" + ansible.builtin.debug: + msg: 'isHA:{{ scs_high_availability }} | node_tier:{{ node_tier }} | tier:{{ tier }} | sapmnt:{{ sap_mnt }}' + +- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Distributed Non-HA" + ansible.posix.mount: + src: "{{ item.src }}" + path: "{{ item.path }}" + fstype: "{{ item.type }}" + opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + state: mounted + loop: + - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } + when: + - tier == 'sapos' + - sap_mnt is defined + - not scs_high_availability + - ansible_play_hosts_all | length > 1 + - node_tier != 'hana' + +- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Single instance" + ansible.posix.mount: + src: "{{ item.src }}" + path: "{{ item.path }}" + fstype: "{{ item.type }}" + opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + state: mounted + loop: + - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } + when: + - tier == 'sapos' + - sap_mnt is defined + - not scs_high_availability + - 
ansible_play_hosts_all | length == 1 + + +- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Standalone MULTI_SIDS" + become: true + become_user: root + ansible.posix.mount: + src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" + path: "/sapmnt/{{ item.sid }}" + fstype: 'nfs4' + opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + state: mounted + loop: "{{ MULTI_SIDS }}" + when: + - not scs_high_availability + - sap_mnt is defined + - MULTI_SIDS is defined + +- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - High Availability" + ansible.posix.mount: + src: "{{ item.src }}" + path: "{{ item.path }}" + fstype: "{{ item.type }}" + opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + state: mounted + loop: + - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } + when: + - scs_high_availability + - tier in ['sapos'] + - node_tier != 'hana' + - sap_mnt is defined + +- name: "ANF Mount: usr/sap/{{ sap_sid | upper }}/SYS" + ansible.posix.mount: + src: "{{ item.src }}" + path: "{{ item.path }}" + fstype: "{{ item.type }}" + opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + state: mounted + loop: + - { type: 'nfs4', src: '{{ sap_mnt }}/usrsap{{ sap_sid | upper }}sys', path: '/usr/sap/{{ sap_sid | upper }}/SYS' } + when: + - scs_high_availability + - tier in ['sapos'] + - node_tier in ['scs','ers'] + - sap_mnt is defined + + +# /*---------------------------------------------------------------------------8 +# | | +# | Prepare for the sap_trans, install mounts | +# | Create temporary directory structure | +# | Mount the share, create the directory structure on share | +# | Unmount and clean up temporary directory structure | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "ANF Mount: install:Get the Server name list" + ansible.builtin.set_fact: + first_server_temp: "{{ first_server_temp | default([]) + [item] }}" + with_items: + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +- name: "ANF Mount: sap_trans" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'trans', + 'temppath': 'saptrans', + 'mount': '{{ sap_trans }}', + 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'path': '/usr/sap/trans', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes': ['app','pas', 'ers', 'scs'], + 'create_temp_folders': false + } + vars: + primary_host: "{{ first_server_temp | first }}" + when: + - tier == 'sapos' + - sap_trans is defined + +- name: "ANF Mount: install" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'install', + 'temppath': 'sapinstall', + 'folder': '{{ bom_base_name }}', + 'mount': '{{ usr_sap_install_mountpoint }}', + 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'path': '/usr/sap/install', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes': ['all'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ first_server_temp | first }}" + when: + - tier == 'sapos' + - usr_sap_install_mountpoint is defined + +# /*---------------------------------------------------------------------------8 +# | | +# | Prepare the OS for running SAP HANA on | +# | Azure NetApp Files with NFS | +# | Except Scale out + ANF | +# +------------------------------------4--------------------------------------*/ +- name: "ANF Mount: Prepare the OS for running + SAP HANA 
on Azure NetApp with NFS" + block: + - name: "ANF Mount: Create configuration file for the NetApp configuration settings" + ansible.builtin.blockinfile: + path: /etc/sysctl.d/91-NetApp-HANA.conf + backup: true + create: true + mode: 0644 + marker: "# {mark} HANA NetApp configuration high availability" + block: | + net.core.rmem_max = 16777216 + net.core.wmem_max = 16777216 + net.core.rmem_default = 16777216 + net.core.wmem_default = 16777216 + net.core.optmem_max = 16777216 + net.ipv4.tcp_rmem = 4096 131072 16777216 + net.ipv4.tcp_wmem = 4096 16384 16777216 + net.core.netdev_max_backlog = 300000 + net.ipv4.tcp_slow_start_after_idle=0 + net.ipv4.tcp_no_metrics_save = 1 + net.ipv4.tcp_moderate_rcvbuf = 1 + net.ipv4.tcp_window_scaling = 1 + net.ipv4.tcp_timestamps = 0 + net.ipv4.tcp_sack = 1 + when: + - node_tier == 'hana' + + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + + - name: "ANF Mount: Create configuration file for the NetApp configuration settings" + ansible.builtin.blockinfile: + path: /etc/sysctl.d/91-NetApp-HANA.conf + backup: true + create: true + mode: 0644 + marker: "# {mark} HANA NetApp configuration standalone" + block: | + net.core.rmem_max = 16777216 + net.core.wmem_max = 16777216 + net.core.rmem_default = 16777216 + net.core.wmem_default = 16777216 + net.core.optmem_max = 16777216 + net.ipv4.tcp_rmem = 4096 131072 16777216 + net.ipv4.tcp_wmem = 4096 16384 16777216 + net.core.netdev_max_backlog = 300000 + net.ipv4.tcp_slow_start_after_idle=0 + net.ipv4.tcp_no_metrics_save = 1 + net.ipv4.tcp_moderate_rcvbuf = 1 + net.ipv4.tcp_window_scaling = 1 + net.ipv4.tcp_timestamps = 1 + net.ipv4.tcp_sack = 1 + when: + - node_tier == 'hana' + - not database_high_availability + + - name: "ANF Mount: Create configuration file + with additional optimization settings" + ansible.builtin.blockinfile: + path: /etc/sysctl.d/ms-az.conf + backup: true + create: true + mode: 0644 + marker: "# {mark} HANA NetApp optimizations" + block: | + net.ipv6.conf.all.disable_ipv6 = 1 + net.ipv4.tcp_max_syn_backlog = 16348 + net.ipv4.conf.all.rp_filter = 0 + sunrpc.tcp_slot_table_entries = 128 + vm.swappiness=10 + when: + - node_tier == 'hana' + + # /*-----------------------------------------------------------------------8 + # | Configure the maximum number of (TCP) RPC requests that can be in | + # | flight at a time (to the NFS server) to be 128 | + # |--------------------------------4--------------------------------------*/ + - name: "ANF Mount: configure the maximum number + of RPC requests for the NFS session" + ansible.builtin.blockinfile: + path: /etc/modprobe.d/sunrpc.conf + backup: true + create: true + mode: 0644 + marker: "# {mark} NFS RPC Connections" + block: "options sunrpc tcp_max_slot_table_entries=128" + when: + - node_tier == 'hana' + + when: + - tier == 'sapos' + - node_tier == 'hana' + +- name: "ANF Mount: Create /hana folder" + ansible.builtin.file: + path: /hana + mode: 0755 + state: directory + group: sapsys + when: + - tier == 'sapos' + - node_tier == 'hana' + +# Note: This block ( and one for second DB note) must run only for HSR - pacemaker HANA scale out +# Currently we only support two node cluster + observer. 
+# TODO: Add support for >2(even count) node cluster + observer +- name: "ANF Mount: HANA data" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + # For HSR based scale out, needs DB high availability + - db_high_availability is defined + - db_high_availability == true + +- name: "ANF Mount: HANA log" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + # For HSR based scale out, needs DB high availability + - db_high_availability is defined + - db_high_availability == true + +- name: "ANF Mount: HANA shared" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'hanashared', + 'folder': 'hanashared', + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + # For HSR based scale out, needs DB high availability + - db_high_availability is defined + - db_high_availability == true + +- name: "ANF Mount: HANA data (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + # For HSR based scale out, needs DB high availability + - db_high_availability is defined + - db_high_availability == true + +- name: "ANF Mount: HANA log (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 1 
+ - db_hosts | length == 2
+ - ansible_hostname == db_hosts[1]
+ # For HSR based scale out, needs DB high availability
+ - db_high_availability is defined
+ - db_high_availability == true
+
+- name: "ANF Mount: HANA shared (secondary)"
+ ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
+ loop:
+ - {
+ 'type': 'shared',
+ 'temppath': 'hanashared',
+ 'folder': 'hanashared',
+ 'mount': '{{ hana_shared_mountpoint[1] }}',
+ 'opts': '{{ mnt_options }}',
+ 'path': '/hana/shared',
+ 'permissions': '0775',
+ 'set_chattr_on_dir': false,
+ 'target_nodes' : ['hana'],
+ 'create_temp_folders': true
+ }
+ vars:
+ primary_host: "{{ db_hosts[1] }}"
+ when:
+ - tier == 'sapos'
+ - node_tier == 'hana'
+ - hana_shared_mountpoint is defined
+ - hana_shared_mountpoint | length > 1
+ - db_hosts | length == 2
+ - ansible_hostname == db_hosts[1]
+ # For HSR based scale out, needs DB high availability
+ - db_high_availability is defined
+ - db_high_availability == true
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Prepare the OS for running SAP HANA on |
+# | Azure NetApp Files with NFS |
+# | Scale out + ANF |
+# +------------------------------------4--------------------------------------*/
+
+# For ANF mounts on SLES and RHEL, the tasks below replicate the steps in https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-scale-out-standby-netapp-files-suse#mount-the-azure-netapp-files-volumes
+# Mount the HANA shared volume onto the temp path
+
+- name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)"
+ ansible.builtin.file:
+ owner: "{{ db_sid | lower }}adm"
+ group: sapsys
+ mode: 0755
+ path: "/usr/sap/{{ db_sid | upper }}"
+ state: directory
+ when:
+ - tier == 'hana'
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+- name: "ANF Mount: Scale Out - Create SAP Directories (hana data)"
+ ansible.builtin.file:
+ owner: "{{ db_sid | lower }}adm"
+ group: sapsys
+ mode: 0755
+ path: "/hana/data/{{ db_sid | upper }}"
+ state: directory
+ when:
+ - tier == 'hana'
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+
+- name: "ANF Mount: Scale Out - Create SAP Directories (hana log)"
+ ansible.builtin.file:
+ owner: "{{ db_sid | lower }}adm"
+ group: sapsys
+ mode: 0755
+ path: "/hana/log/{{ db_sid | upper }}"
+ state: directory
+ when:
+ - tier == 'hana'
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+- name: "ANF Mount: HANA shared - Scale out"
+ ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
+ loop:
+ - {
+ 'type': 'shared',
+ 'temppath': 'shared',
+ # change folder to match the mount folder within the share
+ 'folder': 'shared',
+ 'mount': '{{ hana_shared_mountpoint[0] }}',
+ 'opts': '{{ mnt_options }}',
+ 'path': '/hana/shared',
+ 'permissions': '0775',
+ 'set_chattr_on_dir': false,
+ 'target_nodes' : ['hana'],
+ 'create_temp_folders': true
+ }
+ vars:
+ # Run this on all the nodes, not just primary.
+ primary_host: "{{ ansible_hostname }}"
+ when:
+ - node_tier == 'hana'
+ - hana_shared_mountpoint is defined
+ - hana_shared_mountpoint | length > 0
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+# This runs for a unique share per node
+- name: "ANF Mount: usrsap - Scale out"
+ ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
+ loop:
+ - {
+ 'type': 'usrsap',
+ 'temppath': 'usrsap',
+ 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}",
+ 'mount': '{{ hana_shared_mountpoint[0] }}',
+ 'opts': '{{ mnt_options }}',
+ 'path': '/usr/sap/{{ db_sid | upper }}',
+ 'permissions': '0775',
+ 'set_chattr_on_dir': false,
+ 'target_nodes' : ['hana'],
+ 'create_temp_folders': true
+ }
+ vars:
+ primary_host: "{{ ansible_hostname }}"
+ when:
+ - node_tier == 'hana'
+ - hana_shared_mountpoint is defined
+ - hana_shared_mountpoint | length == 1
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+- name: "ANF Mount: HANA Data - Scale out - Create mount list"
+ block:
+ - name: "Initialize HANA Data mountpoints"
+ ansible.builtin.set_fact:
+ hana_data_scaleout_mountpoint: []
+ - name: "Build HANA Data mountpoints"
+ ansible.builtin.set_fact:
+ # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}"
+ hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}"
+ loop: "{{ hana_data_mountpoint }}"
+ loop_control:
+ index_var: my_index
+ # Note the object structure and the specific key/value pairs. Do not modify the hard-coded values.
+ vars:
+ dataupdate:
+ - { type: 'data',
+ temppath: 'hanadata',
+ folder: 'hanadata',
+ mount: "{{ item }}",
+ opts: "{{ mnt_options }}",
+ path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}",
+ permissions: '0775',
+ set_chattr_on_dir: false,
+ target_nodes: ['hana'],
+ create_temp_folders: 'true'
+ }
+ when:
+ - node_tier == 'hana'
+ - hana_data_mountpoint is defined
+ # - hana_data_mountpoint | length == db_hosts | length
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+- name: "DEBUG: ANF Mount: HANA Data - Scale out - Create mount list"
+ ansible.builtin.debug:
+ var: hana_data_scaleout_mountpoint
+
+- name: "ANF Mount: HANA Data - Scale out"
+ ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
+ # Do not change this to loop:. It breaks, and the cause is unclear; it only seems to work with with_items:, despite multiple formatting attempts.
+ with_items:
+ - "{{ hana_data_scaleout_mountpoint | list }}"
+ vars:
+ primary_host: "{{ ansible_hostname }}"
+ when:
+ - node_tier == 'hana'
+ - hana_data_mountpoint is defined
+ # - hana_data_mountpoint | length == db_hosts | length
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+
+
+- name: "ANF Mount: HANA Log - Scale out - Create mount list"
+ block:
+ - name: "Initialize HANA Log mountpoints"
+ ansible.builtin.set_fact:
+ hana_log_scaleout_mountpoint: []
+
+ - name: "Build HANA log mountpoints"
+ ansible.builtin.set_fact:
+ hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}"
+ loop: "{{ hana_log_mountpoint }}"
+ loop_control:
+ index_var: my_index
+ # Note the object structure and the specific key/value pairs. Do not modify the hard-coded values.
+ vars:
+ logupdate:
+ - { type: 'log',
+ temppath: 'hanalog',
+ folder: 'hanalog',
+ mount: "{{ item }}",
+ opts: "{{ mnt_options }}",
+ path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}",
+ permissions: '0775',
+ set_chattr_on_dir: false,
+ target_nodes: ['hana'],
+ create_temp_folders: 'true'
+ }
+ when:
+ - node_tier == 'hana'
+ - hana_log_mountpoint is defined
+ # - hana_log_mountpoint | length == db_hosts | length
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+- name: "DEBUG: ANF Mount: HANA Log - Scale out - Create mount list"
+ ansible.builtin.debug:
+ var: hana_log_scaleout_mountpoint
+
+- name: "ANF Mount: HANA Log - Scale out"
+ ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
+ # Do not change this to loop:. It breaks, and the cause is unclear; it only seems to work with with_items:, despite multiple formatting attempts.
+ with_items:
+ - "{{ hana_log_scaleout_mountpoint | list }}"
+ vars:
+ primary_host: "{{ ansible_hostname }}"
+ when:
+ - node_tier == 'hana'
+ - hana_log_mountpoint is defined
+ # - hana_log_mountpoint | length == db_hosts | length
+ # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA.
+ - db_scale_out is defined
+ - db_scale_out
+
+
+
+- name: "ANF Mount: Set Permissions on HANA (HSR) Directories ({{ item.path }})"
+ ansible.builtin.file:
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ path: "{{ item.path }}"
+ state: directory
+ recurse: true
+ loop:
+ - { 'path': '/hana/data' }
+ - { 'path': '/hana/log' }
+ - { 'path': '/hana/shared' }
+ when:
+ - tier == 'sapos'
+ - node_tier == 'hana'
+ - db_high_availability is defined
+ - db_high_availability == true
+
+
+- name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})"
+ ansible.builtin.file:
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ path: "{{ item.path }}"
+ state: directory
+ recurse: true
+ with_items:
+ - "{{ hana_log_scaleout_mountpoint }}"
+ - "{{ hana_data_scaleout_mountpoint }}"
+ - { 'path': '/hana/shared' }
+ - { 'path': '/usr/sap/{{ db_sid | upper }}' }
+ when:
+ - tier == 'sapos'
+ - node_tier == 'hana'
+ - db_high_availability | default(false) == false
+ - db_scale_out
+
+...
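For reference, the "Build HANA Data/Log mountpoints" tasks above generate one
mount entry per ANF volume and number the mount paths mnt00001, mnt00002, and
so on. A minimal Python sketch of that path construction (the SID and volume
exports below are made-up sample values; the real ones come from db_sid and
hana_data_mountpoint):

    # Mirrors the Jinja expression used in the tasks above:
    #   '/hana/data/' + (db_sid | upper) + '/mnt0000' + ((my_index + 1) | string)
    db_sid = "xdb"                                       # hypothetical SID
    hana_data_mountpoint = ["10.1.0.4:/xdb-hanadata-1",  # hypothetical ANF exports
                            "10.1.0.5:/xdb-hanadata-2"]

    for my_index, mount in enumerate(hana_data_mountpoint):
        path = "/hana/data/" + db_sid.upper() + "/mnt0000" + str(my_index + 1)
        print(f"{mount} -> {path}")
    # 10.1.0.4:/xdb-hanadata-1 -> /hana/data/XDB/mnt00001
    # 10.1.0.5:/xdb-hanadata-2 -> /hana/data/XDB/mnt00002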
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index 25347b51c3..de9834b44a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -512,6 +512,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - name: "ANF Mount: HANA data" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml @@ -533,6 +534,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_data_mountpoint is defined - hana_data_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -557,6 +559,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_log_mountpoint is defined - hana_log_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -581,6 +584,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -605,6 +609,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_data_mountpoint is defined - hana_data_mountpoint | length > 1 - db_hosts | length == 2 @@ -630,6 +635,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_log_mountpoint is defined - hana_log_mountpoint | length > 1 - db_hosts | length ==2 @@ -655,6 +661,7 @@ when: - tier == 'sapos' - node_tier == 'hana' + - not db_scale_out - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 - db_hosts | length == 2 diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 6285cead27..d8c3420014 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -358,6 +358,18 @@ - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - not use_simple_mount - NFS_provider == 'ANF' + # only run when no scale out configuration is used. + - db_scale_out is not defined or db_scale_out == false + + +# Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. 
ANF is used +- name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" + ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml + when: + - NFS_provider == 'ANF' + - db_scale_out is defined + - db_scale_out + - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - name: "2.6 SAP Mounts: - Import ANF tasks" ansible.builtin.import_tasks: 2.6.8-anf-mounts-simplemount.yaml diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index d4be5dbc84..31e9624ea8 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -208,6 +208,7 @@ use_simple_mount: false # Cluster - Defaults # database_high_availability: false +db_scale_out: false database_cluster_type: "AFA" # scs_high_availability: false scs_cluster_type: "AFA" diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index ef4d627f53..cdcd3a4a0b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -132,6 +132,7 @@ resource "azurerm_lb_probe" "scs" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = var.application_tier.scs_high_availability && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 4 : 2 + probe_threshold = 2 } resource "azurerm_lb_probe" "clst" { @@ -148,6 +149,7 @@ resource "azurerm_lb_probe" "clst" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = var.application_tier.scs_high_availability && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 4 : 2 + probe_threshold = 2 } resource "azurerm_lb_probe" "fs" { @@ -163,6 +165,7 @@ resource "azurerm_lb_probe" "fs" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = var.application_tier.scs_high_availability && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 
4 : 2 + probe_threshold = 2 } # Create the SCS Load Balancer Rules diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf index f38a9274d8..bc630e1e8c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf @@ -103,6 +103,7 @@ resource "azurerm_lb_probe" "hdb" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = 2 + probe_threshold = 2 } # TODO: From 3e299c596762998039c47e3f65cb646cd461b189 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 5 Feb 2024 17:18:09 +0200 Subject: [PATCH 183/607] Support for HANA scaleout for ANF volumes (#534) * Update database server count and ANF volumes to support scale out scenarios * Update hana_shared output in outputs.tf * Update db_server_count to include stand_by_node_count * Add output for disks attached to virtual machines * Update HANA Data volumes description and value * Fix keyvault check in deploy_controlplane.sh * Update hana_data and hana_log paths in module.tf * Refactor ANF volume outputs in HDB node module * Refactor agent_pat and agent_pool variables in sap_deployer module.tf * Refactor code and add TFVars support * Refactor ANF_pool_settings in variables_local.tf * Fix standby node count description and ANF volume count calculation * Refactor data and log volume count calculations in variables_local.tf * Fix data and log volume count calculation in variables_local.tf * Lint errors * Lint Fixes part 2 --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Controllers/FileController.cs | 12 +- Webapp/SDAF/Controllers/Helper.cs | 14 +- .../SDAF/Controllers/LandscapeController.cs | 63 +++++++- Webapp/SDAF/Controllers/SystemController.cs | 48 +++++- Webapp/SDAF/Models/AppFileEntity.cs | 2 +- Webapp/SDAF/Models/DatabaseSettings.cs | 2 + Webapp/SDAF/Models/SystemModel.cs | 49 ++++++- .../SDAF/ParameterDetails/SystemDetails.json | 45 ++++-- .../SDAF/ParameterDetails/SystemTemplate.txt | 19 ++- Webapp/SDAF/ParameterDetails/VM-Images.json | 12 ++ Webapp/SDAF/SDAFWebApp.csproj | 2 +- Webapp/SDAF/Services/AppFileService.cs | 5 + Webapp/SDAF/Services/ITableStorageService.cs | 4 +- Webapp/SDAF/Services/LandscapeService.cs | 15 +- Webapp/SDAF/Services/SystemService.cs | 13 +- Webapp/SDAF/appsettings.json | 3 +- .../ansible/playbook_04_00_00_db_install.yaml | 6 +- .../tasks/main.yaml | 8 +- .../tasks/2.6.1.2-anf-mounts-scaleout.yaml | 18 +-- .../2.6-sap-mounts/tasks/main.yaml | 2 +- deploy/scripts/deploy_controlplane.sh | 2 +- .../bootstrap/sap_deployer/module.tf | 4 +- deploy/terraform/run/sap_deployer/module.tf | 4 +- deploy/terraform/run/sap_system/module.tf | 15 +- deploy/terraform/run/sap_system/output.tf | 1 + .../run/sap_system/tfvar_variables.tf | 56 +++++-- deploy/terraform/run/sap_system/transform.tf | 41 ++++++ .../run/sap_system/variables_local.tf | 37 ----- .../modules/sap_system/hdb_node/anf.tf | 32 ++-- .../modules/sap_system/hdb_node/outputs.tf | 137 ++++-------------- .../sap_system/hdb_node/variables_local.tf | 16 +- 31 files changed, 445 insertions(+), 242 deletions(-) diff --git a/Webapp/SDAF/Controllers/FileController.cs b/Webapp/SDAF/Controllers/FileController.cs index aab009d76f..35b2e7678b 100644 --- a/Webapp/SDAF/Controllers/FileController.cs +++ b/Webapp/SDAF/Controllers/FileController.cs @@ -62,7 +62,7 @@ public IActionResult UseTemplate(string fileName, string sourceController) { 
string content = restHelper.GetTemplateFile(fileName).Result; ViewBag.Message = content; - ViewBag.TemplateName = fileName.Substring(fileName.LastIndexOf('/') + 1); + ViewBag.TemplateName = fileName[(fileName.LastIndexOf('/') + 1)..]; ViewBag.SourceController = sourceController; return View("Create"); } @@ -143,7 +143,7 @@ public async Task ConvertFileToObject(string id, string sourceCon AppFile file = await _appFileService.GetByIdAsync(id, GetPartitionKey(id)); if (file == null) return NotFound(); - id = id.Substring(0, id.IndexOf('.')); + id = id[..id.IndexOf('.')]; byte[] bytes = file.Content; string bitString = Encoding.UTF8.GetString(bytes); string jsonString = Helper.TfvarToJson(bitString); @@ -319,7 +319,7 @@ public async Task EditAsync(string id, string newId, string fileC } else { - string newName = id.Substring(0, id.IndexOf("_custom")); + string newName = id[..id.IndexOf("_custom")]; return RedirectToAction("Edit", sourceController, new { @id = newName , @partitionKey= GetPartitionKey(id) }); } } @@ -418,7 +418,7 @@ public async Task DownloadFile(string id, string sourceController, private string GetPartitionKey(string id) { - return id.Substring(0, id.IndexOf('-')); + return id[..id.IndexOf('-')]; } public async Task GetImagesFile(string filename, int type, string partitionKey) @@ -434,12 +434,12 @@ public async Task GetImagesFile(string filename, int type, string parti if (filename.EndsWith("_custom_sizes.json")) { - newName = filename.Substring(filename.IndexOf("_custom_sizes.json") + 1); + newName = filename[(filename.IndexOf("_custom_sizes.json") + 1)..]; type = 1; } if (filename.EndsWith("_custom_naming.json")) { - newName = filename.Substring(filename.IndexOf("_custom_naming.json") + 1); + newName = filename[(filename.IndexOf("_custom_naming.json") + 1)..]; type = 2; } diff --git a/Webapp/SDAF/Controllers/Helper.cs b/Webapp/SDAF/Controllers/Helper.cs index 8381b127f9..26129a7bf9 100644 --- a/Webapp/SDAF/Controllers/Helper.cs +++ b/Webapp/SDAF/Controllers/Helper.cs @@ -263,7 +263,7 @@ public static async Task ProcessFormFile(IFormFile formFile, // a display name. 
MemberInfo property = typeof(FileUploadModel).GetProperty( - formFile.Name.Substring(formFile.Name.IndexOf(".", StringComparison.Ordinal) + 1)); + formFile.Name[(formFile.Name.IndexOf(".", StringComparison.Ordinal) + 1)..]); if (property != null) { @@ -384,7 +384,7 @@ public static string TfvarToJson(string hclString) int equalIndex = currLine.IndexOf("="); if (equalIndex >= 0) { - string key = currLine.Substring(0, equalIndex).Trim(); + string key = currLine[..equalIndex].Trim(); if (!key.StartsWith("\"")) { key = "\"" + key + "\""; @@ -398,12 +398,12 @@ public static string TfvarToJson(string hclString) while (!currLine.StartsWith("}")) { equalIndex = currLine.IndexOf("="); - var tagKey = currLine.Substring(0, equalIndex).Trim(); + var tagKey = currLine[..equalIndex].Trim(); if (!tagKey.StartsWith("\"")) { tagKey = "\"" + tagKey + "\""; } - var tagValue = currLine.Substring(equalIndex + 1, currLine.Length - (equalIndex + 1)).Trim(); + var tagValue = currLine[(equalIndex + 1)..].Trim(); value += "{"; value += "\"Key\":" + tagKey + "," + "\"Value\":" + tagValue.Trim(','); value += "},"; @@ -419,12 +419,12 @@ public static string TfvarToJson(string hclString) while (!currLine.StartsWith("}")) { equalIndex = currLine.IndexOf("="); - var tagKey = currLine.Substring(0, equalIndex).Trim(); + var tagKey = currLine[..equalIndex].Trim(); if (!tagKey.StartsWith("\"")) { tagKey = "\"" + tagKey + "\""; } - var tagValue = currLine.Substring(equalIndex + 1, currLine.Length - (equalIndex + 1)).Trim(); + var tagValue = currLine[(equalIndex + 1)..].Trim(); value += "{"; value += "\"Key\":" + tagKey + "," + "\"Value\":" + tagValue.Trim(','); value += "},"; @@ -435,7 +435,7 @@ public static string TfvarToJson(string hclString) } else { - value = currLine.Substring(equalIndex + 1, currLine.Length - (equalIndex + 1)).Trim(); + value = currLine[(equalIndex + 1)..].Trim(); if (!value.EndsWith(",") && !value.EndsWith("{")) { value += ","; diff --git a/Webapp/SDAF/Controllers/LandscapeController.cs b/Webapp/SDAF/Controllers/LandscapeController.cs index 1d452fe1b0..9b6f205a54 100644 --- a/Webapp/SDAF/Controllers/LandscapeController.cs +++ b/Webapp/SDAF/Controllers/LandscapeController.cs @@ -8,6 +8,7 @@ using System; using System.Collections.Generic; using System.IO; +using System.Net; using System.Text; using System.Threading.Tasks; @@ -21,7 +22,7 @@ public class LandscapeController : Controller private FormViewModel landscapeView; private readonly IConfiguration _configuration; private RestHelper restHelper; - private ImageDropdown[] imagesOffered; + private readonly ImageDropdown[] imagesOffered; private List imageOptions; private Dictionary imageMapping; private readonly string sdafControlPlaneEnvironment; @@ -142,7 +143,7 @@ public async Task GetById(string id, string partitionKey) [HttpGet] public async Task GetByIdJson(string id) { - string environment = id.Substring(0, id.IndexOf('-')); + string environment = id[..id.IndexOf('-')]; LandscapeEntity landscape = await _landscapeService.GetByIdAsync(id, environment); if (landscape == null || landscape.Landscape == null) return NotFound(); return Json(landscape.Landscape); @@ -188,6 +189,10 @@ public async Task CreateAsync(LandscapeModel landscape) landscape.Id = Helper.GenerateId(landscape); await _landscapeService.CreateAsync(new LandscapeEntity(landscape)); TempData["success"] = "Successfully created workload zone " + landscape.Id; + string id = landscape.Id; + string path = $"/LANDSCAPE/{id}/{id}.tfvars"; + string content = 
Helper.ConvertToTerraform(landscape); + return RedirectToAction("Index"); } catch (Exception e) @@ -329,7 +334,24 @@ public async Task EditAsync(LandscapeModel landscape) if (newId != landscape.Id) { landscape.Id = newId; - return SubmitNewAsync(landscape).Result; + await SubmitNewAsync(landscape); + string id = landscape.Id; + string path = $"/LANDSCAPE/{id}/{id}.tfvars"; + string content = Helper.ConvertToTerraform(landscape); + byte[] bytes = Encoding.UTF8.GetBytes(content); + + AppFile file = new() + { + Id = WebUtility.HtmlEncode(path), + Content = bytes, + UntrustedName = path, + Size = bytes.Length, + UploadDT = DateTime.UtcNow + }; + + await _landscapeService.CreateTFVarsAsync(file); + + return RedirectToAction("Index"); } else { @@ -339,6 +361,23 @@ public async Task EditAsync(LandscapeModel landscape) } await _landscapeService.UpdateAsync(new LandscapeEntity(landscape)); TempData["success"] = "Successfully updated workload zone " + landscape.Id; + + string id = landscape.Id; + string path = $"/LANDSCAPE/{id}/{id}.tfvars"; + string content = Helper.ConvertToTerraform(landscape); + byte[] bytes = Encoding.UTF8.GetBytes(content); + + AppFile file = new() + { + Id = WebUtility.HtmlEncode(path), + Content = bytes, + UntrustedName = path, + Size = bytes.Length, + UploadDT = DateTime.UtcNow + }; + + await _landscapeService.CreateTFVarsAsync(file); + return RedirectToAction("Index"); } } @@ -371,6 +410,24 @@ public async Task SubmitNewAsync(LandscapeModel landscape) landscape.Id = Helper.GenerateId(landscape); await _landscapeService.CreateAsync(new LandscapeEntity(landscape)); TempData["success"] = "Successfully created workload zone " + landscape.Id; + string id = landscape.Id; + string path = $"/LANDSCAPE/{id}/{id}.tfvars"; + string content = Helper.ConvertToTerraform(landscape); + + byte[] bytes = Encoding.UTF8.GetBytes(content); + + AppFile file = new() + { + Id = WebUtility.HtmlEncode(id), + Content = bytes, + UntrustedName = id, + Size = bytes.Length, + UploadDT = DateTime.UtcNow + }; + + await _landscapeService.CreateTFVarsAsync(file); + + return RedirectToAction("Index"); } catch (Exception e) diff --git a/Webapp/SDAF/Controllers/SystemController.cs b/Webapp/SDAF/Controllers/SystemController.cs index 424e31d93b..785fef68ce 100644 --- a/Webapp/SDAF/Controllers/SystemController.cs +++ b/Webapp/SDAF/Controllers/SystemController.cs @@ -8,6 +8,7 @@ using System; using System.Collections.Generic; using System.IO; +using System.Net; using System.Text; using System.Threading.Tasks; @@ -20,7 +21,7 @@ public class SystemController : Controller private readonly ITableStorageService _appFileService; private FormViewModel systemView; private readonly IConfiguration _configuration; - private RestHelper restHelper; + private readonly RestHelper restHelper; private ImageDropdown[] imagesOffered; private List imageOptions; @@ -87,7 +88,15 @@ public async Task GetById(string id, string partitionKey) if (id == null || partitionKey == null) throw new ArgumentNullException(); var systemEntity = await _systemService.GetByIdAsync(id, partitionKey); if (systemEntity == null || systemEntity.System == null) throw new KeyNotFoundException(); - SystemModel s = JsonConvert.DeserializeObject(systemEntity.System); + SystemModel s = null; + try + { + s = JsonConvert.DeserializeObject(systemEntity.System); + } + catch + { + + } AppFile file = null; try { @@ -419,7 +428,25 @@ public async Task EditAsync(SystemModel system) if (system.Id == null) system.Id = newId; if (newId != system.Id) { - return 
SubmitNewAsync(system).Result; + await SubmitNewAsync(system); + string id = system.Id; + string path = $"/SYSTEM/{id}/{id}.tfvars"; + string content = Helper.ConvertToTerraform(system); + byte[] bytes = Encoding.UTF8.GetBytes(content); + + AppFile file = new() + { + Id = WebUtility.HtmlEncode(path), + Content = bytes, + UntrustedName = path, + Size = bytes.Length, + UploadDT = DateTime.UtcNow + }; + + await _systemService.CreateTFVarsAsync(file); + return RedirectToAction("Index"); + + } else { @@ -429,6 +456,21 @@ public async Task EditAsync(SystemModel system) } await _systemService.UpdateAsync(new SystemEntity(system)); TempData["success"] = "Successfully updated system " + system.Id; + string id = system.Id; + string path = $"/SYSTEM/{id}/{id}.tfvars"; + string content = Helper.ConvertToTerraform(system); + byte[] bytes = Encoding.UTF8.GetBytes(content); + + AppFile file = new() + { + Id = WebUtility.HtmlEncode(path), + Content = bytes, + UntrustedName = path, + Size = bytes.Length, + UploadDT = DateTime.UtcNow + }; + + await _systemService.CreateTFVarsAsync(file); return RedirectToAction("Index"); } } diff --git a/Webapp/SDAF/Models/AppFileEntity.cs b/Webapp/SDAF/Models/AppFileEntity.cs index 42a3f2c04a..d155f01071 100644 --- a/Webapp/SDAF/Models/AppFileEntity.cs +++ b/Webapp/SDAF/Models/AppFileEntity.cs @@ -11,7 +11,7 @@ public AppFileEntity() { } public AppFileEntity(string id, string uri) { RowKey = id; - PartitionKey = id.Substring(0, id.IndexOf('-')); + PartitionKey = id[..id.IndexOf('-')]; BlobUri = uri; } diff --git a/Webapp/SDAF/Models/DatabaseSettings.cs b/Webapp/SDAF/Models/DatabaseSettings.cs index 0e65e2a00d..5aac91d3a9 100644 --- a/Webapp/SDAF/Models/DatabaseSettings.cs +++ b/Webapp/SDAF/Models/DatabaseSettings.cs @@ -7,6 +7,7 @@ public class DatabaseSettings : IDatabaseSettings public string SystemCollectionName { get; set; } public string AppFileCollectionName { get; set; } public string AppFileBlobCollectionName { get; set; } + public string TfVarBlobCollectionName { get; set; } public string TemplateCollectionName { get; set; } public string ConnectionStringKey { get; set; } } @@ -18,6 +19,7 @@ public interface IDatabaseSettings string SystemCollectionName { get; set; } string AppFileCollectionName { get; set; } string AppFileBlobCollectionName { get; set; } + string TfVarBlobCollectionName { get; set; } string TemplateCollectionName { get; set; } string ConnectionStringKey { get; set; } } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 1b1c336bbb..9bd4d4941d 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -198,6 +198,8 @@ public bool IsValid() public Tag[] configuration_settings { get; set; } public bool? dns_a_records_for_secondary_names { get; set; } = true; + public bool? use_private_endpoint { get; set; } + /*---------------------------------------------------------------------------8 | | | Cluster information | @@ -283,8 +285,7 @@ public bool IsValid() [IpAddressValidator] public string[] database_vm_db_nic_ips { get; set; } - public string database_HANA_use_ANF_scaleout_scenario { get; set; } - + [IpAddressValidator] public string[] database_vm_db_nic_secondary_ips { get; set; } @@ -471,6 +472,14 @@ public bool IsValid() | | +------------------------------------4--------------------------------------*/ + public bool? 
ANF_HANA_use_AVG { get; set; } = false; + + /*---------------------------------------------------------------------------8 + | | + | Data | + | | + +------------------------------------4--------------------------------------*/ + public bool? ANF_HANA_data { get; set; } public int? ANF_HANA_data_volume_size { get; set; } @@ -481,6 +490,13 @@ public bool IsValid() public int? ANF_HANA_data_volume_throughput { get; set; } + public int? ANF_hana_data_volume_count { get; set; } = 1; + + /*---------------------------------------------------------------------------8 + | | + | Log | + | | + +------------------------------------4--------------------------------------*/ public bool? ANF_HANA_log { get; set; } public int? ANF_HANA_log_volume_size { get; set; } @@ -491,6 +507,13 @@ public bool IsValid() public int? ANF_HANA_log_volume_throughput { get; set; } + public int? ANF_hana_log_volume_count { get; set; } = 1; + + /*---------------------------------------------------------------------------8 + | | + | Shared | + | | + +------------------------------------4--------------------------------------*/ public bool? ANF_HANA_shared { get; set; } public int? ANF_HANA_shared_volume_size { get; set; } @@ -501,6 +524,11 @@ public bool IsValid() public int? ANF_HANA_shared_volume_throughput { get; set; } + /*---------------------------------------------------------------------------8 + | | + | /usr/sap | + | | + +------------------------------------4--------------------------------------*/ public bool? ANF_usr_sap { get; set; } public int? ANF_usr_sap_volume_size { get; set; } @@ -511,7 +539,11 @@ public bool IsValid() public int? ANF_usr_sap_throughput { get; set; } - public bool? use_private_endpoint { get; set; } + /*---------------------------------------------------------------------------8 + | | + | sapmnt | + | | + +------------------------------------4--------------------------------------*/ public bool? ANF_sapmnt { get; set; } @@ -523,7 +555,6 @@ public bool IsValid() public bool? ANF_sapmnt_use_clone_in_secondary_zone { get; set; } - public bool? ANF_HANA_use_AVG { get; set; } = false; /*---------------------------------------------------------------------------8 | | @@ -569,6 +600,16 @@ public bool IsValid() +------------------------------------4--------------------------------------*/ public bool? use_spn { get; set; } = true; + /*---------------------------------------------------------------------------8 + | | + | HANA Scale Out | + | | + +------------------------------------4--------------------------------------*/ + + public bool? database_HANA_use_ANF_scaleout_scenario { get; set; } = false; + + public int? 
stand_by_node_count { get; set; } = 0;
+
 }

 public class Tag
diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json
index d2feef0c54..6e30cd8a26 100644
--- a/Webapp/SDAF/ParameterDetails/SystemDetails.json
+++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json
@@ -645,15 +645,6 @@
 "Overrules": "",
 "Display": 3
 },
- {
- "Name": "database_HANA_use_ANF_scaleout_scenario",
- "Required": false,
- "Description": "Not implemented yet.",
- "Type": "checkbox",
- "Options": [],
- "Overrules": "",
- "Display": 3
- },
 {
 "Name": "database_vm_authentication_type",
 "Required": false,
@@ -1818,6 +1809,24 @@
 "Options": [],
 "Overrules": "",
 "Display": 2
+ },
+ {
+ "Name": "database_HANA_use_ANF_scaleout_scenario",
+ "Required": false,
+ "Description": "If true, the database tier will be configured for a scaleout scenario.",
+ "Type": "checkbox",
+ "Options": [],
+ "Overrules": "",
+ "Display": 3
+ },
+ {
+ "Name": "stand_by_node_count",
+ "Required": false,
+ "Description": "The number of standby nodes.",
+ "Type": "field",
+ "Options": [],
+ "Overrules": "",
+ "Display": 3
 }
 ]
 },
@@ -1869,6 +1878,15 @@
 "Options": [],
 "Overrules": "",
 "Display": 3
+ },
+ {
+ "Name": "ANF_hana_data_volume_count",
+ "Required": false,
+ "Description": "Number of ANF Data Volumes",
+ "Type": "field",
+ "Options": [],
+ "Overrules": "",
+ "Display": 3
 }
 ]
 },
@@ -1920,6 +1938,15 @@
 "Options": [],
 "Overrules": "",
 "Display": 3
+ },
+ {
+ "Name": "ANF_hana_log_volume_count",
+ "Required": false,
+ "Description": "Number of ANF Log Volumes",
+ "Type": "field",
+ "Options": [],
+ "Overrules": "",
+ "Display": 3
 }
 ]
 },
diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
index 9983ef172f..dcf5303a79 100644
--- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
@@ -249,7 +249,6 @@ $$database_use_avset$$
 # Optional, Defines if the tags for the database virtual machines
 $$database_tags$$

-$$database_HANA_use_ANF_scaleout_scenario$$

 #########################################################################################
 # #
@@ -501,6 +500,9 @@ $$ANF_HANA_data_use_existing_volume$$
 # ANF_HANA_data_volume_name, if defined, provides the name of the HANA data volume(s).
 $$ANF_HANA_data_volume_name$$

+# Number of ANF Data Volumes
+$$ANF_hana_data_volume_count$$
+

 #########################################################################################
 # #
@@ -523,6 +525,8 @@ $$ANF_HANA_log_use_existing$$
 # ANF_HANA_log_volume_name, if defined, provides the name of the HANA log volume(s). 
$$ANF_HANA_log_volume_name$$

+# Number of ANF Log Volumes
+$$ANF_hana_log_volume_count$$

 #########################################################################################
 # #
@@ -833,3 +837,16 @@
 # These tags will be applied to all resources
 $$tags$$
+
+
+#########################################################################################
+# #
+# Scaleout variables #
+# #
+#########################################################################################
+
+# If true, the database tier will be configured for a scaleout scenario
+$$database_HANA_use_ANF_scaleout_scenario$$
+
+# Defines the standby node count in a scaleout scenario
+$$stand_by_node_count$$
diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json
index 6e9fd5d509..c75ce3337f 100644
--- a/Webapp/SDAF/ParameterDetails/VM-Images.json
+++ b/Webapp/SDAF/ParameterDetails/VM-Images.json
@@ -251,6 +251,18 @@
 "type": "marketplace"
 }
 },
+ {
+ "name": "OracleLinux 8.8",
+ "data": {
+ "os_type": "LINUX",
+ "source_image_id": "",
+ "publisher": "Oracle",
+ "offer": "Oracle-Linux",
+ "sku": "ol88-lvm-gen2",
+ "version": "latest",
+ "type": "marketplace"
+ }
+ },
 {
 "name": "Windows Server 2016",
 "data": {
diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj
index 581e83a61e..99f8ef615a 100644
--- a/Webapp/SDAF/SDAFWebApp.csproj
+++ b/Webapp/SDAF/SDAFWebApp.csproj
@@ -17,7 +17,7 @@


-
+

diff --git a/Webapp/SDAF/Services/AppFileService.cs b/Webapp/SDAF/Services/AppFileService.cs
index 6d8ccba24f..f2535dbe86 100644
--- a/Webapp/SDAF/Services/AppFileService.cs
+++ b/Webapp/SDAF/Services/AppFileService.cs
@@ -84,5 +84,10 @@ public async Task DeleteAsync(string rowKey, string partitionKey)
 await blobClient.DeleteAsync();
 await client.DeleteEntityAsync(partitionKey, rowKey);
 }
+
+ public Task CreateTFVarsAsync(AppFile file)
+ {
+ throw new NotImplementedException();
+ }
 }
 }
diff --git a/Webapp/SDAF/Services/ITableStorageService.cs b/Webapp/SDAF/Services/ITableStorageService.cs
index af9c967c31..ad2cead8e9 100644
--- a/Webapp/SDAF/Services/ITableStorageService.cs
+++ b/Webapp/SDAF/Services/ITableStorageService.cs
@@ -1,4 +1,5 @@
-using System.Collections.Generic;
+using AutomationForm.Models;
+using System.Collections.Generic;
 using System.Threading.Tasks;

 namespace AutomationForm.Services
@@ -13,5 +14,6 @@ public interface ITableStorageService
 public Task CreateAsync(T model);
 public Task UpdateAsync(T model);
 public Task DeleteAsync(string rowKey, string partitionKey);
+ public Task CreateTFVarsAsync(AppFile file);
 }
 }
diff --git a/Webapp/SDAF/Services/LandscapeService.cs b/Webapp/SDAF/Services/LandscapeService.cs
index d710809c42..0298358381 100644
--- a/Webapp/SDAF/Services/LandscapeService.cs
+++ b/Webapp/SDAF/Services/LandscapeService.cs
@@ -1,6 +1,8 @@
-using AutomationForm.Models;
+using AutomationForm.Models;
 using Azure;
 using Azure.Data.Tables;
+using Azure.Storage.Blobs;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Threading.Tasks;
@@ -10,10 +12,14 @@ namespace AutomationForm.Services
 public class LandscapeService : ITableStorageService
 {
 private readonly TableClient client;
+ private readonly BlobContainerClient tfvarsBlobContainerClient;
+
 public LandscapeService(TableStorageService tableStorageService, IDatabaseSettings settings)
 {
 client = tableStorageService.GetTableClient(settings.LandscapeCollectionName).Result;
+ tfvarsBlobContainerClient = 
tableStorageService.GetBlobClient(settings.TfVarBlobCollectionName).Result; + } public async Task> GetNAsync(int n) @@ -56,5 +62,12 @@ public Task DeleteAsync(string rowKey, string partitionKey) { return client.DeleteEntityAsync(partitionKey, rowKey); } + public Task CreateTFVarsAsync(AppFile file) + { + BlobClient blobClient = tfvarsBlobContainerClient.GetBlobClient(file.Id); + return blobClient.UploadAsync(new BinaryData(file.Content), overwrite: blobClient.Exists()); + + } + } } diff --git a/Webapp/SDAF/Services/SystemService.cs b/Webapp/SDAF/Services/SystemService.cs index 1c2693ce34..24fbc1df76 100644 --- a/Webapp/SDAF/Services/SystemService.cs +++ b/Webapp/SDAF/Services/SystemService.cs @@ -1,6 +1,8 @@ -using AutomationForm.Models; +using AutomationForm.Models; using Azure; using Azure.Data.Tables; +using Azure.Storage.Blobs; +using System; using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; @@ -10,10 +12,12 @@ namespace AutomationForm.Services public class SystemService : ITableStorageService { private readonly TableClient client; + private readonly BlobContainerClient tfvarsBlobContainerClient; public SystemService(TableStorageService tableStorageService, IDatabaseSettings settings) { client = tableStorageService.GetTableClient(settings.SystemCollectionName).Result; + tfvarsBlobContainerClient = tableStorageService.GetBlobClient(settings.TfVarBlobCollectionName).Result; } public async Task> GetNAsync(int n) @@ -56,5 +60,12 @@ public Task DeleteAsync(string rowKey, string partitionKey) { return client.DeleteEntityAsync(partitionKey, rowKey); } + + public Task CreateTFVarsAsync(AppFile file) + { + BlobClient blobClient = tfvarsBlobContainerClient.GetBlobClient(file.Id); + return blobClient.UploadAsync(new BinaryData(file.Content), overwrite: blobClient.Exists()); + } + } } diff --git a/Webapp/SDAF/appsettings.json b/Webapp/SDAF/appsettings.json index 30b54100ff..76534d9d69 100644 --- a/Webapp/SDAF/appsettings.json +++ b/Webapp/SDAF/appsettings.json @@ -6,6 +6,7 @@ "SystemCollectionName": "Systems", "AppFileCollectionName": "Files", "AppFileBlobCollectionName": "appfiles", + "TfVarBlobCollectionName": "tfvars", "TemplateCollectionName": "Templates" }, "Logging": { @@ -15,4 +16,4 @@ "Microsoft.Hosting.Lifetime": "Information" } } -} \ No newline at end of file +} diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 750608ef3a..a5a755b5ee 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -122,7 +122,7 @@ - name: Enable {{ item.key }} in /etc/ssh/sshd_config become: true - lineinfile: + ansible.builtin.lineinfile: path: "/etc/ssh/sshd_config" regex: "^(# *)?{{ item.key }}" line: "{{ item.key }} {{ item.value }}" @@ -134,13 +134,13 @@ - name: "Restart SSHD on {{ ansible_hostname }}" become: true - service: + ansible.builtin.service: name: sshd state: restarted when: - not db_high_availability - - db_scale_out | default(false) == true + - db_scale_out | default(false) - hostvars.localhost.root_password is defined diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 287923480f..8a890e59ff 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -327,7 +327,7 @@ vars: allow_world_readable_tmpfiles: true - #ToDo: Check if we can 
interrogate the HANA DB to see if it is stopped.
+ # ToDo: Check if we can interrogate the HANA DB to see if it is stopped.
 - name: "Wait 5 minutes for SAP system to stop"
 ansible.builtin.wait_for:
 timeout: 300
@@ -354,7 +354,7 @@

 when:
 - not hana_installed.stat.exists
- - db_high_availability | default(false) == false
+ - not (db_high_availability | default(false))
 # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm.
 - ansible_hostname == db_hosts[0]
 - db_scale_out is defined
@@ -517,9 +517,9 @@
 when:
 - not hana_installed.stat.exists
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability
 - db_scale_out is defined
- - db_scale_out == true
+ - db_scale_out


 - name: "HANA Install status"
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml
index 18b423d504..f3eb975ac9 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml
@@ -596,7 +596,7 @@
 - ansible_hostname == db_hosts[0]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 - name: "ANF Mount: HANA log"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -623,7 +623,7 @@
 - ansible_hostname == db_hosts[0]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 - name: "ANF Mount: HANA shared"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -650,7 +650,7 @@
 - ansible_hostname == db_hosts[0]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 - name: "ANF Mount: HANA data (secondary)"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -678,7 +678,7 @@
 - ansible_hostname == db_hosts[1]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 - name: "ANF Mount: HANA log (secondary)"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -706,7 +706,7 @@
 - ansible_hostname == db_hosts[1]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 - name: "ANF Mount: HANA shared (secondary)"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
@@ -734,7 +734,7 @@
 - ansible_hostname == db_hosts[1]
 # For HSR based scale out, needs DB high availability
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability

 # /*---------------------------------------------------------------------------8
 # | |
@@ -893,7 +893,6 @@
 - db_scale_out


-
 - name: "ANF Mount: HANA Log - Scale out - Create mount list"
 block:
 - name: "Initialize HANA Log mountpoints"
@@ -948,7 +947,6 @@
 - db_scale_out


-
 - name: "ANF Mount: Set Permissions on HANA (HSR) Directories ({{ item.path }})"
 ansible.builtin.file:
 owner: '{{ hdbadm_uid }}'
@@ -964,7 +962,7 @@
 - tier == 'sapos'
 - node_tier == 'hana'
 - db_high_availability is defined
- - db_high_availability == true
+ - db_high_availability


 - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})"
@@ -982,7 +980,7 @@
 when:
 - tier == 'sapos'
 - node_tier == 'hana'
- 
db_high_availability | default(false) == false + - not (db_high_availability | default(false)) - db_scale_out ... diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index d8c3420014..679e0b11fa 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -359,7 +359,7 @@ - not use_simple_mount - NFS_provider == 'ANF' # only run when no scale out configuration is used. - - db_scale_out is not defined or db_scale_out == false + - db_scale_out is not defined or (not db_scale_out) # Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index 58c9204c7b..7d1c79b452 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -206,7 +206,7 @@ if [ -n "${subscription}" ]; then load_config_vars "${deployer_config_information}" "keyvault" fi - if [ -n $keyvault ] ; then + if [ -n "${keyvault}" ] ; then kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index 54db2dea8f..334d917f96 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -16,8 +16,8 @@ module "sap_deployer" { additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url Agent_IP = var.Agent_IP - agent_pat = var.use_webapp ? var.agent_pat : "" - agent_pool = var.use_webapp ? var.agent_pool : "" + agent_pat = var.agent_pat + agent_pool = var.agent_pool ansible_core_version = var.ansible_core_version app_registration_app_id = var.use_webapp ? var.app_registration_app_id : "" app_service = local.app_service diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index 7bb058f551..fd48b72325 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -16,8 +16,8 @@ module "sap_deployer" { additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url Agent_IP = var.Agent_IP - agent_pat = var.use_webapp ? var.agent_pat : "" - agent_pool = var.use_webapp ? var.agent_pool : "" + agent_pat = var.agent_pat + agent_pool = var.agent_pool ansible_core_version = var.ansible_core_version app_registration_app_id = var.use_webapp ? var.app_registration_app_id : "" app_service = local.app_service diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 347db4049e..9e8b226d67 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -19,8 +19,7 @@ module "sap_namegenerator" { app_ostype = upper(try(local.application_tier.app_os.os_type, "LINUX")) anchor_ostype = upper(try(local.anchor_vms.os.os_type, "LINUX")) db_ostype = upper(try(local.database.os.os_type, "LINUX")) - - db_server_count = var.database_server_count + db_server_count = var.database_server_count + var.stand_by_node_count app_server_count = local.enable_app_tier_deployment ? try(local.application_tier.application_server_count, 0) : 0 web_server_count = local.enable_app_tier_deployment ? 
try(local.application_tier.webdispatcher_count, 0) : 0 scs_server_count = local.enable_app_tier_deployment ? local.application_tier.scs_high_availability ? ( @@ -123,8 +122,8 @@ module "hdb_node" { database_dual_nics = try(module.common_infrastructure.admin_subnet, null) == null ? false : var.database_dual_nics database_server_count = upper(try(local.database.platform, "HANA")) == "HANA" ? ( local.database.high_availability ? ( - 2 * var.database_server_count) : ( - var.database_server_count + 2 * (var.database_server_count + var.stand_by_node_count)) : ( + var.database_server_count + var.stand_by_node_count )) : ( 0 ) @@ -399,9 +398,9 @@ module "output_files" { sap_transport = try(data.terraform_remote_state.landscape.outputs.saptransport_path, "") install_path = try(data.terraform_remote_state.landscape.outputs.install_path, "") shared_home = var.shared_home - hana_data = [module.hdb_node.hana_data_primary, module.hdb_node.hana_data_secondary] - hana_log = [module.hdb_node.hana_log_primary, module.hdb_node.hana_log_secondary] - hana_shared = [module.hdb_node.hana_shared_primary] + hana_data = module.hdb_node.hana_data_ANF_volumes + hana_log = module.hdb_node.hana_log_ANF_volumes + hana_shared = [module.hdb_node.hana_shared] usr_sap = module.common_infrastructure.usrsap_path ######################################################################################### @@ -418,7 +417,7 @@ module "output_files" { # Server counts # ######################################################################################### app_server_count = try(local.application_tier.application_server_count, 0) - db_server_count = var.database_server_count + db_server_count = var.database_server_count + var.stand_by_node_count scs_server_count = local.application_tier.scs_high_availability ? 
( 2 * local.application_tier.scs_server_count) : (
 local.application_tier.scs_server_count
diff --git a/deploy/terraform/run/sap_system/output.tf b/deploy/terraform/run/sap_system/output.tf
index f1ad2197e8..993c8ec813 100644
--- a/deploy/terraform/run/sap_system/output.tf
+++ b/deploy/terraform/run/sap_system/output.tf
@@ -217,3 +217,4 @@ output "subscription_id_used" {
 value = local.spn.subscription_id
 sensitive = true
 }
+
diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf
index a813215b71..bda0fe1416 100644
--- a/deploy/terraform/run/sap_system/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_system/tfvar_variables.tf
@@ -517,10 +517,6 @@ variable "database_vm_storage_nic_ips" {
 default = [""]
 }

-variable "database_HANA_use_ANF_scaleout_scenario" {
- description = "Not implemented yet"
- default = false
- }

 variable "database_use_premium_v2_storage" {
 description = "If true, the database tier will use premium storage v2"
@@ -1050,6 +1046,18 @@ variable "Use_AFS_for_Installation" {
 # #
 #########################################################################################

+variable "ANF_HANA_use_AVG" {
+ description = "Use Application Volume Group for data volume"
+ default = false
+ }
+
+variable "ANF_HANA_use_Zones" {
+ description = "Use zonal ANF deployments"
+ default = false
+ }
+
+
+# Data volume

 variable "ANF_HANA_data" {
 description = "If defined, will create ANF volumes for HANA data"
@@ -1076,15 +1084,12 @@ variable "ANF_HANA_data_volume_throughput" {
 default = 128
 }

-variable "ANF_HANA_use_AVG" {
- description = "Use Application Volume Group for data volume"
- default = false
+variable "ANF_hana_data_volume_count" {
+ description = "If defined, provides the number of data volumes"
+ default = 1
 }

-variable "ANF_HANA_use_Zones" {
- description = "Use zonal ANF deployments"
- default = false
- }
+# Log volume

 variable "ANF_HANA_log" {
 description = "If defined, will create ANF volumes for HANA log"
@@ -1111,6 +1116,13 @@ variable "ANF_HANA_log_volume_throughput" {
 default = 128
 }

+variable "ANF_hana_log_volume_count" {
+ description = "If defined, provides the number of log volumes"
+ default = 1
+ }
+
+# Shared volume
+
 variable "ANF_HANA_shared" {
 description = "If defined, will create ANF volumes for HANA shared"
 default = false
@@ -1136,6 +1148,7 @@ variable "ANF_HANA_shared_volume_throughput" {
 default = 128
 }

+# /usr/sap

 variable "ANF_usr_sap" {
 description = "If defined, will create ANF volumes for /usr/sap"
@@ -1162,6 +1175,10 @@ variable "ANF_usr_sap_throughput" {
 default = 128
 }

+
+# /sapmnt
+
+
 variable "ANF_sapmnt_use_existing" {
 description = "Use existing sapmnt volume"
 default = false
@@ -1272,3 +1289,20 @@ variable "tags" {
 description = "If provided, tags for all resources"
 default = {}
 }
+
+
+#########################################################################################
+# #
+# Scaleout variables #
+# #
+#########################################################################################
+
+variable "database_HANA_use_ANF_scaleout_scenario" {
+ description = "If true, the database tier will be configured for a scaleout scenario"
+ default = false
+ }
+
+variable "stand_by_node_count" {
+ description = "The number of standby nodes"
+ default = 0
+ }
diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf
index 3c4074a311..b573cafb94 100644
--- a/deploy/terraform/run/sap_system/transform.tf
+++ 
b/deploy/terraform/run/sap_system/transform.tf @@ -95,6 +95,7 @@ locals { ) user_assigned_identity_id = var.user_assigned_identity_id zones = var.database_vm_zones + stand_by_node_count = var.stand_by_node_count } db_os = { @@ -636,4 +637,44 @@ locals { } ) : null)) + hana_ANF_volumes = { + use_for_data = var.ANF_HANA_data + data_volume_size = var.ANF_HANA_data_volume_size + use_existing_data_volume = var.ANF_HANA_data_use_existing_volume + data_volume_name = var.ANF_HANA_data_volume_name + data_volume_throughput = var.ANF_HANA_data_volume_throughput + data_volume_count = var.ANF_hana_data_volume_count + + use_for_log = var.ANF_HANA_log + log_volume_size = var.ANF_HANA_log_volume_size + use_existing_log_volume = var.ANF_HANA_log_use_existing + log_volume_name = var.ANF_HANA_log_volume_name + log_volume_throughput = var.ANF_HANA_log_volume_throughput + log_volume_count = var.ANF_hana_log_volume_count + + use_for_shared = var.ANF_HANA_shared + shared_volume_size = var.ANF_HANA_shared_volume_size + use_existing_shared_volume = var.ANF_HANA_shared_use_existing + shared_volume_name = var.ANF_HANA_shared_volume_name + shared_volume_throughput = var.ANF_HANA_shared_volume_throughput + + use_for_usr_sap = var.ANF_usr_sap + usr_sap_volume_size = var.ANF_usr_sap_volume_size + use_existing_usr_sap_volume = var.ANF_usr_sap_use_existing + usr_sap_volume_name = var.ANF_usr_sap_volume_name + usr_sap_volume_throughput = var.ANF_usr_sap_throughput + + sapmnt_volume_size = var.sapmnt_volume_size + use_for_sapmnt = var.ANF_sapmnt + use_existing_sapmnt_volume = var.ANF_sapmnt_use_existing + sapmnt_volume_name = var.ANF_sapmnt_volume_name + sapmnt_volume_throughput = var.ANF_sapmnt_volume_throughput + sapmnt_use_clone_in_secondary_zone = var.ANF_sapmnt_use_clone_in_secondary_zone + + use_AVG_for_data = var.ANF_HANA_use_AVG + use_zones = var.ANF_HANA_use_Zones + + } + + } diff --git a/deploy/terraform/run/sap_system/variables_local.tf b/deploy/terraform/run/sap_system/variables_local.tf index a0683743df..4bb0b5f4bf 100644 --- a/deploy/terraform/run/sap_system/variables_local.tf +++ b/deploy/terraform/run/sap_system/variables_local.tf @@ -65,41 +65,4 @@ locals { null ) - hana_ANF_volumes = { - use_for_data = var.ANF_HANA_data - data_volume_size = var.ANF_HANA_data_volume_size - use_existing_data_volume = var.ANF_HANA_data_use_existing_volume - data_volume_name = var.ANF_HANA_data_volume_name - data_volume_throughput = var.ANF_HANA_data_volume_throughput - - use_for_log = var.ANF_HANA_log - log_volume_size = var.ANF_HANA_log_volume_size - use_existing_log_volume = var.ANF_HANA_log_use_existing - log_volume_name = var.ANF_HANA_log_volume_name - log_volume_throughput = var.ANF_HANA_log_volume_throughput - - use_for_shared = var.ANF_HANA_shared - shared_volume_size = var.ANF_HANA_shared_volume_size - use_existing_shared_volume = var.ANF_HANA_shared_use_existing - shared_volume_name = var.ANF_HANA_shared_volume_name - shared_volume_throughput = var.ANF_HANA_shared_volume_throughput - - use_for_usr_sap = var.ANF_usr_sap - usr_sap_volume_size = var.ANF_usr_sap_volume_size - use_existing_usr_sap_volume = var.ANF_usr_sap_use_existing - usr_sap_volume_name = var.ANF_usr_sap_volume_name - usr_sap_volume_throughput = var.ANF_usr_sap_throughput - - sapmnt_volume_size = var.sapmnt_volume_size - use_for_sapmnt = var.ANF_sapmnt - use_existing_sapmnt_volume = var.ANF_sapmnt_use_existing - sapmnt_volume_name = var.ANF_sapmnt_volume_name - sapmnt_volume_throughput = var.ANF_sapmnt_volume_throughput - 
sapmnt_use_clone_in_secondary_zone = var.ANF_sapmnt_use_clone_in_secondary_zone - - use_AVG_for_data = var.ANF_HANA_use_AVG - use_zones = var.ANF_HANA_use_Zones - - } - } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index 7d234576f2..37dda1e7c8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -6,14 +6,14 @@ resource "azurerm_netapp_volume" "hanadata" { provider = azurerm.main - count = var.hana_ANF_volumes.use_for_data && !local.use_avg ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data && !local.use_avg ? ( var.hana_ANF_volumes.use_existing_data_volume ? ( 0 ) : ( - var.database_server_count > 1 ? 2 : 1 + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count )) : ( 0 - ) + ) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanadata, local.prefix, @@ -57,14 +57,14 @@ data "azurerm_netapp_volume" "hanadata" { depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = var.hana_ANF_volumes.use_for_data ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data ? ( var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - var.database_server_count > 1 ? 2 : 1 + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count ) : ( 0 )) : ( 0 - ) + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanadata, @@ -85,14 +85,14 @@ resource "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = var.hana_ANF_volumes.use_for_log && !local.use_avg ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log && !local.use_avg ? ( var.hana_ANF_volumes.use_existing_log_volume ? ( 0 ) : ( - var.database_server_count > 1 ? 2 : 1 + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count )) : ( 0 - ) + ) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, local.prefix, @@ -133,14 +133,14 @@ data "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = var.hana_ANF_volumes.use_for_log ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log ? ( var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - var.database_server_count > 1 ? 2 : 1 + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count ) : ( 0 )) : ( 0 - ) + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, @@ -160,14 +160,14 @@ resource "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = var.hana_ANF_volumes.use_for_shared && !local.use_avg ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared && !local.use_avg ? ( var.hana_ANF_volumes.use_existing_shared_volume ? 
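# Illustrative arithmetic, with assumed values: the data and log volume counts
# above follow (var.database_server_count - var.database.stand_by_node_count)
# * <data|log>_volume_count, so a three-node scale-out with one standby node
# and data_volume_count = 2 creates (3 - 1) * 2 = 4 data volumes; standby
# nodes get no dedicated data or log volumes. The shared volume below stays a
# single volume.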
( 0 ) : ( 1 )) : ( 0 - ) + ) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, local.prefix, @@ -210,14 +210,14 @@ data "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = var.hana_ANF_volumes.use_for_shared ? ( + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared ? ( var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( 1 ) : ( 0 )) : ( 0 - ) + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index e7ee9e4f4c..3874fcd58e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -121,116 +121,44 @@ output "database_server_secondary_ips" { } -output "hana_data_primary" { - description = "HANA Data Primary volume" - value = try(var.hana_ANF_volumes.use_for_data ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0]) : ( - azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0] - ), - var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanadata[0].volume_path) : ( - azurerm_netapp_volume.hanadata[0].volume_path - ) - ) - ) : ( - "" - ), "") - } - -output "hana_data_secondary" { - description = "HANA Data Secondary volume" - value = try(var.hana_ANF_volumes.use_for_data && var.database_server_count > 1 ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanadata[1].mount_ip_addresses[0]) : ( - azurerm_netapp_volume.hanadata[1].mount_ip_addresses[0] - ), - var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanadata[1].volume_path) : ( - azurerm_netapp_volume.hanadata[1].volume_path +output "hana_data_ANF_volumes" { + description = "HANA Data volumes" + value = flatten([ + for idx in range(local.data_volume_count) : [ + format("%s:/%s", + var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanadata[idx].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanadata[idx].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanadata[idx].volume_path) : ( + azurerm_netapp_volume.hanadata[idx].volume_path + ) ) - ) - ) : ( - "" - ), "") + ] + ]) } - # output "hana_data" { -# value = var.hana_ANF_volumes.use_for_data ? ( -# var.database.high_availability ? ( -# [format("%s:/%s", -# var.hana_ANF_volumes.use_existing_data_volume ? ( -# data.azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0]) : ( -# azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0] -# ), -# var.hana_ANF_volumes.use_existing_data_volume ? ( -# data.azurerm_netapp_volume.hanadata[0].volume_path) : ( -# azurerm_netapp_volume.hanadata[0].volume_path -# ) -# ), format("%s:/%s", -# var.hana_ANF_volumes.use_existing_data_volume ? ( -# data.azurerm_netapp_volume.hanadata[1].mount_ip_addresses[1]) : ( -# azurerm_netapp_volume.hanadata[1].mount_ip_addresses[1] -# ), -# var.hana_ANF_volumes.use_existing_data_volume ? 
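# A hedged sketch of what the flattened output above evaluates to for two
# servers and data_volume_count = 1 (addresses and volume paths are made up):
#   hana_data_ANF_volumes = [
#     "10.1.2.4:/X00-hanadata-1",
#     "10.1.2.5:/X00-hanadata-2",
#   ]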
( -# data.azurerm_netapp_volume.hanadata[1].volume_path) : ( -# azurerm_netapp_volume.hanadata[1].volume_path -# ) -# )] -# ) : ( -# [format("%s:/%s", -# var.hana_ANF_volumes.use_existing_data_volume ? ( -# data.azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0]) : ( -# azurerm_netapp_volume.hanadata[0].mount_ip_addresses[0] -# ), -# var.hana_ANF_volumes.use_existing_data_volume ? ( -# data.azurerm_netapp_volume.hanadata[0].volume_path) : ( -# azurerm_netapp_volume.hanadata[0].volume_path -# ) -# )] -# ) -# ) : ( -# [""]) -# } -output "hana_log_primary" { - description = "HANA Log primary volume" - value = try(var.hana_ANF_volumes.use_for_log ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanalog[0].mount_ip_addresses[0]) : ( - azurerm_netapp_volume.hanalog[0].mount_ip_addresses[0] - ), - var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanalog[0].volume_path) : ( - azurerm_netapp_volume.hanalog[0].volume_path - ) - ) - ) : ( - "" - ), "") - } +output "hana_log_ANF_volumes" { + description = "HANA Log volumes" + value = flatten([ + for idx in range(local.log_volume_count) : [ + format("%s:/%s", + var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanalog[idx].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanalog[idx].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanalog[idx].volume_path) : ( + azurerm_netapp_volume.hanalog[idx].volume_path + ) + ) -output "hana_log_secondary" { - description = "HANA Log secondary volume" - value = try(var.hana_ANF_volumes.use_for_log && var.database_server_count > 1 ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanalog[1].mount_ip_addresses[0]) : ( - azurerm_netapp_volume.hanalog[1].mount_ip_addresses[0] - ), - var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanalog[1].volume_path) : ( - azurerm_netapp_volume.hanalog[1].volume_path - ) - ) - ) : ( - "" - ), "") + ] + ]) } -output "hana_shared_primary" { +output "hana_shared" { description = "HANA Shared primary volume" value = try(var.hana_ANF_volumes.use_for_shared ? ( format("%s:/%s", @@ -248,7 +176,6 @@ output "hana_shared_primary" { ), "") } - output "application_volume_group" { description = "Application volume group" value = azurerm_netapp_volume_group_sap_hana.avg_HANA diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index ed9157a0ce..f9c8826b62 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -340,9 +340,7 @@ locals { dns_label = try(var.landscape_tfstate.dns_label, "") - ANF_pool_settings = var.NFS_provider == "ANF" ? ( - try(var.landscape_tfstate.ANF_pool_settings, {}) - ) : ( + ANF_pool_settings = try(var.landscape_tfstate.ANF_pool_settings, { use_ANF = false account_name = "" @@ -382,4 +380,16 @@ locals { flatten(concat(local.database_primary_ips, local.database_secondary_ips))) : ( local.database_primary_ips ) + + + + data_volume_count = (var.hana_ANF_volumes.use_for_data || var.hana_ANF_volumes.use_existing_data_volume) ? 
( + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count) : ( + 0 + ) + log_volume_count = (var.hana_ANF_volumes.use_for_log || var.hana_ANF_volumes.use_existing_log_volume) ? ( + (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count) : ( + 0 + ) + } From 5aadf26c388d4f3eafbd46048215a8377994f9ef Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 5 Feb 2024 20:35:22 +0200 Subject: [PATCH 184/607] Refactor HANA Data and Log volumes in outputs.tf --- .../modules/sap_system/hdb_node/outputs.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index 3874fcd58e..3c27c4a7ee 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -123,7 +123,7 @@ output "database_server_secondary_ips" { output "hana_data_ANF_volumes" { description = "HANA Data volumes" - value = flatten([ + value = local.data_volume_count > 0 ? flatten([ for idx in range(local.data_volume_count) : [ format("%s:/%s", var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( @@ -136,12 +136,12 @@ output "hana_data_ANF_volumes" { ) ) ] - ]) + ]) : [] } output "hana_log_ANF_volumes" { description = "HANA Log volumes" - value = flatten([ + value = local.log_volume_count > 0 ? flatten([ for idx in range(local.log_volume_count) : [ format("%s:/%s", var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( @@ -155,7 +155,7 @@ output "hana_log_ANF_volumes" { ) ] - ]) + ]) : [] } output "hana_shared" { From f9d5a7c80e1180d6422d3b6b7b67fbfe3e370add Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 5 Feb 2024 22:08:04 +0200 Subject: [PATCH 185/607] Add oraclelinux8.9 repository and packages --- deploy/ansible/roles-os/1.3-repository/vars/repos.yaml | 1 + deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index d7562fd267..f82c5498b6 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -49,3 +49,4 @@ repos: oraclelinux8.6: oraclelinux8.7: oraclelinux8.8: + oraclelinux8.9: diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index ce70999945..94ef59ad0c 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -502,3 +502,8 @@ packages: - { tier: 'os', package: 'oracle-database-preinstall-19c', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } + + oraclelinux8.9: + - { tier: 'os', package: 'oracle-database-preinstall-19c', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } + - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } From 19f19916c58931fe58fd5b2a1091189a3c3b8925 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 5 Feb 2024 23:43:39 +0200 Subject: [PATCH 186/607] Update SELinux 
configuration for Oracle Linux 8 --- .../2.10-sap-notes/tasks/main.yaml | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index 2197e4a9f9..7683f8bdf3 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml @@ -47,7 +47,7 @@ - name: "2.10.1 sap-notes: - Disable SELinux and Reboot" when: - node_tier in ['scs', 'ers', 'pas', 'app', 'web'] - - distribution_id in ['redhat7', 'redhat8', 'redhat9', 'oraclelinux8'] + - distribution_id in ['redhat7', 'redhat8', 'redhat9'] block: - name: "2.10.1 sap-notes: - Disable SELinux" ansible.posix.selinux: @@ -62,6 +62,24 @@ when: - selinux_disabled.changed +- name: "2.10.1 sap-notes: - Disable SELinux and Reboot" + when: + - node_tier in ['scs', 'ers', 'pas', 'app', 'web'] + - distribution_id in ['oraclelinux8'] + block: + - name: "2.10.1 sap-notes: - Disable SELinux" + ansible.posix.selinux: + state: permissive + register: selinux_permissive + + - name: "2.10.1 sap-notes: Reboot app VMs after selinux is configured" + ansible.builtin.reboot: + reboot_timeout: 300 + post_reboot_delay: 60 + ignore_unreachable: true + when: + - selinux_permissive.changed + - name: "2.10.1 sap-notes: Check VM Agent Status" when: - selinux_disabled.changed From 2095943e749aea373e03c1b8766fc01a2931581b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 00:27:48 +0200 Subject: [PATCH 187/607] Update SELinux state to disabled in sap-notes role --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index 7683f8bdf3..70937be5df 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml @@ -69,7 +69,7 @@ block: - name: "2.10.1 sap-notes: - Disable SELinux" ansible.posix.selinux: - state: permissive + state: disabled register: selinux_permissive - name: "2.10.1 sap-notes: Reboot app VMs after selinux is configured" From 2afda76c477368218651b06172bf2ef5b3c58b3b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 10:19:34 +0200 Subject: [PATCH 188/607] Fix Oracle Data Guard setup for secondary node --- .../roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index f47dc94838..dfea0ca840 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -154,8 +154,8 @@ executable: /bin/csh creates: /etc/sap_deployment_automation/dgscripts/asm_oraarch_created.txt when: - - current_host == ora_secondary - node_tier == 'oracle-asm' + - current_host == ora_secondary - name: "Oracle Data Guard - Setup Secondary: Create asm_oraarch_created.txt" become: true @@ -165,9 +165,9 @@ state: touch mode: '0755' when: + - node_tier == 'oracle-asm' - current_host == ora_secondary - asm_oraarch_created_results.rc == 0 - - node_tier == 'oracle-asm' - name: "Oracle Data Guard - Setup Secondary: Duplicate Secondary DB from Primary DB using RMAN" block: From 
2aa6e2f98a42049ed05f785cee76c59148e1d56c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 10:47:42 +0200 Subject: [PATCH 189/607] Update app_virtual_hostname variable in main.yaml --- deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 1ed018df28..6755e3ea99 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -33,7 +33,7 @@ dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | upper }}-params" db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" - app_virtual_hostname: "{{ default(virtual_host, true) }}" + app_virtual_hostname: "{{ virtual_host }}" - name: "APP Install: Set BOM facts db host" ansible.builtin.set_fact: From f9806079ec67af6ebd3e61ed7996051340f9c4cf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 16:04:47 +0200 Subject: [PATCH 190/607] Add Oracle Data Guard post-processing task to restart lsnrctl on Secondary --- .../tasks/ora-dg-secondary-preparation.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml index 7b33d04c71..339fc122df 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml @@ -132,6 +132,17 @@ path: /etc/sap_deployment_automation/dgscripts/dbscopied.txt state: touch mode: '0755' + +- name: "Oracle Data Guard - Post Processing: Restart lsnrctl on Secondary" + become: true + become_user: "oracle" + ansible.builtin.shell: lsnrctl reload + register: lsnrctl_start_secondary_results + failed_when: lsnrctl_start_secondary_results.rc > 0 + args: + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + ... 
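# Assumption about the intent of the task above: `lsnrctl reload` re-reads
# listener.ora and refreshes service registrations without stopping the
# listener, so existing connections survive, whereas a stop/start cycle would
# briefly drop them. A quick manual check afterwards, for reference:
#   sudo -u oracle lsnrctl status | grep -i ready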
# /*---------------------------------------------------------------------------8 # | END | From 54003d5e28639400cf55d4239414b6e0c7e953d1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 17:14:43 +0200 Subject: [PATCH 191/607] Add input and output variable files for SAP landscape module --- .../modules/sap_landscape/outputs.tf | 7 + .../modules/sap_landscape/readme.md | 13 ++ .../modules/sap_landscape/storage_accounts.tf | 186 +++++++----------- .../modules/sap_landscape/variables_global.tf | 7 + .../modules/sap_library/storage_accounts.tf | 72 +++---- 5 files changed, 131 insertions(+), 154 deletions(-) create mode 100644 deploy/terraform/terraform-units/modules/sap_landscape/readme.md diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index c376803a16..cfa1feeffa 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -1,3 +1,10 @@ +#######################################4#######################################8 +# # +# This file contains the output variables for the SAP landscape module # +# # +#######################################4#######################################8 + + #######################################4#######################################8 # # # Resource Group # diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/readme.md b/deploy/terraform/terraform-units/modules/sap_landscape/readme.md new file mode 100644 index 0000000000..be828581de --- /dev/null +++ b/deploy/terraform/terraform-units/modules/sap_landscape/readme.md @@ -0,0 +1,13 @@ +# Module Description + +This folder contains the terraform templates for the SAP landscape deployment module. + +## variables_global.tf +The file variables_global.tf contains the input variables that are required by the module. + +## outputs.tf +The file outputs.tf contains the output variables that are exposed by the module. + +## variables_local.tf +The file variables_local.tf contains the local variables that are required by the module. + diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 611a080493..7b7880e1d4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -149,42 +149,29 @@ resource "azurerm_storage_account" "witness_storage" { public_network_access_enabled = var.public_network_access_enabled tags = var.tags + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = compact([ + local.database_subnet_defined ? ( + local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( + null + ), local.application_subnet_defined ? ( + local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( + null + ), + data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( + data.azurerm_resource_group.resource_group[0].location) : ( + azurerm_resource_group.resource_group[0].location + )) ? local.deployer_subnet_management_id : null + ] + ) + ip_rules = compact([ + length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", + length(var.Agent_IP) > 0 ? 
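# A hedged note on the refactor above: the inline network_rules block replaces
# the separate azurerm_storage_account_network_rules resource because the
# azurerm provider documents that mixing the two on one storage account causes
# them to fight over the same firewall state. compact() drops the empty
# strings produced when the deployer public IP or Agent_IP is unset.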
var.Agent_IP : "" + ]) + } -} -resource "azurerm_storage_account_network_rules" "witness" { - provider = azurerm.main - count = var.enable_firewall_for_keyvaults_and_storage && length(var.witness_storage_account.arm_id) == 0 ? 1 : 0 - depends_on = [ - azurerm_storage_account.witness_storage, - azurerm_subnet.db, - azurerm_subnet.app - ] - storage_account_id = azurerm_storage_account.witness_storage[0].id - default_action = "Deny" - bypass = ["AzureServices", "Logging", "Metrics"] - - ip_rules = compact([ - length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ]) - virtual_network_subnet_ids = compact([ - local.database_subnet_defined ? ( - local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( - null - ), local.application_subnet_defined ? ( - local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( - null - ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null - ] - ) - lifecycle { - ignore_changes = [virtual_network_subnet_ids] - } } resource "azurerm_private_dns_a_record" "witness_storage" { @@ -311,47 +298,37 @@ resource "azurerm_storage_account" "transport" { public_network_access_enabled = var.public_network_access_enabled + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = compact( + [ + local.database_subnet_defined ? ( + local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( + "" + ), local.application_subnet_defined ? ( + local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( + "" + ), local.web_subnet_defined ? ( + local.web_subnet_existing ? var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( + "" + ), + data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( + data.azurerm_resource_group.resource_group[0].location) : ( + azurerm_resource_group.resource_group[0].location + )) ? local.deployer_subnet_management_id : null + + ] + ) + ip_rules = compact([ + length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", + length(var.Agent_IP) > 0 ? var.Agent_IP : "" + ]) + } + tags = var.tags } -resource "azurerm_storage_account_network_rules" "transport" { - provider = azurerm.main - count = var.create_transport_storage && local.use_AFS_for_shared && length(var.transport_storage_account_id) == 0 ? 1 : 0 - storage_account_id = azurerm_storage_account.transport[0].id - default_action = "Deny" - - ip_rules = compact([ - length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ]) - - bypass = ["AzureServices", "Logging", "Metrics"] - virtual_network_subnet_ids = compact( - [ - local.database_subnet_defined ? ( - local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( - "" - ), local.application_subnet_defined ? ( - local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( - "" - ), local.web_subnet_defined ? ( - local.web_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( - "" - ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null - - ] - ) - - lifecycle { - ignore_changes = [virtual_network_subnet_ids] - } - -} resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.dnsmanagement @@ -547,50 +524,37 @@ resource "azurerm_storage_account" "install" { min_tls_version = "TLS1_2" public_network_access_enabled = var.public_network_access_enabled tags = var.tags + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = compact( + [ + local.database_subnet_defined ? ( + local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( + "" + ), local.application_subnet_defined ? ( + local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( + "" + ), local.web_subnet_defined ? ( + local.web_subnet_existing ? var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( + "" + ), + data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( + data.azurerm_resource_group.resource_group[0].location) : ( + azurerm_resource_group.resource_group[0].location + )) ? local.deployer_subnet_management_id : null + + ] + ) + ip_rules = compact([ + length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", + length(var.Agent_IP) > 0 ? var.Agent_IP : "" + ]) + } -} - -resource "azurerm_storage_account_network_rules" "install" { - provider = azurerm.main - count = local.use_AFS_for_shared && length(var.install_storage_account_id) == 0 ? 1 : 0 - depends_on = [ - azurerm_subnet.app, - azurerm_subnet.db - ] - - storage_account_id = azurerm_storage_account.install[0].id - default_action = "Deny" - - ip_rules = compact([ - length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ]) - bypass = ["AzureServices", "Logging", "Metrics"] - virtual_network_subnet_ids = compact( - [ - local.database_subnet_defined ? ( - local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( - "" - ), local.application_subnet_defined ? ( - local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( - "" - ), local.web_subnet_defined ? ( - local.web_subnet_existing ? var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( - "" - ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null - - ] - ) - lifecycle { - ignore_changes = [virtual_network_subnet_ids] - } } + resource "azurerm_private_dns_a_record" "install" { provider = azurerm.dnsmanagement count = var.use_private_endpoint && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.install_private_endpoint_id) == 0 ? 
1 : 0 diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index b623e3bc58..bbdcc1aac6 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -1,3 +1,10 @@ +#######################################4#######################################8 +# # +# This file contains the input variables for the SAP landscape module # +# # +#######################################4#######################################8 + + #######################################4#######################################8 # # diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 5ea0e76220..321a86a93a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -47,34 +47,27 @@ resource "azurerm_storage_account" "storage_tfstate" { choice = "MicrosoftRouting" } + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = local.virtual_additional_network_ids + ip_rules = local.deployer_public_ip_address_used ? ( + [ + local.deployer_public_ip_address + ]) : compact( + [ + try(var.deployer_tfstate.Agent_IP, ""), + try(var.Agent_IP, "") + ] + ) + } + lifecycle { ignore_changes = [tags] } -} -resource "azurerm_storage_account_network_rules" "storage_tfstate" { - provider = azurerm.main - count = local.enable_firewall_for_keyvaults_and_storage && !local.sa_tfstate_exists ? 1 : 0 - storage_account_id = azurerm_storage_account.storage_tfstate[0].id - default_action = "Deny" - - ip_rules = local.deployer_public_ip_address_used ? ( - [ - local.deployer_public_ip_address - ]) : compact( - [ - try(var.deployer_tfstate.Agent_IP, ""), - try(var.Agent_IP, "") - ] - ) - - virtual_network_subnet_ids = local.virtual_additional_network_ids - - lifecycle { - ignore_changes = [virtual_network_subnet_ids] - } } + // Imports existing storage account to use for tfstate data "azurerm_storage_account" "storage_tfstate" { provider = azurerm.main @@ -251,29 +244,22 @@ resource "azurerm_storage_account" "storage_sapbits" { publish_microsoft_endpoints = true choice = "MicrosoftRouting" } - lifecycle { - ignore_changes = [tags] - } -} - -resource "azurerm_storage_account_network_rules" "storage_sapbits" { - provider = azurerm.main - count = local.enable_firewall_for_keyvaults_and_storage && !local.sa_tfstate_exists ? 1 : 0 - storage_account_id = azurerm_storage_account.storage_sapbits[0].id - default_action = "Deny" - ip_rules = local.deployer_public_ip_address_used ? ( - [ - local.deployer_public_ip_address - ]) : compact( - [ - try(var.deployer_tfstate.Agent_IP, ""), - try(var.Agent_IP, "") - ] - ) - virtual_network_subnet_ids = local.virtual_additional_network_ids + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = local.virtual_additional_network_ids + ip_rules = local.deployer_public_ip_address_used ? 
( + [ + local.deployer_public_ip_address + ]) : compact( + [ + try(var.deployer_tfstate.Agent_IP, ""), + try(var.Agent_IP, "") + ] + ) + } lifecycle { - ignore_changes = [virtual_network_subnet_ids] + ignore_changes = [tags] } } From aadeb92c8b09fe38641f5a8018fc199ef3762bc4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 17:28:08 +0200 Subject: [PATCH 192/607] remove dependency --- .../terraform-units/modules/sap_library/storage_accounts.tf | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 321a86a93a..12c92c471b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -447,7 +447,6 @@ resource "azurerm_storage_container" "storagecontainer_tfvars" { provider = azurerm.main count = var.storage_account_tfstate.tfvars_blob_container.is_existing ? 0 : 1 depends_on = [ - azurerm_storage_account_network_rules.storage_tfstate, azurerm_private_endpoint.storage_tfstate ] name = var.storage_account_tfstate.tfvars_blob_container.name From a8370299d7a6557aa257529deebd3ae86700695a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 6 Feb 2024 17:34:01 +0200 Subject: [PATCH 193/607] Move the rules --- .../common_infrastructure/storage_accounts.tf | 45 +++++++------------ 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index c5c39ca314..565c8e8141 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -41,34 +41,23 @@ resource "azurerm_storage_account" "sapmnt" { public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) tags = var.tags -} -resource "azurerm_storage_account_network_rules" "sapmnt" { - provider = azurerm.main - count = var.NFS_provider == "AFS" ? ( - length(var.azure_files_sapmnt_id) > 0 ? ( - 0) : ( - 1 - )) : ( - 0 - ) - storage_account_id = azurerm_storage_account.sapmnt[0].id - default_action = "Deny" - - bypass = ["AzureServices", "Logging", "Metrics"] - virtual_network_subnet_ids = compact( - [ - try(var.landscape_tfstate.admin_subnet_id, ""), - try(var.landscape_tfstate.app_subnet_id, ""), - try(var.landscape_tfstate.db_subnet_id, ""), - try(var.landscape_tfstate.web_subnet_id, ""), - try(var.landscape_tfstate.subnet_mgmt_id, "") - ] - ) - ip_rules = compact( - [ - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ] - ) + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = compact( + [ + try(var.landscape_tfstate.admin_subnet_id, ""), + try(var.landscape_tfstate.app_subnet_id, ""), + try(var.landscape_tfstate.db_subnet_id, ""), + try(var.landscape_tfstate.web_subnet_id, ""), + try(var.landscape_tfstate.subnet_mgmt_id, "") + ] + ) + ip_rules = compact( + [ + length(var.Agent_IP) > 0 ? 
var.Agent_IP : "" + ] + ) + } } From 470724b0a6f5a318e0fddc55c09f522028e4af66 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 7 Feb 2024 13:14:05 +0200 Subject: [PATCH 194/607] Add a space so that --filter command works for all tenants --- deploy/scripts/New-SDAFDevopsProject.ps1 | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index ac4f84f2ea..5983a15545 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -28,6 +28,8 @@ $ARM_TENANT_ID = $Env:ARM_TENANT_ID $versionLabel = "v3.10.1.0" + + # az logout # az account clear @@ -697,11 +699,11 @@ Add-Content -Path $fname -Value ("Web Application: " + $ApplicationName) #region App registration Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green -$found_appRegistration = (az ad app list --all --filter "startswith(displayName,'$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) +$found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) if ($found_appRegistration.Length -ne 0) { Write-Host "Found an existing App Registration:" $ApplicationName - $ExistingData = (az ad app list --all --filter "startswith(displayName,'$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json $APP_REGISTRATION_ID = $ExistingData.appId @@ -746,10 +748,10 @@ if ($authenticationMethod -eq "Service Principal") { $SPN_Created = $false $bSkip = $true - $found_appName = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'].displayName | [0]" --only-show-errors) + $found_appName = (az ad sp list --all --filter "startswith(displayName, '$spn_name')" --query "[?displayName=='$spn_name'].displayName | [0]" --only-show-errors) if ($found_appName.Length -gt 0) { Write-Host "Found an existing Service Principal:" $spn_name - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name']| [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName, '$spn_name')" --query "[?displayName=='$spn_name']| [0]" --only-show-errors) | ConvertFrom-Json Write-Host "Updating the variable group" $CP_ARM_CLIENT_ID = $ExistingData.appId @@ -771,7 +773,7 @@ if ($authenticationMethod -eq "Service Principal") { $SPN_Created = $true $Control_plane_SPN_data = (az ad sp create-for-rbac --role "Contributor" --scopes $scopes --name $spn_name --only-show-errors) | ConvertFrom-Json $CP_ARM_CLIENT_SECRET = $Control_plane_SPN_data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$spn_name')" --query "[?displayName=='$spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName, '$spn_name')" --query "[?displayName=='$spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json $CP_ARM_CLIENT_ID = $ExistingData.appId $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId $CP_ARM_OBJECT_ID = $ExistingData.Id @@ -864,11 +866,11 @@ if 
($authenticationMethod -eq "Service Principal") { Add-Content -path $fname -value ("Workload zone Service Principal: " + $workload_zone_spn_name) $SPN_Created = $false - $found_appName = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) + $found_appName = (az ad sp list --all --filter "startswith(displayName, '$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) if ($found_appName.Length -ne 0) { Write-Host "Found an existing Service Principal:" $workload_zone_spn_name -ForegroundColor Green - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName, '$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_ID = $ExistingData.appId $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId $ARM_OBJECT_ID = $ExistingData.Id @@ -886,7 +888,7 @@ if ($authenticationMethod -eq "Service Principal") { $SPN_Created = $true $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_SECRET = $Data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName, '$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_ID = $ExistingData.appId $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId $ARM_OBJECT_ID = $ExistingData.Id From d5d70f5e19df83e6c3f041b2906651e16cbc53d9 Mon Sep 17 00:00:00 2001 From: Ariel Sepulveda Date: Wed, 7 Feb 2024 12:54:05 +0100 Subject: [PATCH 195/607] Added PAT validation, modified PAT url build based on ADO_Organization. (#540) --- deploy/scripts/New-SDAFDevopsProject.ps1 | 54 +++++++++++++++++------- 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 5983a15545..66dd37adfb 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -30,16 +30,34 @@ $versionLabel = "v3.10.1.0" -# az logout +az logout -# az account clear +az account clear -# if ($ARM_TENANT_ID.Length -eq 0) { -# az login --output none --only-show-errors -# } -# else { -# az login --output none --tenant $ARM_TENANT_ID --only-show-errors -# } +if ($ARM_TENANT_ID.Length -eq 0) { + az login --output none --only-show-errors +} +else { + az login --output none --tenant $ARM_TENANT_ID --only-show-errors +} + +# Check if access to the Azure DevOps organization is available and prompt for PAT if needed +# Exact permissions required, to be validated, and included in the Read-Host text. 
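# Non-interactive use, as a hedged aside: the az devops extension also honours
# a PAT exported up front, so CI callers can pre-seed it and skip the prompt
# below, e.g.:
#   $env:AZURE_DEVOPS_EXT_PAT = $PipelinePat   # $PipelinePat is hypothetical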
+$checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) +if ($checkPAT.Length -eq 0) { + $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization" + $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) + if ($verifyPAT.Length -eq 0) { + Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press to exit" + exit + } + else { + Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green + } +} +else { + Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green +} Write-Host "" Write-Host "" @@ -142,16 +160,20 @@ if ($confirmation -ne 'y') { $Pool_Name = Read-Host "Enter the name of the agent pool" } -$url = ( az devops project list --organization $ADO_Organization --query "value | [0].url") -if ($url.Length -eq 0) { - Write-Error "Could not get the DevOps organization URL" - exit -} - $pipeline_permission_url = "" -$idx = $url.IndexOf("_api") -$pat_url = ($url.Substring(0, $idx) + "_usersSettings/tokens").Replace("""", "") +# Commenting this, since ADO_Organization is already validated at the beggining in $checkPAT +# $url = ( az devops project list --organization $ADO_Organization --query "value | [0].url") +# if ($url.Length -eq 0) { +# Write-Error "Could not get the DevOps organization URL" +# exit +# } +# +# $idx = $url.IndexOf("_api") +# $pat_url = ($url.Substring(0, $idx) + "_usersSettings/tokens").Replace("""", "") + +# Get pat_url directly from the $ADO_Organization, avoiding double slashes. +$pat_url = ($ADO_Organization.TrimEnd('/') + "/_usersSettings/tokens").Replace("""", "") $import_code = $false From 3ebe7bad85a5e48a44499f78d5633cd64867c0d6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 7 Feb 2024 14:20:43 +0200 Subject: [PATCH 196/607] Update service connection name in New-SDAFDevopsProject.ps1 script --- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 66dd37adfb..9d2f4c9d79 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -854,7 +854,7 @@ else { Write-Host Write-Host "" - Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'." + Write-Host "The browser will now open, Please create an 'Azure Resource Manager' service connection with the name 'Control_Plane_Service_Connection'." 
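# Illustrative only: EscapeDataString below keeps project names containing
# spaces valid in the settings URL:
#   [uri]::EscapeDataString('SAP Deployment')   # -> 'SAP%20Deployment'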
$connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices" Write-Host "URL: " $connections_url From b98aa03822b18f0e2ab6b14bef243d8113247a30 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 7 Feb 2024 14:53:46 +0200 Subject: [PATCH 197/607] Update Workload zone creation script to support MSIs --- deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 170 +++++++++++++----- 1 file changed, 124 insertions(+), 46 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index 14bceabe93..912c3d2ffc 100644 --- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -1,3 +1,16 @@ +function Show-Menu($data) { + Write-Host "================ $Title ================" + $i = 1 + foreach ($d in $data) { + Write-Host "($i): Select '$i' for $($d)" + $i++ + } + + Write-Host "q: Select 'q' for Exit" + +} + + #region Initialize # Initialize variables from Environment variables @@ -27,13 +40,49 @@ else { #endregion if ($Env:ARM_TENANT_ID.Length -eq 0) { - $Env:ARM_TENANT_ID= Read-Host "Please provide Tenant ID, you can find it in the Azure portal under Microsoft Entra ID -> Overview -> Tenant ID" + $Env:ARM_TENANT_ID = Read-Host "Please provide Tenant ID, you can find it in the Azure portal under Microsoft Entra ID -> Overview -> Tenant ID" az login --output none --only-show-errors } else { az login --output none --tenant $Env:ARM_TENANT_ID --only-show-errors } +# Check if access to the Azure DevOps organization is available and prompt for PAT if needed +# Exact permissions required, to be validated, and included in the Read-Host text. +$checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) +if ($checkPAT.Length -eq 0) { + $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization" + $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) + if ($verifyPAT.Length -eq 0) { + Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press to exit" + exit + } + else { + Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green + } +} +else { + Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green +} + +Write-Host "" +Write-Host "" + +if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { + $Title = "Select the authentication method to use" + $data = @('Service Principal', 'Managed Identity') + Show-Menu($data) + $selection = Read-Host $Title + $authenticationMethod = $data[$selection - 1] + +} +else { + $authenticationMethod = $Env:SDAF_AuthenticationMethod +} + +Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor Yellow + + Write-Host "The browser will now open, please copy the name of the Agent Pool" Start-Process "https://dev.azure.com/$ADO_Organization/$ADO_Project/_settings/agentpools" @@ -102,47 +151,62 @@ if ($Env:SDAF_WorkloadZone_SPN_NAME.Length -ne 0) { $workload_zone_spn_name = $Env:SDAF_WorkloadZone_SPN_NAME } else { - $workload_zone_spn_name = Read-Host "Please provide the Service Principal name to be used for the deployments in the workload zone" + if ($authenticationMethod -eq "Service Principal") { + $workload_zone_spn_name = Read-Host "Please provide the Service Principal name to be used for the deployments in 
the workload zone" + } } -$found_appName = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) +if ($authenticationMethod -eq "Service Principal") { + + $found_appName = (az ad sp list --all --filter "startswith(displayName,' $workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'].displayName | [0]" --only-show-errors) + + if ($found_appName.Length -ne 0) { + Write-Host "Found an existing Service Principal:" $workload_zone_spn_name -ForegroundColor Green + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ARM_CLIENT_ID = $ExistingData.appId + $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId + $ARM_OBJECT_ID = $ExistingData.Id -if ($found_appName.Length -ne 0) { - Write-Host "Found an existing Service Principal:" $workload_zone_spn_name -ForegroundColor Green - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json - $ARM_CLIENT_ID = $ExistingData.appId - $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $ARM_OBJECT_ID = $ExistingData.Id + $confirmation = Read-Host "Reset the Workload zone Service Principal password y/n?" + if ($confirmation -eq 'y') { + $ARM_CLIENT_SECRET = (az ad sp credential reset --id $ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors) + } + else { + $ARM_CLIENT_SECRET = Read-Host "Enter the Workload zone Service Principal password" + } - $confirmation = Read-Host "Reset the Workload zone Service Principal password y/n?" 
-  if ($confirmation -eq 'y') {
-    $ARM_CLIENT_SECRET = (az ad sp credential reset --id $ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors)
   }
   else {
-    $ARM_CLIENT_SECRET = Read-Host "Enter the Workload zone Service Principal password"
+    Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green
+    $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json
+    $ARM_CLIENT_SECRET = $Data.password
+    $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json
+    $ARM_CLIENT_ID = $ExistingData.appId
+    $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId
+    $ARM_OBJECT_ID = $ExistingData.Id
   }
-}
-else {
-  Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green
-  $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json
-  $ARM_CLIENT_SECRET = $Data.password
-  $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json
-  $ARM_CLIENT_ID = $ExistingData.appId
-  $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId
-  $ARM_OBJECT_ID = $ExistingData.Id
-}
+  $Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection"

-$Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection"
+  $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors )
+  if ($GroupID.Length -eq 0) {
+    Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green
+    az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false --output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project
+    $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors)
+  }

-$GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors )
-if ($GroupID.Length -eq 0) {
-  Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green
-  az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true --output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project
-  $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors)
 }
+else {
+  $Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection"
+  $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors )
+  if ($GroupID.Length -eq 0) {
+    Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green
+    az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false USE_MSI=true--output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project
+    $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors)
+  }
+}

 $idx = $url.IndexOf("_api")
 $pat_url = ($url.Substring(0, $idx) + "_usersSettings/tokens").Replace("""", "")
@@ -152,28 +216,42 @@ Start-Process $pat_url
 $PAT = Read-Host -Prompt "Please enter the PAT token: "
 az pipelines variable-group variable update --group-id $GroupID --name "WZ_PAT" --value $PAT --secret true --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project --output none

-$Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $ARM_CLIENT_SECRET
+if ($authenticationMethod -eq "Service Principal") {
+
+  $Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $ARM_CLIENT_SECRET

-az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
-az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_ID" --value $ARM_CLIENT_ID --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
-az pipelines variable-group variable update --group-id $GroupID --name "ARM_OBJECT_ID" --value $ARM_OBJECT_ID --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
+  az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
+  az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_ID" --value $ARM_CLIENT_ID --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
+  az pipelines variable-group variable update --group-id $GroupID --name "ARM_OBJECT_ID" --value $ARM_OBJECT_ID --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors

-$epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]")
-if ($epExists.Length -eq 0) {
-  Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green
-  az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
-  $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
-  az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
+  $epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]")
+  if ($epExists.Length -eq 0) {
+    Write-Host "Creating Service Endpoint" $Service_Connection_Name -ForegroundColor Green
+    az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --organization $ADO_ORGANIZATION --project $ADO_Project --output none --only-show-errors
+    $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
+    az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
+  }
+  else {
+    Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green
+    $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
+    az devops service-endpoint delete --id $epId --yes --organization $ADO_ORGANIZATION --project $ADO_Project
+    az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
+    $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
+    az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
+  }
 }
 else {
-  Write-Host "Service Endpoint already exists, recreating it with the updated credentials" -ForegroundColor Green
-  $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
-  az devops service-endpoint delete --id $epId --yes --organization $ADO_ORGANIZATION --project $ADO_Project
-  az devops service-endpoint azurerm create --azure-rm-service-principal-id $ARM_CLIENT_ID --azure-rm-subscription-id $Workload_zone_subscriptionID --azure-rm-subscription-name $Workload_zoneSubscriptionName --azure-rm-tenant-id $ARM_TENANT_ID --name $Service_Connection_Name --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
-  $epId = az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].id" -o tsv --organization $ADO_ORGANIZATION --project $ADO_Project
-  az devops service-endpoint update --id $epId --enable-for-all true --output none --only-show-errors --organization $ADO_ORGANIZATION --project $ADO_Project
-}
+  Write-Host ""
+  $Service_Connection_Name = $Workload_zone_code + "_WorkloadZone_Service_Connection"
+  Write-Host "The browser will now open, please create an 'Azure Resource Manager' service connection with the name '$Service_Connection_Name'."
+  $connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices"
+  Write-Host "URL: " $connections_url
+
+  Start-Process $connections_url
+  Read-Host -Prompt "Once you have created and validated the connection, press any key to continue"
+
+}

 #endregion

From ee407e1994deca7d92bdbcab0e885747001ff062 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 15:01:48 +0200
Subject: [PATCH 198/607] Add conditional check for workload zone code input

---
 deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
index 912c3d2ffc..798714bc67 100644
--- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
+++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
@@ -18,8 +18,12 @@ $ADO_Organization = $Env:SDAF_ADO_ORGANIZATION
 $ADO_Project = $Env:SDAF_ADO_PROJECT
 $Workload_zone_subscriptionID = $Env:SDAF_WorkloadZoneSubscriptionID
 $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName
+$Workload_zone_code = $Env:SDAF_WORKLOAD_ZONE_CODE

-$Workload_zone_code = Read-Host "Please provide the workload zone code "
+if ($Workload_zone_code.Length -eq 0) {
+
+  $Workload_zone_code = Read-Host "Please provide the workload zone code "
+}

 if ($ADO_Organization.Length -eq 0) {
   Write-Host "Organization is not set"
@@ -84,7 +88,8 @@ Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor

 Write-Host "The browser will now open, please copy the name of the Agent Pool"

-Start-Process "https://dev.azure.com/$ADO_Organization/$ADO_Project/_settings/agentpools"
+$pool_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/agentqueues"
+Start-Process $pool_url

 $Pool_Name = Read-Host "Please provide the Agent pool name"

From b73e04a6772b288f22849faa329523ebb77be5f5 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 15:03:09 +0200
Subject: [PATCH 199/607] Fix variable group creation command in New-SDAFDevopsWorkloadZone.ps1 script

---
 deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
index 798714bc67..f180342c69 100644
--- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
+++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1
@@ -208,7 +208,7 @@ else {
   $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors )
   if ($GroupID.Length -eq 0) {
     Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green
-    az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false USE_MSI=true--output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project
+    az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false USE_MSI=true --output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project
     $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors)
   }
 }

From b3bebc2d43a7a3084065957d505360aefa489cd0 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 16:10:36 +0200
Subject: [PATCH 200/607] Add Azure DevOps login step to control plane deployment pipeline

---
 deploy/pipelines/01-deploy-control-plane.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 7a3942e94d..80c3bac098 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -105,6 +105,8 @@ stages:

               az extension add --name azure-devops --output none

+              az devops login --organization $(System.CollectionUri) --output none
+
               az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none
               export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
               echo "$(variable_group) id: ${VARIABLE_GROUP_ID}"
@@ -416,6 +418,7 @@ stages:
               az config set extension.use_dynamic_install=yes_without_prompt
               az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)'
+              az pipelines variable-group list
               export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
               echo VARIABLE_GROUP_ID ${VARIABLE_GROUP_ID}
               if [ -z ${VARIABLE_GROUP_ID} ]; then

From f6de52613adc2d2096df963fbab32ec79219aeb5 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 16:17:12 +0200
Subject: [PATCH 201/607] Add echo statements to display agent, organization, and project information

---
 deploy/pipelines/01-deploy-control-plane.yaml | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index 80c3bac098..f7b278ac64 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -105,7 +105,9 @@ stages:

               az extension add --name azure-devops --output none

-              az devops login --organization $(System.CollectionUri) --output none
+              echo "Agent: " $(this_agent)
+              echo "Organization: " $(System.CollectionUri)
+              echo "Project: " $(System.TeamProject)

               az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none
               export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
               echo "$(variable_group) id: ${VARIABLE_GROUP_ID}"
@@ -409,7 +411,9 @@ stages:
               file_REMOTE_STATE_SA=""
               file_REMOTE_STATE_RG=$(deployerfolder)

-              echo "Agent: " $(this_agent)
+              echo "Agent: " $(this_agent)
+              echo "Organization: " $(System.CollectionUri)
+              echo "Project: " $(System.TeamProject)

               echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset"
               cd $CONFIG_REPO_PATH
               git checkout -q $(Build.SourceBranchName)

From 04e04b8f742884f9dec936f207cd612d60807819 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 16:33:03 +0200
Subject: [PATCH 202/607] Add az --version command to pipeline for debugging

---
 deploy/pipelines/01-deploy-control-plane.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index f7b278ac64..b0e5b7349e 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -102,6 +102,7 @@ stages:
               git checkout -q $(Build.SourceBranchName)
               echo -e "$green--- Configure devops CLI extension ---$reset"
               az config set extension.use_dynamic_install=yes_without_prompt
+              az --version

               az extension add --name azure-devops --output none

@@ -414,6 +415,8 @@ stages:
               echo "Agent: " $(this_agent)
               echo "Organization: " $(System.CollectionUri)
               echo "Project: " $(System.TeamProject)
+
+              az --version
               echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset"
               cd $CONFIG_REPO_PATH
               git checkout -q $(Build.SourceBranchName)

From 5150f3f7582435d6feccaceb88b82c5d157907a7 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 16:39:25 +0200
Subject: [PATCH 203/607] Add Azure DevOps CLI extension and display information

---
 deploy/pipelines/01-deploy-control-plane.yaml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index b0e5b7349e..91f4da086d 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -412,10 +412,10 @@ stages:
               file_REMOTE_STATE_SA=""
               file_REMOTE_STATE_RG=$(deployerfolder)

+              echo -e "$green--- Information ---$reset"
               echo "Agent: " $(this_agent)
               echo "Organization: " $(System.CollectionUri)
               echo "Project: " $(System.TeamProject)
-
-              az --version
               echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset"
               cd $CONFIG_REPO_PATH
               git checkout -q $(Build.SourceBranchName)

               echo -e "$green--- Configure devops CLI extension ---$reset"
               az config set extension.use_dynamic_install=yes_without_prompt
+
               az extension add --name azure-devops --output none
               az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)'
               az pipelines variable-group list

From 9e39235a01872a9ff9e38732d9b0adbe00ef775b Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 16:50:32 +0200
Subject: [PATCH 204/607] Add devops CLI extension configuration

---
 deploy/pipelines/03-sap-system-deployment.yaml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml
index 3b0858506b..9f9ec4aa50 100644
--- a/deploy/pipelines/03-sap-system-deployment.yaml
+++ b/deploy/pipelines/03-sap-system-deployment.yaml
@@ -70,6 +70,15 @@ stages:
               else
                 source /etc/profile.d/deploy_server.sh
               fi
+
+              echo -e "$green--- Configure devops CLI extension ---$reset"
+
+              az config set extension.use_dynamic_install=yes_without_prompt --output none
+
+              export AZURE_DEVOPS_EXT_PAT=$PAT
+
+              az extension add --name azure-devops --output none
+
               export AZURE_DEVOPS_EXT_PAT=$PAT

               HOME_CONFIG=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path)

From b4a5a455872253e347ee47d8be742ca61da424e1 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 17:55:01 +0200
Subject: [PATCH 205/607] Add repository creation if it doesn't exist

---
 deploy/scripts/New-SDAFDevopsProject.ps1 | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1
index 9d2f4c9d79..346d08087f 100644
--- a/deploy/scripts/New-SDAFDevopsProject.ps1
+++ b/deploy/scripts/New-SDAFDevopsProject.ps1
@@ -230,6 +230,11 @@ else {
   Write-Host "Using an existing project"

   $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv)
+  if ($repo_id.Length -eq 0) {
+    Write-Host "Creating repository '$ADO_Project'" -ForegroundColor Green
+    $repo_id = (az repos create --name $ADO_Project --query id --output tsv)
+  }
+
   az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT

   $repo_size = (az repos list --query "[?id=='$repo_id'].size | [0]")

From 293b87cc4b4ff00fd8ecaff4e081a76257f65725 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 18:13:16 +0200
Subject: [PATCH 206/607] Fix repository creation and import bug

---
 deploy/scripts/New-SDAFDevopsProject.ps1 | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1
index 346d08087f..76ead49385 100644
--- a/deploy/scripts/New-SDAFDevopsProject.ps1
+++ b/deploy/scripts/New-SDAFDevopsProject.ps1
@@ -232,12 +232,11 @@ else {
   $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv)
   if ($repo_id.Length -eq 0) {
     Write-Host "Creating repository '$ADO_Project'" -ForegroundColor Green
-    $repo_id = (az repos create --name $ADO_Project --query id --output tsv)
   }

   az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT

-  $repo_size = (az repos list --query "[?id=='$repo_id'].size | [0]")
+  $repo_size = (az repos list --query "[?name=='$ADO_Project'].size | [0]")

   if ($repo_size -eq 0) {
     Write-Host "Importing the repository from GitHub" -ForegroundColor Green

From 7ba37b41a2b7c7765eeec306de15fde56e031779 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 21:54:26 +0200
Subject: [PATCH 207/607] Web Application Updates

---
 Webapp/SDAF/Models/LandscapeModel.cs               |  3 +-
 .../ParameterDetails/LandscapeDetails.json         |  2 +-
 .../ParameterDetails/LandscapeTemplate.txt         | 37 ++++++++++---------
 3 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs
index 4e71f05efd..2341e636f8 100644
--- a/Webapp/SDAF/Models/LandscapeModel.cs
+++ b/Webapp/SDAF/Models/LandscapeModel.cs
@@ -270,6 +270,8 @@ public bool IsValid()

     public bool? use_AFS_for_installation_media { get; set; } = true;

+    public bool? use_AFS_for_shared_storage { get; set; } = true;
+
     public bool? create_transport_storage { get; set; } = true;

     public int? transport_volume_size { get; set; }
@@ -308,7 +310,6 @@ public bool IsValid()

     public string utility_vm_os_disk_type { get; set; } = "Premium_LRS";

-
     public bool? utility_vm_useDHCP { get; set; } = true;

     public Image utility_vm_image { get; set; }

diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
index 8115bb5374..0a7aa76423 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
+++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
@@ -825,7 +825,7 @@
       "Display": 1
     },
     {
-      "Name": "use_AFS_for_installation_media",
+      "Name": "use_AFS_for_shared_storage",
       "Required": false,
       "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.",
       "Type": "checkbox",

diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
index e9adea0c32..62aa25d096 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
@@ -34,6 +34,21 @@ $$location$$

 $$name_override_file$$

+#########################################################################################
+#                                                                                       #
+#  Resource group details                                                               #
+#                                                                                       #
+#########################################################################################
+
+# The two resource group name and arm_id can be used to control the naming and the creation of the resource group
+
+# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
+$$resourcegroup_name$$
+
+# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment
+$$resourcegroup_arm_id$$
+
+
 #########################################################################################
 #                                                                                       #
 #  Networking                                                                           #
 #                                                                                       #
@@ -234,6 +249,9 @@ $$enable_purge_control_for_keyvaults$$

 # enable_rbac_authorization_for_keyvault Controls the access policy model for the workload zone keyvault.
 $$enable_rbac_authorization_for_keyvault$$

+# Defines a list of Object IDs to be added to the keyvault
+$$additional_users_to_add_to_keyvault_policies$$
+
 #########################################################################################
 #                                                                                       #
 #  Credentials                                                                          #
 #                                                                                       #
 #########################################################################################
@@ -295,21 +313,6 @@ $$witness_storage_account_arm_id$$

 $$storage_account_replication_type$$

-#########################################################################################
-#                                                                                       #
-#  Resource group details                                                               #
-#                                                                                       #
-#########################################################################################
-
-# The two resource group name and arm_id can be used to control the naming and the creation of the resource group
-
-# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
-$$resourcegroup_name$$
-
-# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment
-$$resourcegroup_arm_id$$
-
-
 #########################################################################################
 #                                                                                       #
 #  Private DNS support                                                                  #
 #                                                                                       #
@@ -334,8 +337,8 @@ $$dns_server_list$$

 # NFS indicates that a custom solution is used for NFS
 $$NFS_provider$$

-# use_AFS_for_installation_media defines if shared media is on AFS even when using ANF for data
-$$use_AFS_for_installation_media$$
+# use_AFS_for_shared_storage defines if shared media is on AFS even when using ANF for data
+$$use_AFS_for_shared_storage$$

 #########################################################################################
 #                                                                                       #

From e641b56729caf2aa7eb8aa46d5207b57f9f585ba Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 23:43:08 +0200
Subject: [PATCH 208/607] Add soft delete retention days variable

---
 deploy/terraform/bootstrap/sap_deployer/module.tf             | 1 +
 deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf    | 5 +++++
 deploy/terraform/run/sap_landscape/module.tf                  | 1 +
 deploy/terraform/run/sap_landscape/tfvar_variables.tf         | 5 +++++
 .../terraform-units/modules/sap_deployer/key_vault.tf         | 2 +-
 .../terraform-units/modules/sap_deployer/variables_global.tf  | 2 ++
 .../modules/sap_landscape/key_vault_sap_landscape.tf          | 2 +-
 .../modules/sap_landscape/variables_global.tf                 | 2 ++
 8 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf
index 334d917f96..326462f9a6 100644
--- a/deploy/terraform/bootstrap/sap_deployer/module.tf
+++ b/deploy/terraform/bootstrap/sap_deployer/module.tf
@@ -43,6 +43,7 @@ module "sap_deployer" {
   place_delete_lock_on_resources     = var.place_delete_lock_on_resources
   public_network_access_enabled      = var.public_network_access_enabled || !var.use_private_endpoint
   sa_connection_string               = var.sa_connection_string
+  soft_delete_retention_days         = var.soft_delete_retention_days
   set_secret_expiry                  = var.set_secret_expiry
   spn_id                             = var.spn_id
   ssh-timeout                        = var.ssh-timeout

diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf
index 46621aef71..c5a5c5c1b0 100644
--- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf
+++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf
@@ -332,6 +332,11 @@ variable "enable_purge_control_for_keyvaults" {
     default     = false
   }

+variable "soft_delete_retention_days" {
+    description = "The number of days that items should be retained in the soft delete period"
+    default     = 7
+  }
+
 variable "additional_users_to_add_to_keyvault_policies" {
     description = "List of object IDs to add to key vault policies"
     default     = [""]

diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf
index 322b8a5771..d401c92c29 100644
--- a/deploy/terraform/run/sap_landscape/module.tf
+++ b/deploy/terraform/run/sap_landscape/module.tf
@@ -49,6 +49,7 @@ module "sap_landscape" {
   public_network_access_enabled      = var.public_network_access_enabled || !var.use_private_endpoint
   register_virtual_network_to_dns    = var.register_virtual_network_to_dns
   service_principal                  = var.use_spn ? local.service_principal : local.account
+  soft_delete_retention_days         = var.soft_delete_retention_days
   storage_account_replication_type   = var.storage_account_replication_type
   tags                               = var.tags
   terraform_template_version         = local.version_label

diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
index b393888078..e210c6b1f9 100644
--- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
@@ -310,6 +310,11 @@ variable "keyvault_private_endpoint_id" {
     default     = ""
   }

+variable "soft_delete_retention_days" {
+    description = "The number of days that items should be retained in the soft delete period"
+    default     = 7
+  }
+
 #########################################################################################
 #                                                                                       #
 #  Authentication variables                                                             #

diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
index ecd65b2d87..453a835b82 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf
@@ -21,7 +21,7 @@ resource "azurerm_key_vault" "kv_user" {
                                            )
   tenant_id                  = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].tenant_id : data.azurerm_user_assigned_identity.deployer[0].tenant_id
-  soft_delete_retention_days = 7
+  soft_delete_retention_days = var.soft_delete_retention_days
   purge_protection_enabled   = var.enable_purge_control_for_keyvaults
   sku_name                   = "standard"

diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf
index b659447f96..94af1945e2 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf
@@ -30,6 +30,8 @@ variable "additional_users_to_add_to_keyvault_policies" { description = "List of
 variable "enable_purge_control_for_keyvaults" { description = "Disables the purge protection for Azure keyvaults." }
 variable "key_vault"                          { description = "The user brings existing Azure Key Vaults" }
 variable "set_secret_expiry"                  { description = "Set expiry date for secrets" }
+variable "soft_delete_retention_days"         { description = "The number of days that items should be retained in the soft delete period" }
+

 #########################################################################################
 #                                                                                       #

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
index d6b8ba9f68..9374ef738d 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
@@ -19,7 +19,7 @@ resource "azurerm_key_vault" "kv_user" {
                                            azurerm_resource_group.resource_group[0].name
                                          )
   tenant_id                  = local.service_principal.tenant_id
-  soft_delete_retention_days = 7
+  soft_delete_retention_days = var.soft_delete_retention_days
   purge_protection_enabled   = var.enable_purge_control_for_keyvaults
   sku_name                   = "standard"
   enable_rbac_authorization  = var.enable_rbac_authorization_for_keyvault

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf
index b623e3bc58..b39275ab2d 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf
@@ -111,6 +111,8 @@ variable "enable_rbac_authorization_for_keyvault" { description = "Enables
 variable "keyvault_private_endpoint_id"       { description = "Existing private endpoint for key vault" }
+variable "soft_delete_retention_days"         { description = "The number of days that items should be retained in the soft delete period" }
+

 #########################################################################################
 #                                                                                       #

From 933aef3180eff0d6533fd7f1a85d60062d76d4d0 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Wed, 7 Feb 2024 23:49:20 +0200
Subject: [PATCH 209/607] Add soft delete retention days

---
 deploy/terraform/run/sap_deployer/module.tf          | 1 +
 deploy/terraform/run/sap_deployer/tfvar_variables.tf | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf
index fd48b72325..44ac47ec20 100644
--- a/deploy/terraform/run/sap_deployer/module.tf
+++ b/deploy/terraform/run/sap_deployer/module.tf
@@ -44,6 +44,7 @@ module "sap_deployer" {
   public_network_access_enabled      = var.public_network_access_enabled || !var.use_private_endpoint
   sa_connection_string               = var.sa_connection_string
   set_secret_expiry                  = var.set_secret_expiry
+  soft_delete_retention_days         = var.soft_delete_retention_days
   spn_id                             = var.spn_id
   ssh-timeout                        = var.ssh-timeout
   subnets_to_add                     = var.subnets_to_add_to_firewall_for_keyvaults_and_storage

diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
index 1b0ac83e0e..1afd9c534b 100644
--- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
@@ -340,6 +340,11 @@ variable "set_secret_expiry" {
     type        = bool
   }

+variable "soft_delete_retention_days" {
+    description = "The number of days that items should be retained in the soft delete period"
+    default     = 7
+  }
+
 #######################################4#######################################8
 #                                                                              #
 #                            Miscellaneous settings                            #

From 0b54457a39d123ec072fce9247fce84b64061e7a Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 8 Feb 2024 01:04:28 +0200
Subject: [PATCH 210/607] Add public_network_access_enabled variable to tfvar_variables.tf and transform.tf

---
 .../terraform/bootstrap/sap_library/tfvar_variables.tf |  5 +++++
 deploy/terraform/bootstrap/sap_library/transform.tf    |  2 ++
 deploy/terraform/run/sap_library/tfvar_variables.tf    |  5 +++++
 deploy/terraform/run/sap_library/transform.tf          |  2 ++
 .../modules/sap_library/storage_accounts.tf            | 10 ++--------
 5 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
index 02532b4dde..10d5c3b7d4 100644
--- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
+++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
@@ -200,6 +200,11 @@ variable "short_named_endpoints_nics" {
     default     = false
   }

+variable "public_network_access_enabled" {
+    description = "Boolean value indicating if public access should be enabled for key vaults and storage"
+    default     = true
+    type        = bool
+  }

 #########################################################################################

diff --git a/deploy/terraform/bootstrap/sap_library/transform.tf b/deploy/terraform/bootstrap/sap_library/transform.tf
index b4c38e67a9..fa4dc57114 100644
--- a/deploy/terraform/bootstrap/sap_library/transform.tf
+++ b/deploy/terraform/bootstrap/sap_library/transform.tf
@@ -66,6 +66,7 @@ locals {
                                            )
                                          }
     shared_access_key_enabled         = var.shared_access_key_enabled
+    public_network_access_enabled     = var.public_network_access_enabled
   }
   storage_account_tfstate = {
     arm_id = try(
@@ -114,6 +115,7 @@ locals {
                                            )
                                          }
     shared_access_key_enabled         = var.shared_access_key_enabled
+    public_network_access_enabled     = var.public_network_access_enabled
   }

 }

diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf
index aaed70aab2..80c79c3484 100644
--- a/deploy/terraform/run/sap_library/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_library/tfvar_variables.tf
@@ -201,6 +201,11 @@ variable "use_private_endpoint" {
     type        = bool
   }

+variable "public_network_access_enabled" {
+    description = "Boolean value indicating if public access should be enabled for key vaults and storage"
+    default     = true
+    type        = bool
+  }

 #########################################################################################
 #                                                                                       #
 #  Miscellaneous definitions                                                            #

diff --git a/deploy/terraform/run/sap_library/transform.tf b/deploy/terraform/run/sap_library/transform.tf
index 6e3e13a878..284266a49c 100644
--- a/deploy/terraform/run/sap_library/transform.tf
+++ b/deploy/terraform/run/sap_library/transform.tf
@@ -66,6 +66,7 @@ locals {
                                            )
                                          }
     shared_access_key_enabled         = var.shared_access_key_enabled
+    public_network_access_enabled     = var.public_network_access_enabled
   }

   storage_account_tfstate = {
@@ -116,6 +117,7 @@ locals {
                                          }
     shared_access_key_enabled         = var.shared_access_key_enabled
+    public_network_access_enabled     = var.public_network_access_enabled
   }

 }

diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
index 12c92c471b..2695720340 100644
--- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
@@ -27,10 +27,7 @@ resource "azurerm_storage_account" "storage_tfstate" {
   min_tls_version                 = "TLS1_2"
   allow_nested_items_to_be_public = false

-  public_network_access_enabled   = try(var.deployer_tfstate.public_network_access_enabled, var.bootstrap ? (
-                                      !local.enable_firewall_for_keyvaults_and_storage) : (
-                                      local.enable_firewall_for_keyvaults_and_storage)
-                                    )
+  public_network_access_enabled   = var.storage_account_sapbits.public_network_access_enabled

   enable_https_traffic_only       = true
@@ -235,10 +232,7 @@ resource "azurerm_storage_account" "storage_sapbits" {

   allow_nested_items_to_be_public = false

-  public_network_access_enabled   = try(var.deployer_tfstate.public_network_access_enabled, var.bootstrap ? (
-                                      !local.enable_firewall_for_keyvaults_and_storage) : (
-                                      local.enable_firewall_for_keyvaults_and_storage)
-                                    )
+  public_network_access_enabled   = var.storage_account_sapbits.public_network_access_enabled

   routing {
     publish_microsoft_endpoints = true

From 9928a283025e9a7fbbbe6488859a911f3ffa50e3 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 8 Feb 2024 09:35:02 +0200
Subject: [PATCH 211/607] Add soft delete retention days and update parameter details

---
 Webapp/SDAF/Models/LandscapeModel.cs               | 13 +++++++++++++
 Webapp/SDAF/ParameterDetails/LandscapeDetails.json |  9 +++++++++
 Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt |  3 +++
 3 files changed, 25 insertions(+)

diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs
index 2341e636f8..95f1411671 100644
--- a/Webapp/SDAF/Models/LandscapeModel.cs
+++ b/Webapp/SDAF/Models/LandscapeModel.cs
@@ -265,6 +265,13 @@ public bool IsValid()

     public bool? enable_rbac_authorization_for_keyvault { get; set; } = false;

+    public int? soft_delete_retention_days { get; set; } = 14;
+
+    /*---------------------------------------------------------------------------8
+    |                                                                             |
+    |                              NFS information                                |
+    |                                                                             |
+    +------------------------------------4--------------------------------------*/
     public string NFS_provider { get; set; }
@@ -276,6 +283,12 @@ public bool IsValid()

     public int? transport_volume_size { get; set; }

+    /*---------------------------------------------------------------------------8
+    |                                                                             |
+    |                        Storage Account information                          |
+    |                                                                             |
+    +------------------------------------4--------------------------------------*/
+
     [StorageAccountIdValidator]
     public string diagnostics_storage_account_arm_id { get; set; }

diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
index 0a7aa76423..03ad2d5641 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
+++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
@@ -726,6 +726,15 @@
       "Options": [],
       "Overrules": "",
       "Display": 2
+    },
+    {
+      "Name": "soft_delete_retention_days",
+      "Required": false,
+      "Description": "The number of days that items should be retained in the soft delete period",
+      "Type": "field",
+      "Options": [],
+      "Overrules": "",
+      "Display": 2
     }
   ]
 },

diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
index 62aa25d096..c70363ca2c 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
@@ -252,6 +252,9 @@ $$enable_rbac_authorization_for_keyvault$$
 # Defines a list of Object IDs to be added to the keyvault
 $$additional_users_to_add_to_keyvault_policies$$

+# The number of days that items should be retained in the soft delete period
+$$soft_delete_retention_days$$
+
 #########################################################################################
 #                                                                                       #
 #  Credentials                                                                          #

From b723182f53b00637621e264d163c69521ba21f7d Mon Sep 17 00:00:00 2001
From: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com>
Date: Thu, 8 Feb 2024 14:23:44 +0100
Subject: [PATCH 212/607] Ensure calling sap notes 3119751 after the mount (#541)

---
 deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
index 679e0b11fa..88dfda93f6 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
@@ -425,5 +425,11 @@
   when:
     - custom_mounts is defined

+# Call SAP Note 3119751 to ensure the symlink is created after the mounts.
+- name: "Calling SAP Note 3119751"
+  ansible.builtin.include_tasks: roles-sap-os/2.10-sap-notes/tasks/2.10.3119751.yaml
+  when:
+    - platform == 'HANA'
+    - distribution_id in ['redhat8', 'redhat9']
 ...
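A minimal ad-hoc check of the gating condition added above (an editorial sketch, not part of the patch series; it assumes 'platform' and 'distribution_id' are supplied to the play the same way the patched role receives them, e.g. via sap-parameters.yaml):

    # Sketch: prints whether the SAP Note 3119751 include would fire on a host.
    # Assumption: 'platform' and 'distribution_id' are defined, as in the patched role.
    - hosts: all
      gather_facts: false
      tasks:
        - name: "Show whether the SAP Note 3119751 include would run"
          ansible.builtin.debug:
            msg: "include fires: {{ (platform == 'HANA') and (distribution_id in ['redhat8', 'redhat9']) }}"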
From 827e7f858911ff55c99f0242238b91b13b5ea8f4 Mon Sep 17 00:00:00 2001 From: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Date: Thu, 8 Feb 2024 14:24:20 +0100 Subject: [PATCH 213/607] Include Red Hat 9 support in setting NFS service name (#542) --- deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml index ae93a77e87..307685101a 100644 --- a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml @@ -264,7 +264,6 @@ when: - custom_exports is defined - - name: "2.3 Exports: - Local NFS" block: - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" @@ -272,11 +271,10 @@ nfs_service: 'nfsserver' when: "'SUSE' == ansible_os_family | upper" - - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: nfs_service: "nfs-server" - when: "'redhat8' == distribution_id" + when: "'redhat8' == distribution_id or 'redhat9' == distribution_id" - name: "2.3 Exports: - Set the NFS Service name oracle {{ distribution_id }}" ansible.builtin.set_fact: From 8def4a6525b6761e292bfd0d7665c7cd7e28f6ed Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 9 Feb 2024 15:37:42 +0200 Subject: [PATCH 214/607] Add custom disk size and database size to SystemController.cs and custom_sizes.json --- Webapp/SDAF/Controllers/SystemController.cs | 2 ++ Webapp/SDAF/ParameterDetails/custom_sizes.json | 2 +- deploy/terraform/run/sap_system/module.tf | 5 +++++ .../modules/sap_system/output_files/inventory.tf | 1 + .../modules/sap_system/output_files/sap-parameters.yml.tmpl | 3 +++ .../modules/sap_system/output_files/variables_global.tf | 2 ++ 6 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Webapp/SDAF/Controllers/SystemController.cs b/Webapp/SDAF/Controllers/SystemController.cs index 785fef68ce..2a6cc925bf 100644 --- a/Webapp/SDAF/Controllers/SystemController.cs +++ b/Webapp/SDAF/Controllers/SystemController.cs @@ -113,6 +113,7 @@ public async Task GetById(string id, string partitionKey) { file = await _appFileService.GetByIdAsync(id + "_custom_sizes.json", partitionKey); s.custom_disk_sizes_filename = id + "_custom_sizes.json"; + s.database_size = "Custom"; } catch { @@ -266,6 +267,7 @@ public async Task DeployConfirmedAsync(string id, string var stream = new MemoryStream(file.Content); system.custom_disk_sizes_filename = id + "_custom_sizes.json"; + system.database_size = "Custom"; string thisContent = System.Text.Encoding.UTF8.GetString(stream.ToArray()); string pathForNaming = $"/SYSTEM/{id}/{id}_custom_sizes.json"; diff --git a/Webapp/SDAF/ParameterDetails/custom_sizes.json b/Webapp/SDAF/ParameterDetails/custom_sizes.json index 701c598d55..9be1526af9 100644 --- a/Webapp/SDAF/ParameterDetails/custom_sizes.json +++ b/Webapp/SDAF/ParameterDetails/custom_sizes.json @@ -1,6 +1,6 @@ { "db": { - "Default": { + "Custom": { "compute": { "vm_size": "Standard_E20s_v4", "accelerated_networking": true diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 9e8b226d67..405f0023b4 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -423,8 +423,13 @@ module "output_files" { local.application_tier.scs_server_count ) web_server_count = try(local.application_tier.webdispatcher_count, 0) + + 
######################################################################################### + # Miscallaneous # + ######################################################################################### use_simple_mount = local.validated_use_simple_mount upgrade_packages = var.upgrade_packages + scale_out = var.database_HANA_use_ANF_scaleout_scenario ######################################################################################### # iSCSI # diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 356601c449..aa50c2933f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -196,6 +196,7 @@ resource "local_file" "sap-parameters_yml" { "" ) asd_disks = concat(var.scs_shared_disks, var.database_shared_disks) + scale_out = var.scale_out scs_cluster_loadbalancer_ip = try(format("%s/%s", var.scs_cluster_loadbalancer_ip, var.app_subnet_netmask), "") scs_cluster_type = var.scs_cluster_type scs_high_availability = var.scs_high_availability diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl index 4d7db0805c..df64d79d53 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl @@ -76,6 +76,9 @@ db_instance_number: "${db_instance_number}" platform: ${platform} +# Scale out defines if the database is to be deployed in a scale out configuration +scale_out: ${scale_out} + # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability db_high_availability: ${database_high_availability} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index c007e5e91c..67cdef3f0b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -136,8 +136,10 @@ variable "save_naming_information" { description = "If defined, will save the naming information for the resources" default = false } +variable "scale_out" { description = "If true, the SAP System will be scale out" } } variable "scs_shared_disks" { description = "SCS Azure Shared Disk" } + variable "scs_cluster_loadbalancer_ip" { description = "This is a Cluster IP address for Windows load balancer for central services" } variable "scs_cluster_type" { description = "Cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI" From 3c1d906bc45f3f0f6bc8e3621934c7e9f5147481 Mon Sep 17 00:00:00 2001 From: Harm Jan Stam Date: Fri, 9 Feb 2024 15:05:25 +0100 Subject: [PATCH 215/607] Only use SSL generation and distribution when InstanceType is ABAP (#535) * Only use SSL generation and distribution when InstanceType is ABAP * Bugfix virtual_host variable in app install role * Bugfix Terraform deployment withouth deployer VM and state --- .../ansible/playbook_04_00_00_db_install.yaml | 6 +-- .../tasks/4.2.1.0-db2_ha_install_primary.yml | 4 ++ .../4.2.1.2-db2_ha_install_secondary.yml | 5 +++ 
.../tasks/4.2.1.4-db2_haparameters.yaml | 42 ++++++++++--------- .../4.2.1-db2-hainstall/tasks/main.yml | 8 +++- .../sap_landscape/key_vault_sap_landscape.tf | 2 +- 6 files changed, 42 insertions(+), 25 deletions(-) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index a5a755b5ee..2d9f3df6d7 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -631,7 +631,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: - tier: db2 # Actions for Oracle DB Servers + tier: db2 # Actions for DB2 Servers main_password: "{{ hostvars.localhost.sap_password }}" sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" @@ -659,8 +659,8 @@ vars: suffix: "_DB" tier: 'db2' - prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/ABAP', '').split(':')[1] }}" - path: "INSTALL/DISTRIBUTED/ABAP/DB" + prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/' + {{ db2_instance_type }}, '').split(':')[1] }}" + path: "INSTALL/DISTRIBUTED/{{ db2_instance_type }}/DB" this_sid: "{{ sap_sid }}" diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml index d4492ea8d3..3db6b2e8dc 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml @@ -110,6 +110,10 @@ - "{{ bom.product_ids.dblha }}" verbosity: 2 + - name: "SAP DB2 - register InstanceType" + ansible.builtin.set_fact: + db2_instance_type: "{{ bom.InstanceType | default('ABAP') }}" + - name: "Create temp directory for sid" ansible.builtin.file: path: "{{ tmp_directory }}/{{ sap_sid | upper }}" diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.2-db2_ha_install_secondary.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.2-db2_ha_install_secondary.yml index af49e192f4..6ec7b7e6f2 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.2-db2_ha_install_secondary.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.2-db2_ha_install_secondary.yml @@ -101,6 +101,11 @@ - "{{ sap_inifile }}" - "{{ bom.product_ids.dblsby }}" verbosity: 2 + + - name: "SAP DB2 - register InstanceType" + ansible.builtin.set_fact: + db2_instance_type: "{{ bom.InstanceType | default('ABAP') }}" + # *====================================4=======================================8 # SAP DB2: Install # 2230669 - System Provisioning Using a Parameter Input File diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml index c1d7e9582d..4dc6d5139a 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml @@ -2,8 +2,11 @@ # ################### Setting DB2 HA parameters on Primary DB ######################### - name: "DB2 - Set up HA parameters on Primary DB" + when: ansible_hostname == primary_instance_name + become: true + become_user: db2{{ db_sid | lower }} block: - - name: " DB2 Primary DB - Set Fact for hadr local host and remote host " + - name: "DB2 Primary DB - Set Fact for hadr local host and remote host " 
ansible.builtin.set_fact: db_hadr_local_host: "{{ hostvars[primary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" db_hadr_remote_host: "{{ hostvars[secondary_instance_name]['virtual_host'] }}.{{ sap_fqdn }}" @@ -30,7 +33,6 @@ ansible.builtin.debug: msg: "Result: {{ pridb2status.stdout }}" - - name: "DB2 Primary DB - Start the Primary DB" ansible.builtin.shell: db2start register: db2startstatus @@ -54,7 +56,6 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_2 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate - db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 60 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -66,7 +67,7 @@ executable: /bin/csh environment: PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" - when: ansible_os_family == 'Suse' + when: ansible_os_family == 'Suse' - name: "DB2 Primary DB - HA Config - RHEL" # RHEL - Settings - When you use an Azure Pacemaker fencing agent, set the following parameters: @@ -81,7 +82,6 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_2 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate - db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 45 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -93,18 +93,15 @@ executable: /bin/csh environment: PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" - when: ansible_os_family == 'RedHat' - - when: ansible_hostname == primary_instance_name - become: true - become_user: db2{{ db_sid | lower }} + when: ansible_os_family == 'RedHat' # ################### End of Section for Primary DB ###################### -# ################## Section Start for Secondary DB ############################### # ################## Setup DB2 HA parameters on Secondary ############################### - - name: "DB2 - Set up HA parameters on Secondary DB" + when: ansible_hostname == secondary_instance_name + become: true + become_user: db2{{ db_sid | lower }} block: - name: " DB2 Secondary DB - Set Fact for hadr local host and remote host " ansible.builtin.set_fact: @@ -133,7 +130,6 @@ ansible.builtin.debug: msg: "Result: {{ secdb2status.stdout }}" - - name: "DB2 Secondary DB - Start the Primary DB" ansible.builtin.shell: db2start args: @@ -157,7 +153,6 @@ db2 update db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_1 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate - db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 60 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -184,7 +179,6 @@ db2 update 
db cfg for {{ db_sid }} using HADR_REMOTE_HOST {{ db_hadr_remote_host }} immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_SVC {{ db_sid | upper }}_HADR_1 immediate db2 update db cfg for {{ db_sid }} using HADR_REMOTE_INST db2{{ db_sid | lower }} immediate - db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} db2 update db cfg for {{ db_sid }} using HADR_TIMEOUT 45 immediate db2 update db cfg for {{ db_sid }} using HADR_SYNCMODE NEARSYNC immediate db2 update db cfg for {{ db_sid }} using HADR_SPOOL_LIMIT 1000 immediate @@ -197,11 +191,21 @@ register: db2_update failed_when: db2_update.rc not in [0,2] when: ansible_os_family == 'RedHat' +# ################### End of Section for Secondary DB ###################### - when: ansible_hostname == secondary_instance_name +- name: "DB2 DB - HADR SSL Configuration" + when: + - db2_instance_type == 'ABAP' + - db2_ssl_label is defined become: true become_user: db2{{ db_sid | lower }} - -# ################### End of Section for Secondary DB ###################### - + block: + - name: "DB2 DB - Set HADR SSL Configuration" + ansible.builtin.shell: db2 update db cfg for {{ db_sid }} using HADR_SSL_LABEL {{ db2_ssl_label }} + register: db2_update + failed_when: db2_update.rc not in [0,2] + args: + executable: /bin/csh + environment: + PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" ... diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml index 3e7504e7ab..4a3008312b 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml @@ -19,7 +19,6 @@ - name: "DB2 Primary System Setup" block: - - name: "DB2 Primary System Install" ansible.builtin.import_tasks: 4.2.1.0-db2_ha_install_primary.yml @@ -29,7 +28,10 @@ - name: "DB2 - Keystore Setup on Primary node" ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml + # SSL communication via SWPM is only available if you're using AS ABAP + # Setting up SSL voor AS JAVA requires manual actions with the J2EE Config tool - name: "DB2 - Generate SSL on Primary node" + when: db2_instance_type == 'ABAP' ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml always: - name: "DB2 Primary System Install: result" @@ -56,7 +58,10 @@ - name: "DB2 - Keystore Setup on Secondary node" ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml + # SSL communication via SWPM is only available if you're using AS ABAP + # Setting up SSL voor AS JAVA requires manual actions with the J2EE Config tool - name: "DB2 - Distribute SSL certificate to Secondary node" + when: db2_instance_type == 'ABAP' ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml - name: "DB2 - Restore Secondary with backup of Primary DB" @@ -88,7 +93,6 @@ - name: "DB2 - HA Configuration Post Install Profile update" ansible.builtin.import_tasks: 4.2.1.7-sap-profile-changes.yaml - # /*---------------------------------------------------------------------------8 # | END | # +------------------------------------4--------------------------------------*/ diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index 9374ef738d..60da5f9307 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ 
b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -98,7 +98,7 @@ resource "azurerm_key_vault_access_policy" "kv_user" { provider = azurerm.main count = (var.key_vault.exists || var.enable_rbac_authorization_for_keyvault) ? ( 0) : ( - (var.deployer_tfstate.deployer_uai.principal_id == local.service_principal.object_id) ? 0 : 1 + (length(var.deployer_tfstate) > 0 ? var.deployer_tfstate.deployer_uai.principal_id == local.service_principal.object_id : false) ? 0 : 1 ) key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id tenant_id = local.service_principal.tenant_id From 59ce4d324d6db7f8d1c5e36afbe0ab347be40b50 Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Fri, 9 Feb 2024 19:38:50 +0530 Subject: [PATCH 216/607] Feature/scaleout-anf optimizations (#544) * Update 2.6.1-anf-mounts.yaml merge hana anf block from 2.6.1.2 into 2.6.1 * Update 2.6.8-anf-mounts-simplemount.yaml Add block for ScaleOut ANF hana code into ANF simple mount from 2.6.1.2 task * Update main.yaml * Update 2.6.1.2-anf-mounts-scaleout.yaml --- .../tasks/2.6.1-anf-mounts.yaml | 535 +++-- .../tasks/2.6.1.2-anf-mounts-scaleout.yaml | 1976 +++++++++-------- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 540 +++-- .../2.6-sap-mounts/tasks/main.yaml | 20 +- 4 files changed, 1733 insertions(+), 1338 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 3a5a8e8d8c..8348b84e88 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -558,177 +558,376 @@ - tier == 'sapos' - node_tier == 'hana' -- name: "ANF Mount: Create /hana folder" - ansible.builtin.file: - path: /hana - mode: 0755 - state: directory - group: sapsys - when: - - tier == 'sapos' - - node_tier == 'hana' - -- name: "ANF Mount: HANA data" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA log" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA shared" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': 
true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA data (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] - -- name: "ANF Mount: HANA log (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 1 - - db_hosts | length ==2 - - ansible_hostname == db_hosts[1] - -- name: "ANF Mount: HANA shared (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" +# Standard block tasks for non scale out setups +- name: "ANF Mount: Run tasks for non-scale out setups" + block: + - name: "ANF Mount: Create /hana folder" + ansible.builtin.file: + path: /hana + mode: 0755 + state: directory + group: sapsys + when: + - tier == 'sapos' + - node_tier == 'hana' + + - name: "ANF Mount: HANA data" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA log" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA shared" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 
'temppath': 'hanashared', + 'folder': 'hanashared', + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA data (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: HANA log (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: HANA shared (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'hanashared', + 'folder': 'hanashared', + 'mount': '{{ hana_shared_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})" + ansible.builtin.file: + owner: '{{ hdbadm_uid }}' + group: sapsys + path: "{{ item.path }}" + state: directory + recurse: true + loop: + - { 'path': '/hana/data' } + - { 'path': '/hana/log' } + - { 'path': '/hana/shared' } + when: + - tier == 'sapos' + - node_tier == 'hana' when: - - tier == 'sapos' - - node_tier == 'hana' + - not db_scale_out
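# Illustrative note (not part of the patch): the two block-level guards in this
# file are mutually exclusive. Read together they select:
#   db_scale_out == false                                   -> the "non-scale out" block above
#   db_scale_out == true  and db_high_availability == false -> the "scale out" block below
#   db_scale_out == true  and db_high_availability == true  -> neither block in this file runs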
+ +# Run this block set when db_scale_out is true but db_high_availability is false +- name: "ANF Mount: Run tasks for scale out setups" + block: + - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/usr/sap/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/data/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/log/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: HANA shared - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'shared', + # change folder to match the mount folder within the share + 'folder': 'shared', + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + # Run this on all the nodes, not just primary. + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 0 + + # This runs for a unique share per node + - name: "ANF Mount: usrsap - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'usrsap', + 'temppath': 'usrsap', + 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/usr/sap/{{ db_sid | upper }}', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length == 1 + # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. + - db_scale_out is defined + - db_scale_out + + - name: "ANF Mount: HANA Data - Scale out - Create mount list" + block: + - name: "Initialize HANA Data mountpoints" + ansible.builtin.set_fact: + hana_data_scaleout_mountpoint: [] + - name: "Build HANA Data mountpoints" + ansible.builtin.set_fact: + # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" + hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" + loop: "{{ hana_data_mountpoint }}" + loop_control: + index_var: my_index + # Note the object structure and the specific key:pair values; do not modify the hard-coded ones. + vars: + dataupdate: + - { type: 'data', + temppath: 'hanadata', + folder: 'hanadata', + mount: "{{ item }}", + opts: "{{ mnt_options }}", + path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", + permissions: '0775', + set_chattr_on_dir: false, + target_nodes: ['hana'], + create_temp_folders: 'true' + } + when: + - node_tier == 'hana' + - hana_data_mountpoint is defined + + - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" + ansible.builtin.debug: + var: hana_data_scaleout_mountpoint
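# Illustrative note (not part of the patch): assuming db_sid is 'HDB' and
# hana_data_mountpoint lists two ANF volumes (the volume sources below are invented),
# the debug task above would print a structure shaped like:
#
# hana_data_scaleout_mountpoint:
#   - { type: 'data', temppath: 'hanadata', folder: 'hanadata',
#       mount: '10.1.0.4:/HDB-hanadata-1', opts: '<mnt_options>',
#       path: '/hana/data/HDB/mnt00001', permissions: '0775',
#       set_chattr_on_dir: false, target_nodes: ['hana'], create_temp_folders: 'true' }
#   - { type: 'data', temppath: 'hanadata', folder: 'hanadata',
#       mount: '10.1.0.4:/HDB-hanadata-2', opts: '<mnt_options>',
#       path: '/hana/data/HDB/mnt00002', permissions: '0775',
#       set_chattr_on_dir: false, target_nodes: ['hana'], create_temp_folders: 'true' }
#
# The next task hands exactly this list, one entry per volume, to 2.6.1.1-anf-mount.yaml.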
+ + - name: "ANF Mount: HANA Data - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + # Keep with_items: here. Converting this task to loop: breaks the include for reasons that are not understood; only with_items: has proven to work. + with_items: + - "{{ hana_data_scaleout_mountpoint | list }}" + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_data_mountpoint is defined + + + + - name: "ANF Mount: HANA Log - Scale out - Create mount list" + block: + - name: "Initialize HANA Log mountpoints" + ansible.builtin.set_fact: + hana_log_scaleout_mountpoint: [] + + - name: "Build HANA log mountpoints" + ansible.builtin.set_fact: + hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}" + loop: "{{ hana_log_mountpoint }}" + loop_control: + index_var: my_index + # Note the object structure and the specific key:pair values; do not modify the hard-coded ones. + vars: + logupdate: + - { type: 'log', + temppath: 'hanalog', + folder: 'hanalog', + mount: "{{ item }}", + opts: "{{ mnt_options }}", + path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", + permissions: '0775', + set_chattr_on_dir: false, + target_nodes: ['hana'], + create_temp_folders: 'true' + } + when: + - node_tier == 'hana' + - hana_log_mountpoint is defined + + - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" + ansible.builtin.debug: + var: hana_log_scaleout_mountpoint + + - name: "ANF Mount: HANA Log - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + # Keep with_items: here. Converting this task to loop: breaks the include for reasons that are not understood; only with_items: has proven to work. + with_items: + - "{{ hana_log_scaleout_mountpoint | list }}" + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_log_mountpoint is defined + + - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})" + ansible.builtin.file: + owner: '{{ hdbadm_uid }}' + group: sapsys + path: "{{ item.path }}" + state: directory + recurse: true + with_items: + - "{{ hana_log_scaleout_mountpoint }}" + - "{{ hana_data_scaleout_mountpoint }}" + - { 'path': '/hana/shared' } + - { 'path': '/usr/sap/{{ db_sid | upper }}' } + when: + - tier == 'sapos' + - node_tier == 'hana' when: - - tier == 'sapos' - - node_tier == 'hana' + - db_scale_out + - not db_high_availability ...
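Illustrative sketch (not part of the patch): for review purposes, this is the per-node mount layout the merged tasks above converge on for a two-node ANF scale-out system without HSR (db_scale_out true, db_high_availability false), assuming two data and two log volumes; <DBSID> stands for db_sid | upper and the left-hand sides are the ANF volume sources:

    hana_shared_mountpoint[0] (folder 'shared')          -> /hana/shared                 (same share on every node)
    hana_shared_mountpoint[0] (folder 'usr-sap-hanadb0') -> /usr/sap/<DBSID>             (node-unique folder, index taken from db_hosts)
    hana_data_mountpoint[0]                              -> /hana/data/<DBSID>/mnt00001
    hana_data_mountpoint[1]                              -> /hana/data/<DBSID>/mnt00002
    hana_log_mountpoint[0]                               -> /hana/log/<DBSID>/mnt00001
    hana_log_mountpoint[1]                               -> /hana/log/<DBSID>/mnt00002

Every node mounts all data and log volumes, and the final task re-applies hdbadm ownership across them.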
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml index f3eb975ac9..f9d7046401 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml @@ -1,986 +1,990 @@ -# /*---------------------------------------------------------------------------8 -# | | -# | Perform the ANF system mounts for Scale out systems only | -# | | -# +------------------------------------4--------------------------------------*/ ---- - -- name: "ANF Mount: Set the NFS Service name" - ansible.builtin.set_fact: - nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}" - -- name: "ANF Mount: Set the NFSmount options" - ansible.builtin.set_fact: - mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' - when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] - -- name: "ANF Mount: Set the NFSmount options" - ansible.builtin.set_fact: - mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' - when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] - -- name: "ANF Mount: Define this SID" - ansible.builtin.set_fact: - this_sid: - { - 'sid': '{{ sap_sid | upper }}', - 'dbsid_uid': '{{ hdbadm_uid }}', - 'sidadm_uid': '{{ sidadm_uid }}', - 'ascs_inst_no': '{{ scs_instance_number }}', - 'pas_inst_no': '{{ pas_instance_number }}', - 'app_inst_no': '{{ app_instance_number }}' - } - -- name: "ANF Mount: Create list of all_sap_mounts to support " - ansible.builtin.set_fact: - all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" - db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -- name: "ANF Mount: Ensure the NFS service is stopped" - ansible.builtin.systemd: - name: "{{ nfs_service }}" - state: stopped - when: - - "'scs' in supported_tiers" - - sap_mnt is not defined - - sap_trans is not defined - -# /*---------------------------------------------------------------------------8 -# | | -# | Mount the ANF Volumes | -# | Make sure to set the NFS domain in /etc/idmapd.conf on the VM to match the | -# | default domain configuration on Azure NetApp Files: defaultv4iddomain.com. 
| -# | and the mapping is set to nobody | -# | We use tier in tasks as well, to treat any special scenarios that may arise| -# +------------------------------------4--------------------------------------*/ -# For additional information refer to the below URLs -# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-suse#mount-the-azure-netapp-files-volume -# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-red-hat#mount-the-azure-netapp-files-volume -- name: "ANF Mount: NFS Domain Setting (ANF)" - block: - - name: "ANF Mount: Domain is configured as - the default Azure NetApp Files domain" - ansible.builtin.lineinfile: - path: /etc/idmapd.conf - regexp: '^[ #]*Domain = ' - line: 'Domain = defaultv4iddomain.com' - insertafter: '[General]' - when: - - tier == 'sapos' - register: id_mapping_changed - - - name: "ANF Mount: Make sure that user - mapping is set to 'nobody'" - ansible.builtin.lineinfile: - path: /etc/idmapd.conf - regexp: '^[ #]*Nobody-User = ' - line: 'Nobody-User = nobody' - insertafter: '^[ #]*Nobody-User = ' - when: - - tier == 'sapos' - register: id_mapping_changed - - - name: "ANF Mount: Make sure that group - mapping is set to 'nobody'" - ansible.builtin.lineinfile: - path: /etc/idmapd.conf - regexp: '^[ #]*Nobody-Group = ' - line: 'Nobody-Group = nobody' - insertafter: '^[ #]*Nobody-Group = ' - when: - - tier == 'sapos' - register: id_mapping_changed - when: - - tier == 'sapos' - -- name: "ANF Mount: Set nfs4_disable_idmapping to Y" - ansible.builtin.lineinfile: - path: /etc/modprobe.d/nfs.conf - line: 'options nfs nfs4_disable_idmapping=Y' - create: true - mode: 0644 - when: - - tier == 'sapos' - -- name: "ANF Mount: Ensure the services are restarted" - block: - - name: "AF Mount: Ensure the rpcbind service is restarted" - ansible.builtin.systemd: - name: rpcbind - state: restarted - - name: "ANF Mount: Ensure the NFS ID Map service is restarted" - ansible.builtin.systemd: - name: "nfs-idmapd" - daemon-reload: true - state: restarted - - name: "ANF Mount: Pause for 5 seconds" - ansible.builtin.pause: - seconds: 5 - - name: "ANF Mount: Ensure the NFS service is restarted" - ansible.builtin.systemd: - name: "{{ nfs_service }}" - state: restarted - when: - - id_mapping_changed is changed - -# /*---------------------------------------------------------------------------8 -# | | -# | Prepare for the /usr/sap mounts | -# | Create temporary directory structure | -# | Mount the share, create the directory structure on share | -# | Unmount and clean up temporary directory structure | -# | | -# +------------------------------------4--------------------------------------*/ - -- name: "ANF Mount: install:Get the Server name list" - ansible.builtin.set_fact: - first_app_server_temp: "{{ first_app_server_temp | default([]) + [item] }}" - with_items: - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -- name: "ANF Mount: usr/sap" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'usrsap', - 'temppath': 'tmpusersap', - 'mount': '{{ usr_sap_mountpoint }}', - 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', - 'path': '/usr/sap', - 'set_chattr_on_dir': false, - 'target_nodes': ['app','pas'], - 'create_temp_folders': false - } - vars: - primary_host: "{{ first_app_server_temp | first }}" - when: - - tier == 'sapos' - - usr_sap_mountpoint is 
defined - -# /*---------------------------------------------------------------------------8 -# | | -# | Prepare for the sap_mnt mounts | -# | Create temporary directory structure | -# | Mount the share, create the directory structure on share | -# | Unmount and clean up temporary directory structure | -# | | -# +------------------------------------4--------------------------------------*/ -- name: "ANF Mount: (sapmnt)" - block: - - name: "ANF Mount: Create /saptmp" - ansible.builtin.file: - path: "/saptmp" - state: directory - mode: 0755 - group: sapsys - - - name: "ANF Mount: (sapmnt)" - block: - - name: "ANF Mount: Filesystems on ANF (sapmnt)" - ansible.posix.mount: - src: "{{ sap_mnt }}" - path: "/saptmp" - fstype: "nfs4" - opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" - state: mounted - rescue: - - name: "ANF Mount: Clear the cache of the nfsidmap daemon (ANF)" - ansible.builtin.shell: | - nfsidmap -c - - name: "ANF Mount: Ensure the rpcbind service is restarted" - ansible.builtin.systemd: - name: rpcbind - daemon-reload: true - state: restarted - - - name: "ANF Mount: Create SAP Directories (spmnt & usrsap)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } - - - name: "ANF Mount: Create SAP Directories (ANF)" - ansible.builtin.file: - path: "/saptmp/sapmnt{{ item.sid | upper }}" - state: directory - mode: 0755 - loop: "{{ MULTI_SIDS }}" - when: MULTI_SIDS is defined - - - name: "ANF Mount: Unmount file systems (sap_mnt)" - ansible.posix.mount: - src: "{{ sap_mnt }}" - path: "/saptmp" - state: unmounted - - - name: "ANF Mount: Delete locally created SAP Directories" - ansible.builtin.file: - path: "{{ item.path }}" - state: absent - loop: - - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } - - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } - - - name: "ANF Mount: Remove SAP Directories (ANF)" - ansible.builtin.file: - path: "/saptmp/sapmnt{{ item.sid | upper }}" - state: absent - loop: "{{ MULTI_SIDS }}" - when: MULTI_SIDS is defined - - - name: "ANF Mount: Cleanup fstab and directory (sap_mnt)" - ansible.posix.mount: - src: "{{ sap_mnt }}" - path: "/saptmp" - fstype: "nfs4" - opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" - state: absent - - when: - - tier == 'sapos' - - "'scs' in supported_tiers" - - sap_mnt is defined - -# /*---------------------------------------------------------------------------8 -# | | -# | Perform the sap_mnt mounts | -# | Create directories and make them immutable | -# | | -# +------------------------------------4--------------------------------------*/ - -- name: "ANF Mount: Create SAP Directories (sapmnt)" - ansible.builtin.file: - owner: "{{ item.sidadm_uid }}" - group: sapsys - mode: 0755 - path: "/sapmnt/{{ item.sid }}" - state: directory - register: is_created_now - loop: "{{ all_sap_mounts }}" - when: - - tier == 'sapos' - - node_tier in ['app','scs','ers', 'pas'] or 'scs' in supported_tiers - - sap_mnt is defined - -- name: "ANF Mount: Change attribute 
only when we create SAP Directories (sap_mnt)" - ansible.builtin.file: - path: "{{ item.item.path }}" - state: directory - mode: 0755 - attr: i+ - loop: "{{ is_created_now.results }}" - when: - - tier == 'sapos' - - item.item is changed - register: set_immutable_attribute - -- name: "ANF Mount: Create SAP Directories (scs & ers)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - owner: '{{ sidadm_uid }}' - group: sapsys - mode: 0755 - loop: - - { path: '/usr/sap/{{ sap_sid | upper }}' } - - { path: '/usr/sap/{{ sap_sid | upper }}/SYS' } - - { path: '/usr/sap/{{ sap_sid | upper }}/{{ instance_type | upper }}{{ scs_instance_number }}' } - - { path: '/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' } - when: - - tier == 'sapos' - - node_tier in ['scs','ers'] or 'scs' in supported_tiers - - sap_mnt is defined - - MULTI_SIDS is undefined - register: is_created_now3 - -- name: "ANF Mount: Change attribute only when we create SAP Directories (scs & ers)" - ansible.builtin.file: - path: "{{ item.item.path }}" - state: directory - mode: 0755 - attr: i+ - loop: "{{ is_created_now3.results }}" - when: - - tier == 'sapos' - - item.item is changed - register: set_immutable_attribute - -- name: "ANF Mount: Debug" - ansible.builtin.debug: - msg: 'isHA:{{ scs_high_availability }} | node_tier:{{ node_tier }} | tier:{{ tier }} | sapmnt:{{ sap_mnt }}' - -- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Distributed Non-HA" - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' - state: mounted - loop: - - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } - when: - - tier == 'sapos' - - sap_mnt is defined - - not scs_high_availability - - ansible_play_hosts_all | length > 1 - - node_tier != 'hana' - -- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Single instance" - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' - state: mounted - loop: - - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } - when: - - tier == 'sapos' - - sap_mnt is defined - - not scs_high_availability - - ansible_play_hosts_all | length == 1 - - -- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Standalone MULTI_SIDS" - become: true - become_user: root - ansible.posix.mount: - src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" - path: "/sapmnt/{{ item.sid }}" - fstype: 'nfs4' - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' - state: mounted - loop: "{{ MULTI_SIDS }}" - when: - - not scs_high_availability - - sap_mnt is defined - - MULTI_SIDS is defined - -- name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - High Availability" - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' - state: mounted - loop: - - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } - when: - - scs_high_availability - - tier in ['sapos'] - - node_tier != 'hana' - - sap_mnt is defined - -- name: "ANF Mount: usr/sap/{{ sap_sid | upper }}/SYS" - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' - state: mounted - loop: - - { 
type: 'nfs4', src: '{{ sap_mnt }}/usrsap{{ sap_sid | upper }}sys', path: '/usr/sap/{{ sap_sid | upper }}/SYS' } - when: - - scs_high_availability - - tier in ['sapos'] - - node_tier in ['scs','ers'] - - sap_mnt is defined - - -# /*---------------------------------------------------------------------------8 -# | | -# | Prepare for the sap_trans, install mounts | -# | Create temporary directory structure | -# | Mount the share, create the directory structure on share | -# | Unmount and clean up temporary directory structure | -# | | -# +------------------------------------4--------------------------------------*/ - -- name: "ANF Mount: install:Get the Server name list" - ansible.builtin.set_fact: - first_server_temp: "{{ first_server_temp | default([]) + [item] }}" - with_items: - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -- name: "ANF Mount: sap_trans" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'trans', - 'temppath': 'saptrans', - 'mount': '{{ sap_trans }}', - 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', - 'path': '/usr/sap/trans', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes': ['app','pas', 'ers', 'scs'], - 'create_temp_folders': false - } - vars: - primary_host: "{{ first_server_temp | first }}" - when: - - tier == 'sapos' - - sap_trans is defined - -- name: "ANF Mount: install" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'install', - 'temppath': 'sapinstall', - 'folder': '{{ bom_base_name }}', - 'mount': '{{ usr_sap_install_mountpoint }}', - 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', - 'path': '/usr/sap/install', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes': ['all'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ first_server_temp | first }}" - when: - - tier == 'sapos' - - usr_sap_install_mountpoint is defined - -# /*---------------------------------------------------------------------------8 -# | | -# | Prepare the OS for running SAP HANA on | -# | Azure NetApp Files with NFS | -# | Except Scale out + ANF | -# +------------------------------------4--------------------------------------*/ -- name: "ANF Mount: Prepare the OS for running - SAP HANA on Azure NetApp with NFS" - block: - - name: "ANF Mount: Create configuration file for the NetApp configuration settings" - ansible.builtin.blockinfile: - path: /etc/sysctl.d/91-NetApp-HANA.conf - backup: true - create: true - mode: 0644 - marker: "# {mark} HANA NetApp configuration high availability" - block: | - net.core.rmem_max = 16777216 - net.core.wmem_max = 16777216 - net.core.rmem_default = 16777216 - net.core.wmem_default = 16777216 - net.core.optmem_max = 16777216 - net.ipv4.tcp_rmem = 4096 131072 16777216 - net.ipv4.tcp_wmem = 4096 16384 16777216 - net.core.netdev_max_backlog = 300000 - net.ipv4.tcp_slow_start_after_idle=0 - net.ipv4.tcp_no_metrics_save = 1 - net.ipv4.tcp_moderate_rcvbuf = 1 - net.ipv4.tcp_window_scaling = 1 - net.ipv4.tcp_timestamps = 0 - net.ipv4.tcp_sack = 1 - when: - - node_tier == 'hana' - - - name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - - - name: "ANF Mount: Create configuration file for the NetApp configuration settings" 
- ansible.builtin.blockinfile: - path: /etc/sysctl.d/91-NetApp-HANA.conf - backup: true - create: true - mode: 0644 - marker: "# {mark} HANA NetApp configuration standalone" - block: | - net.core.rmem_max = 16777216 - net.core.wmem_max = 16777216 - net.core.rmem_default = 16777216 - net.core.wmem_default = 16777216 - net.core.optmem_max = 16777216 - net.ipv4.tcp_rmem = 4096 131072 16777216 - net.ipv4.tcp_wmem = 4096 16384 16777216 - net.core.netdev_max_backlog = 300000 - net.ipv4.tcp_slow_start_after_idle=0 - net.ipv4.tcp_no_metrics_save = 1 - net.ipv4.tcp_moderate_rcvbuf = 1 - net.ipv4.tcp_window_scaling = 1 - net.ipv4.tcp_timestamps = 1 - net.ipv4.tcp_sack = 1 - when: - - node_tier == 'hana' - - not database_high_availability - - - name: "ANF Mount: Create configuration file - with additional optimization settings" - ansible.builtin.blockinfile: - path: /etc/sysctl.d/ms-az.conf - backup: true - create: true - mode: 0644 - marker: "# {mark} HANA NetApp optimizations" - block: | - net.ipv6.conf.all.disable_ipv6 = 1 - net.ipv4.tcp_max_syn_backlog = 16348 - net.ipv4.conf.all.rp_filter = 0 - sunrpc.tcp_slot_table_entries = 128 - vm.swappiness=10 - when: - - node_tier == 'hana' - - # /*-----------------------------------------------------------------------8 - # | Configure the maximum number of (TCP) RPC requests that can be in | - # | flight at a time (to the NFS server) to be 128 | - # |--------------------------------4--------------------------------------*/ - - name: "ANF Mount: configure the maximum number - of RPC requests for the NFS session" - ansible.builtin.blockinfile: - path: /etc/modprobe.d/sunrpc.conf - backup: true - create: true - mode: 0644 - marker: "# {mark} NFS RPC Connections" - block: "options sunrpc tcp_max_slot_table_entries=128" - when: - - node_tier == 'hana' - - when: - - tier == 'sapos' - - node_tier == 'hana' - -- name: "ANF Mount: Create /hana folder" - ansible.builtin.file: - path: /hana - mode: 0755 - state: directory - group: sapsys - when: - - tier == 'sapos' - - node_tier == 'hana' - -# Note: This block ( and one for second DB note) must run only for HSR - pacemaker HANA scale out -# Currently we only support two node cluster + observer. 
-# TODO: Add support for >2(even count) node cluster + observer -- name: "ANF Mount: HANA data" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -- name: "ANF Mount: HANA log" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -- name: "ANF Mount: HANA shared" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -- name: "ANF Mount: HANA data (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -- name: "ANF Mount: HANA log (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 1 - - db_hosts | length ==2 - - 
ansible_hostname == db_hosts[1] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -- name: "ANF Mount: HANA shared (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] - # For HSR based scale out, needs DB high availability - - db_high_availability is defined - - db_high_availability - -# /*---------------------------------------------------------------------------8 -# | | -# | Prepare the OS for running SAP HANA on | -# | Azure NetApp Files with NFS | -# | Scale out + ANF | -# +------------------------------------4--------------------------------------*/ - -# FOR ANF mount on SLES and RHEl, the below tasks replicate the steps in the link https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-scale-out-standby-netapp-files-suse#mount-the-azure-netapp-files-volumes -# Mount the HANA shared on to the temp path - -- name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" - ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/usr/sap/{{ db_sid | upper }}" - state: directory - when: - - tier == 'hana' - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - -- name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" - ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/hana/data/{{ db_sid | upper }}" - state: directory - when: - - tier == 'hana' - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - - -- name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" - ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/hana/log/{{ db_sid | upper }}" - state: directory - when: - - tier == 'hana' - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - -- name: "ANF Mount: HANA shared - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'shared', - # change folder to match the mount folder within the share - 'folder': 'shared', - 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - # Run this on all the nodes, not just primary. - primary_host: "{{ ansible_hostname }}" - when: - - node_tier == 'hana' - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 0 - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. 
- - db_scale_out is defined - - db_scale_out - -# This runs for unique share per node -- name: "ANF Mount: usrsap - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'usrsap', - 'temppath': 'usrsap', - 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", - 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/usr/sap/{{ db_sid | upper }}', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ ansible_hostname }}" - when: - - node_tier == 'hana' - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length == 1 - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - -- name: "ANF Mount: HANA Data - Scale out - Create mount list" - block: - - name: "Initialize HANA Data mountpoints" - ansible.builtin.set_fact: - hana_data_scaleout_mountpoint: [] - - name: "Build HANA Data mountpoints" - ansible.builtin.set_fact: - # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" - hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" - loop: "{{ hana_data_mountpoint }}" - loop_control: - index_var: my_index - # Note the object structure and specific key:pair value. Do not modify those hard coded. - vars: - dataupdate: - - { type: 'data', - temppath: 'hanadata', - folder: 'hanadata', - mount: "{{ item }}", - opts: "{{ mnt_options }}", - path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", - permissions: '0775', - set_chattr_on_dir: false, - target_nodes: ['hana'], - create_temp_folders: 'true' - } - when: - - node_tier == 'hana' - - hana_data_mountpoint is defined - # - hana_data_mountpoint | length == db_hosts | length - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - -- name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" - ansible.builtin.debug: - var: hana_data_scaleout_mountpoint - -- name: "ANF Mount: HANA Data - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. - with_items: - - "{{ hana_data_scaleout_mountpoint | list }}" - vars: - primary_host: "{{ ansible_hostname }}" - when: - - node_tier == 'hana' - - hana_data_mountpoint is defined - # - hana_data_mountpoint | length == db_hosts | length - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - - -- name: "ANF Mount: HANA Log - Scale out - Create mount list" - block: - - name: "Initialize HANA Log mountpoints" - ansible.builtin.set_fact: - hana_log_scaleout_mountpoint: [] - - - name: "Build HANA log mountpoints" - ansible.builtin.set_fact: - hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}" - loop: "{{ hana_log_mountpoint }}" - loop_control: - index_var: my_index - # Note the object structure and specific key:pair value. Do not modify those hard coded. 
- vars: - logupdate: - - { type: 'log', - temppath: 'hanalog', - folder: 'hanalog', - mount: "{{ item }}", - opts: "{{ mnt_options }}", - path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", - permissions: '0775', - set_chattr_on_dir: false, - target_nodes: ['hana'], - create_temp_folders: 'true' - } - when: - - node_tier == 'hana' - - hana_log_mountpoint is defined - # - hana_log_mountpoint | length == db_hosts | length - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - -- name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" - ansible.builtin.debug: - var: hana_log_scaleout_mountpoint - -- name: "ANF Mount: HANA Log - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. - with_items: - - "{{ hana_log_scaleout_mountpoint | list }}" - vars: - primary_host: "{{ ansible_hostname }}" - when: - - node_tier == 'hana' - - hana_log_mountpoint is defined - # - hana_log_mountpoint | length == db_hosts | length - # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - db_scale_out is defined - - db_scale_out - - -- name: "ANF Mount: Set Permissons on HANA (HSR) Directories ({{ item.path }})" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - path: "{{ item.path }}" - state: directory - recurse: true - loop: - - { 'path': '/hana/data' } - - { 'path': '/hana/log' } - - { 'path': '/hana/shared' } - when: - - tier == 'sapos' - - node_tier == 'hana' - - db_high_availability is defined - - db_high_availability - - -- name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - path: "{{ item.path }}" - state: directory - recurse: true - with_items: - - "{{ hana_log_scaleout_mountpoint }}" - - "{{ hana_data_scaleout_mountpoint }}" - - { 'path': '/hana/shared' } - - { 'path': '/usr/sap/{{ db_sid | upper }}' } - when: - - tier == 'sapos' - - node_tier == 'hana' - - not (db_high_availability | default(false)) - - db_scale_out - -... +# This task is now deprecated as the functionality is merged into 2.6.1 and 2.6.8 +# This file will be removed in a later release. It is left here for tracing and debugging
+ + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Perform the ANF system mounts for Scale out systems only | +# # | | +# # +------------------------------------4--------------------------------------*/ +# --- + +# - name: "ANF Mount: Set the NFS Service name" +# ansible.builtin.set_fact: +# nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}" + +# - name: "ANF Mount: Set the NFSmount options" +# ansible.builtin.set_fact: +# mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' +# when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + +# - name: "ANF Mount: Set the NFSmount options" +# ansible.builtin.set_fact: +# mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' +# when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + +# - name: "ANF Mount: Define this SID" +# ansible.builtin.set_fact: +# this_sid: +# { +# 'sid': '{{ sap_sid | upper }}', +# 'dbsid_uid': '{{ hdbadm_uid }}', +# 'sidadm_uid': '{{ sidadm_uid }}', +# 'ascs_inst_no': '{{ scs_instance_number }}', +# 'pas_inst_no': '{{ pas_instance_number }}', +# 'app_inst_no': '{{ app_instance_number }}' +# } + +# - name: "ANF Mount: Create list of all_sap_mounts to support " +# ansible.builtin.set_fact: +# all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" +# db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +# - name: "ANF Mount: Ensure the NFS service is stopped" +# ansible.builtin.systemd: +# name: "{{ nfs_service }}" +# state: stopped +# when: +# - "'scs' in supported_tiers" +# - sap_mnt is not defined +# - sap_trans is not defined + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Mount the ANF Volumes | +# # | Make sure to set the NFS domain in /etc/idmapd.conf on the VM to match the | +# # | default domain configuration on Azure NetApp Files: defaultv4iddomain.com.
| +# # | and the mapping is set to nobody | +# # | We use tier in tasks as well, to treat any special scenarios that may arise| +# # +------------------------------------4--------------------------------------*/ +# # For additional information refer to the below URLs +# # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-suse#mount-the-azure-netapp-files-volume +# # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-red-hat#mount-the-azure-netapp-files-volume +# - name: "ANF Mount: NFS Domain Setting (ANF)" +# block: +# - name: "ANF Mount: Domain is configured as +# the default Azure NetApp Files domain" +# ansible.builtin.lineinfile: +# path: /etc/idmapd.conf +# regexp: '^[ #]*Domain = ' +# line: 'Domain = defaultv4iddomain.com' +# insertafter: '[General]' +# when: +# - tier == 'sapos' +# register: id_mapping_changed + +# - name: "ANF Mount: Make sure that user +# mapping is set to 'nobody'" +# ansible.builtin.lineinfile: +# path: /etc/idmapd.conf +# regexp: '^[ #]*Nobody-User = ' +# line: 'Nobody-User = nobody' +# insertafter: '^[ #]*Nobody-User = ' +# when: +# - tier == 'sapos' +# register: id_mapping_changed + +# - name: "ANF Mount: Make sure that group +# mapping is set to 'nobody'" +# ansible.builtin.lineinfile: +# path: /etc/idmapd.conf +# regexp: '^[ #]*Nobody-Group = ' +# line: 'Nobody-Group = nobody' +# insertafter: '^[ #]*Nobody-Group = ' +# when: +# - tier == 'sapos' +# register: id_mapping_changed +# when: +# - tier == 'sapos' + +# - name: "ANF Mount: Set nfs4_disable_idmapping to Y" +# ansible.builtin.lineinfile: +# path: /etc/modprobe.d/nfs.conf +# line: 'options nfs nfs4_disable_idmapping=Y' +# create: true +# mode: 0644 +# when: +# - tier == 'sapos' + +# - name: "ANF Mount: Ensure the services are restarted" +# block: +# - name: "AF Mount: Ensure the rpcbind service is restarted" +# ansible.builtin.systemd: +# name: rpcbind +# state: restarted +# - name: "ANF Mount: Ensure the NFS ID Map service is restarted" +# ansible.builtin.systemd: +# name: "nfs-idmapd" +# daemon-reload: true +# state: restarted +# - name: "ANF Mount: Pause for 5 seconds" +# ansible.builtin.pause: +# seconds: 5 +# - name: "ANF Mount: Ensure the NFS service is restarted" +# ansible.builtin.systemd: +# name: "{{ nfs_service }}" +# state: restarted +# when: +# - id_mapping_changed is changed + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Prepare for the /usr/sap mounts | +# # | Create temporary directory structure | +# # | Mount the share, create the directory structure on share | +# # | Unmount and clean up temporary directory structure | +# # | | +# # +------------------------------------4--------------------------------------*/ + +# - name: "ANF Mount: install:Get the Server name list" +# ansible.builtin.set_fact: +# first_app_server_temp: "{{ first_app_server_temp | default([]) + [item] }}" +# with_items: +# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" +# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +# - name: "ANF Mount: usr/sap" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'usrsap', +# 'temppath': 'tmpusersap', +# 'mount': '{{ usr_sap_mountpoint }}', +# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', +# 'path': '/usr/sap', +# 'set_chattr_on_dir': false, +# 'target_nodes': ['app','pas'], +# 'create_temp_folders': false +# } +# 
vars: +# primary_host: "{{ first_app_server_temp | first }}" +# when: +# - tier == 'sapos' +# - usr_sap_mountpoint is defined + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Prepare for the sap_mnt mounts | +# # | Create temporary directory structure | +# # | Mount the share, create the directory structure on share | +# # | Unmount and clean up temporary directory structure | +# # | | +# # +------------------------------------4--------------------------------------*/ +# - name: "ANF Mount: (sapmnt)" +# block: +# - name: "ANF Mount: Create /saptmp" +# ansible.builtin.file: +# path: "/saptmp" +# state: directory +# mode: 0755 +# group: sapsys + +# - name: "ANF Mount: (sapmnt)" +# block: +# - name: "ANF Mount: Filesystems on ANF (sapmnt)" +# ansible.posix.mount: +# src: "{{ sap_mnt }}" +# path: "/saptmp" +# fstype: "nfs4" +# opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" +# state: mounted +# rescue: +# - name: "ANF Mount: Clear the cache of the nfsidmap daemon (ANF)" +# ansible.builtin.shell: | +# nfsidmap -c +# - name: "ANF Mount: Ensure the rpcbind service is restarted" +# ansible.builtin.systemd: +# name: rpcbind +# daemon-reload: true +# state: restarted + +# - name: "ANF Mount: Create SAP Directories (spmnt & usrsap)" +# ansible.builtin.file: +# path: "{{ item.path }}" +# state: directory +# mode: 0755 +# loop: +# - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } + +# - name: "ANF Mount: Create SAP Directories (ANF)" +# ansible.builtin.file: +# path: "/saptmp/sapmnt{{ item.sid | upper }}" +# state: directory +# mode: 0755 +# loop: "{{ MULTI_SIDS }}" +# when: MULTI_SIDS is defined + +# - name: "ANF Mount: Unmount file systems (sap_mnt)" +# ansible.posix.mount: +# src: "{{ sap_mnt }}" +# path: "/saptmp" +# state: unmounted + +# - name: "ANF Mount: Delete locally created SAP Directories" +# ansible.builtin.file: +# path: "{{ item.path }}" +# state: absent +# loop: +# - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } +# - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } + +# - name: "ANF Mount: Remove SAP Directories (ANF)" +# ansible.builtin.file: +# path: "/saptmp/sapmnt{{ item.sid | upper }}" +# state: absent +# loop: "{{ MULTI_SIDS }}" +# when: MULTI_SIDS is defined + +# - name: "ANF Mount: Cleanup fstab and directory (sap_mnt)" +# ansible.posix.mount: +# src: "{{ sap_mnt }}" +# path: "/saptmp" +# fstype: "nfs4" +# opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" +# state: absent + +# when: +# - tier == 'sapos' +# - "'scs' in supported_tiers" +# - sap_mnt is defined + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Perform the sap_mnt mounts | +# # | Create directories and make them immutable | +# # | | +# # +------------------------------------4--------------------------------------*/ + +# - name: "ANF Mount: Create SAP Directories (sapmnt)" +# ansible.builtin.file: +# owner: "{{ item.sidadm_uid }}" +# group: sapsys +# mode: 0755 +# path: "/sapmnt/{{ item.sid }}" +# state: 
directory +# register: is_created_now +# loop: "{{ all_sap_mounts }}" +# when: +# - tier == 'sapos' +# - node_tier in ['app','scs','ers', 'pas'] or 'scs' in supported_tiers +# - sap_mnt is defined + +# - name: "ANF Mount: Change attribute only when we create SAP Directories (sap_mnt)" +# ansible.builtin.file: +# path: "{{ item.item.path }}" +# state: directory +# mode: 0755 +# attr: i+ +# loop: "{{ is_created_now.results }}" +# when: +# - tier == 'sapos' +# - item.item is changed +# register: set_immutable_attribute + +# - name: "ANF Mount: Create SAP Directories (scs & ers)" +# ansible.builtin.file: +# path: "{{ item.path }}" +# state: directory +# owner: '{{ sidadm_uid }}' +# group: sapsys +# mode: 0755 +# loop: +# - { path: '/usr/sap/{{ sap_sid | upper }}' } +# - { path: '/usr/sap/{{ sap_sid | upper }}/SYS' } +# - { path: '/usr/sap/{{ sap_sid | upper }}/{{ instance_type | upper }}{{ scs_instance_number }}' } +# - { path: '/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' } +# when: +# - tier == 'sapos' +# - node_tier in ['scs','ers'] or 'scs' in supported_tiers +# - sap_mnt is defined +# - MULTI_SIDS is undefined +# register: is_created_now3 + +# - name: "ANF Mount: Change attribute only when we create SAP Directories (scs & ers)" +# ansible.builtin.file: +# path: "{{ item.item.path }}" +# state: directory +# mode: 0755 +# attr: i+ +# loop: "{{ is_created_now3.results }}" +# when: +# - tier == 'sapos' +# - item.item is changed +# register: set_immutable_attribute + +# - name: "ANF Mount: Debug" +# ansible.builtin.debug: +# msg: 'isHA:{{ scs_high_availability }} | node_tier:{{ node_tier }} | tier:{{ tier }} | sapmnt:{{ sap_mnt }}' + +# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Distributed Non-HA" +# ansible.posix.mount: +# src: "{{ item.src }}" +# path: "{{ item.path }}" +# fstype: "{{ item.type }}" +# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' +# state: mounted +# loop: +# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } +# when: +# - tier == 'sapos' +# - sap_mnt is defined +# - not scs_high_availability +# - ansible_play_hosts_all | length > 1 +# - node_tier != 'hana' + +# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Single instance" +# ansible.posix.mount: +# src: "{{ item.src }}" +# path: "{{ item.path }}" +# fstype: "{{ item.type }}" +# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' +# state: mounted +# loop: +# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } +# when: +# - tier == 'sapos' +# - sap_mnt is defined +# - not scs_high_availability +# - ansible_play_hosts_all | length == 1 + + +# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Standalone MULTI_SIDS" +# become: true +# become_user: root +# ansible.posix.mount: +# src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" +# path: "/sapmnt/{{ item.sid }}" +# fstype: 'nfs4' +# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' +# state: mounted +# loop: "{{ MULTI_SIDS }}" +# when: +# - not scs_high_availability +# - sap_mnt is defined +# - MULTI_SIDS is defined + +# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - High Availability" +# ansible.posix.mount: +# src: "{{ item.src }}" +# path: "{{ item.path }}" +# fstype: "{{ item.type }}" +# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' +# state: mounted +# loop: +# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } +# when: +# - 
scs_high_availability +# - tier in ['sapos'] +# - node_tier != 'hana' +# - sap_mnt is defined + +# - name: "ANF Mount: usr/sap/{{ sap_sid | upper }}/SYS" +# ansible.posix.mount: +# src: "{{ item.src }}" +# path: "{{ item.path }}" +# fstype: "{{ item.type }}" +# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' +# state: mounted +# loop: +# - { type: 'nfs4', src: '{{ sap_mnt }}/usrsap{{ sap_sid | upper }}sys', path: '/usr/sap/{{ sap_sid | upper }}/SYS' } +# when: +# - scs_high_availability +# - tier in ['sapos'] +# - node_tier in ['scs','ers'] +# - sap_mnt is defined + + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Prepare for the sap_trans, install mounts | +# # | Create temporary directory structure | +# # | Mount the share, create the directory structure on share | +# # | Unmount and clean up temporary directory structure | +# # | | +# # +------------------------------------4--------------------------------------*/ + +# - name: "ANF Mount: install:Get the Server name list" +# ansible.builtin.set_fact: +# first_server_temp: "{{ first_server_temp | default([]) + [item] }}" +# with_items: +# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" +# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + +# - name: "ANF Mount: sap_trans" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'trans', +# 'temppath': 'saptrans', +# 'mount': '{{ sap_trans }}', +# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', +# 'path': '/usr/sap/trans', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes': ['app','pas', 'ers', 'scs'], +# 'create_temp_folders': false +# } +# vars: +# primary_host: "{{ first_server_temp | first }}" +# when: +# - tier == 'sapos' +# - sap_trans is defined + +# - name: "ANF Mount: install" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'install', +# 'temppath': 'sapinstall', +# 'folder': '{{ bom_base_name }}', +# 'mount': '{{ usr_sap_install_mountpoint }}', +# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', +# 'path': '/usr/sap/install', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes': ['all'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ first_server_temp | first }}" +# when: +# - tier == 'sapos' +# - usr_sap_install_mountpoint is defined + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Prepare the OS for running SAP HANA on | +# # | Azure NetApp Files with NFS | +# # | Except Scale out + ANF | +# # +------------------------------------4--------------------------------------*/ +# - name: "ANF Mount: Prepare the OS for running +# SAP HANA on Azure NetApp with NFS" +# block: +# - name: "ANF Mount: Create configuration file for the NetApp configuration settings" +# ansible.builtin.blockinfile: +# path: /etc/sysctl.d/91-NetApp-HANA.conf +# backup: true +# create: true +# mode: 0644 +# marker: "# {mark} HANA NetApp configuration high availability" +# block: | +# net.core.rmem_max = 16777216 +# net.core.wmem_max = 16777216 +# net.core.rmem_default = 16777216 +# net.core.wmem_default = 16777216 +# net.core.optmem_max = 16777216 +# net.ipv4.tcp_rmem = 4096 131072 16777216 +# net.ipv4.tcp_wmem = 4096 16384 16777216 +# net.core.netdev_max_backlog = 300000 +# net.ipv4.tcp_slow_start_after_idle=0 +# net.ipv4.tcp_no_metrics_save = 1 +# net.ipv4.tcp_moderate_rcvbuf = 1 +# 
net.ipv4.tcp_window_scaling = 1 +# net.ipv4.tcp_timestamps = 0 +# net.ipv4.tcp_sack = 1 +# when: +# - node_tier == 'hana' + +# - name: "Backward Compatibility - Check required Database HA variables" +# ansible.builtin.set_fact: +# database_high_availability: "{{ db_high_availability | default(false) }}" +# when: +# - db_high_availability is defined +# - database_high_availability is not defined + +# - name: "ANF Mount: Create configuration file for the NetApp configuration settings" +# ansible.builtin.blockinfile: +# path: /etc/sysctl.d/91-NetApp-HANA.conf +# backup: true +# create: true +# mode: 0644 +# marker: "# {mark} HANA NetApp configuration standalone" +# block: | +# net.core.rmem_max = 16777216 +# net.core.wmem_max = 16777216 +# net.core.rmem_default = 16777216 +# net.core.wmem_default = 16777216 +# net.core.optmem_max = 16777216 +# net.ipv4.tcp_rmem = 4096 131072 16777216 +# net.ipv4.tcp_wmem = 4096 16384 16777216 +# net.core.netdev_max_backlog = 300000 +# net.ipv4.tcp_slow_start_after_idle=0 +# net.ipv4.tcp_no_metrics_save = 1 +# net.ipv4.tcp_moderate_rcvbuf = 1 +# net.ipv4.tcp_window_scaling = 1 +# net.ipv4.tcp_timestamps = 1 +# net.ipv4.tcp_sack = 1 +# when: +# - node_tier == 'hana' +# - not database_high_availability + +# - name: "ANF Mount: Create configuration file +# with additional optimization settings" +# ansible.builtin.blockinfile: +# path: /etc/sysctl.d/ms-az.conf +# backup: true +# create: true +# mode: 0644 +# marker: "# {mark} HANA NetApp optimizations" +# block: | +# net.ipv6.conf.all.disable_ipv6 = 1 +# net.ipv4.tcp_max_syn_backlog = 16348 +# net.ipv4.conf.all.rp_filter = 0 +# sunrpc.tcp_slot_table_entries = 128 +# vm.swappiness=10 +# when: +# - node_tier == 'hana' + +# # /*-----------------------------------------------------------------------8 +# # | Configure the maximum number of (TCP) RPC requests that can be in | +# # | flight at a time (to the NFS server) to be 128 | +# # |--------------------------------4--------------------------------------*/ +# - name: "ANF Mount: configure the maximum number +# of RPC requests for the NFS session" +# ansible.builtin.blockinfile: +# path: /etc/modprobe.d/sunrpc.conf +# backup: true +# create: true +# mode: 0644 +# marker: "# {mark} NFS RPC Connections" +# block: "options sunrpc tcp_max_slot_table_entries=128" +# when: +# - node_tier == 'hana' + +# when: +# - tier == 'sapos' +# - node_tier == 'hana' + +# - name: "ANF Mount: Create /hana folder" +# ansible.builtin.file: +# path: /hana +# mode: 0755 +# state: directory +# group: sapsys +# when: +# - tier == 'sapos' +# - node_tier == 'hana' + +# # Note: This block (and the one for the second DB node) must run only for HSR - pacemaker HANA scale out +# # Currently we only support a two-node cluster + observer. 
+# # TODO: Add support for >2(even count) node cluster + observer +# - name: "ANF Mount: HANA data" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'data', +# 'temppath': 'hanadata', +# 'folder': 'hanadata', +# 'mount': '{{ hana_data_mountpoint[0] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/hana/data', +# 'permissions': '0755', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[0] }}" +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - hana_data_mountpoint is defined +# - hana_data_mountpoint | length > 0 +# - ansible_hostname == db_hosts[0] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# - name: "ANF Mount: HANA log" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'log', +# 'temppath': 'hanalog', +# 'folder': 'hanalog', +# 'mount' : '{{ hana_log_mountpoint[0] }}', +# 'opts': '{{ mnt_options }}', +# 'path' : '/hana/log', +# 'permissions': '0755', +# 'set_chattr_on_dir': false, +# 'target_nodes': ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[0] }}" +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - hana_log_mountpoint is defined +# - hana_log_mountpoint | length > 0 +# - ansible_hostname == db_hosts[0] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# - name: "ANF Mount: HANA shared" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'shared', +# 'temppath': 'hanashared', +# 'folder': 'hanashared', +# 'mount': '{{ hana_shared_mountpoint[0] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/hana/shared', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[0] }}" +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - hana_shared_mountpoint is defined +# - hana_shared_mountpoint | length > 0 +# - ansible_hostname == db_hosts[0] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# - name: "ANF Mount: HANA data (secondary)" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'data', +# 'temppath': 'hanadata', +# 'folder': 'hanadata', +# 'mount': '{{ hana_data_mountpoint[1] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/hana/data', +# 'permissions': '0755', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[1] }}" +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - hana_data_mountpoint is defined +# - hana_data_mountpoint | length > 1 +# - db_hosts | length == 2 +# - ansible_hostname == db_hosts[1] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# - name: "ANF Mount: HANA log (secondary)" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'log', +# 'temppath': 'hanalog', +# 'folder': 'hanalog', +# 'mount' : '{{ hana_log_mountpoint[1] }}', +# 'opts': '{{ mnt_options }}', +# 'path' : '/hana/log', +# 'permissions': '0755', +# 'set_chattr_on_dir': false, +# 'target_nodes': ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[1] }}" +# when: +# - tier == 
'sapos' +# - node_tier == 'hana' +# - hana_log_mountpoint is defined +# - hana_log_mountpoint | length > 1 +# - db_hosts | length == 2 +# - ansible_hostname == db_hosts[1] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# - name: "ANF Mount: HANA shared (secondary)" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'shared', +# 'temppath': 'hanashared', +# 'folder': 'hanashared', +# 'mount': '{{ hana_shared_mountpoint[1] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/hana/shared', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ db_hosts[1] }}" +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - hana_shared_mountpoint is defined +# - hana_shared_mountpoint | length > 1 +# - db_hosts | length == 2 +# - ansible_hostname == db_hosts[1] +# # For HSR based scale out, needs DB high availability +# - db_high_availability is defined +# - db_high_availability + +# # /*---------------------------------------------------------------------------8 +# # | | +# # | Prepare the OS for running SAP HANA on | +# # | Azure NetApp Files with NFS | +# # | Scale out + ANF | +# # +------------------------------------4--------------------------------------*/ + +# # For ANF mounts on SLES and RHEL, the tasks below replicate the steps in https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-scale-out-standby-netapp-files-suse#mount-the-azure-netapp-files-volumes +# # Mount the HANA shared volume onto the temp path + +# - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" +# ansible.builtin.file: +# owner: "{{ db_sid | lower }}adm" +# group: sapsys +# mode: 0755 +# path: "/usr/sap/{{ db_sid | upper }}" +# state: directory +# when: +# - tier == 'hana' +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" +# ansible.builtin.file: +# owner: "{{ db_sid | lower }}adm" +# group: sapsys +# mode: 0755 +# path: "/hana/data/{{ db_sid | upper }}" +# state: directory +# when: +# - tier == 'hana' +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + + +# - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" +# ansible.builtin.file: +# owner: "{{ db_sid | lower }}adm" +# group: sapsys +# mode: 0755 +# path: "/hana/log/{{ db_sid | upper }}" +# state: directory +# when: +# - tier == 'hana' +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# - name: "ANF Mount: HANA shared - Scale out" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'shared', +# 'temppath': 'shared', +# # change folder to match the mount folder within the share +# 'folder': 'shared', +# 'mount': '{{ hana_shared_mountpoint[0] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/hana/shared', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# # Run this on all the nodes, not just primary. +# primary_host: "{{ ansible_hostname }}" +# when: +# - node_tier == 'hana' +# - hana_shared_mountpoint is defined +# - hana_shared_mountpoint | length > 0 +# # For Scale out without HSR/pacemaker. 
Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# # This runs for unique share per node +# - name: "ANF Mount: usrsap - Scale out" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# loop: +# - { +# 'type': 'usrsap', +# 'temppath': 'usrsap', +# 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", +# 'mount': '{{ hana_shared_mountpoint[0] }}', +# 'opts': '{{ mnt_options }}', +# 'path': '/usr/sap/{{ db_sid | upper }}', +# 'permissions': '0775', +# 'set_chattr_on_dir': false, +# 'target_nodes' : ['hana'], +# 'create_temp_folders': true +# } +# vars: +# primary_host: "{{ ansible_hostname }}" +# when: +# - node_tier == 'hana' +# - hana_shared_mountpoint is defined +# - hana_shared_mountpoint | length == 1 +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# - name: "ANF Mount: HANA Data - Scale out - Create mount list" +# block: +# - name: "Initialize HANA Data mountpoints" +# ansible.builtin.set_fact: +# hana_data_scaleout_mountpoint: [] +# - name: "Build HANA Data mountpoints" +# ansible.builtin.set_fact: +# # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" +# hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" +# loop: "{{ hana_data_mountpoint }}" +# loop_control: +# index_var: my_index +# # Note the object structure and the specific key:pair values. Do not modify the hard-coded entries. +# vars: +# dataupdate: +# - { type: 'data', +# temppath: 'hanadata', +# folder: 'hanadata', +# mount: "{{ item }}", +# opts: "{{ mnt_options }}", +# path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", +# permissions: '0775', +# set_chattr_on_dir: false, +# target_nodes: ['hana'], +# create_temp_folders: 'true' +# } +# when: +# - node_tier == 'hana' +# - hana_data_mountpoint is defined +# # - hana_data_mountpoint | length == db_hosts | length +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" +# ansible.builtin.debug: +# var: hana_data_scaleout_mountpoint + +# - name: "ANF Mount: HANA Data - Scale out" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# # Do not change this to loop:. It breaks; I don't know why, but this only seems to work with with_items: despite multiple formatting attempts. +# with_items: +# - "{{ hana_data_scaleout_mountpoint | list }}" +# vars: +# primary_host: "{{ ansible_hostname }}" +# when: +# - node_tier == 'hana' +# - hana_data_mountpoint is defined +# # - hana_data_mountpoint | length == db_hosts | length +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + + +# - name: "ANF Mount: HANA Log - Scale out - Create mount list" +# block: +# - name: "Initialize HANA Log mountpoints" +# ansible.builtin.set_fact: +# hana_log_scaleout_mountpoint: [] + +# - name: "Build HANA log mountpoints" +# ansible.builtin.set_fact: +# hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}" +# loop: "{{ hana_log_mountpoint }}" +# loop_control: +# index_var: my_index +# # Note the object structure and the specific key:pair values. Do not modify the hard-coded entries. 
+# vars: +# logupdate: +# - { type: 'log', +# temppath: 'hanalog', +# folder: 'hanalog', +# mount: "{{ item }}", +# opts: "{{ mnt_options }}", +# path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", +# permissions: '0775', +# set_chattr_on_dir: false, +# target_nodes: ['hana'], +# create_temp_folders: 'true' +# } +# when: +# - node_tier == 'hana' +# - hana_log_mountpoint is defined +# # - hana_log_mountpoint | length == db_hosts | length +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + +# - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" +# ansible.builtin.debug: +# var: hana_log_scaleout_mountpoint + +# - name: "ANF Mount: HANA Log - Scale out" +# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml +# # Do not change this to loop:. It breaks; I don't know why, but this only seems to work with with_items: despite multiple formatting attempts. +# with_items: +# - "{{ hana_log_scaleout_mountpoint | list }}" +# vars: +# primary_host: "{{ ansible_hostname }}" +# when: +# - node_tier == 'hana' +# - hana_log_mountpoint is defined +# # - hana_log_mountpoint | length == db_hosts | length +# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. +# - db_scale_out is defined +# - db_scale_out + + +# - name: "ANF Mount: Set Permissions on HANA (HSR) Directories ({{ item.path }})" +# ansible.builtin.file: +# owner: '{{ hdbadm_uid }}' +# group: sapsys +# path: "{{ item.path }}" +# state: directory +# recurse: true +# loop: +# - { 'path': '/hana/data' } +# - { 'path': '/hana/log' } +# - { 'path': '/hana/shared' } +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - db_high_availability is defined +# - db_high_availability + + +# - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})" +# ansible.builtin.file: +# owner: '{{ hdbadm_uid }}' +# group: sapsys +# path: "{{ item.path }}" +# state: directory +# recurse: true +# with_items: +# - "{{ hana_log_scaleout_mountpoint }}" +# - "{{ hana_data_scaleout_mountpoint }}" +# - { 'path': '/hana/shared' } +# - { 'path': '/usr/sap/{{ db_sid | upper }}' } +# when: +# - tier == 'sapos' +# - node_tier == 'hana' +# - not (db_high_availability | default(false)) +# - db_scale_out + +# ... 
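Note on the pattern kept above: both the deprecated scale-out tasks and their replacements in 2.6.8 build the mount list by appending one descriptor per ANF volume, with each descriptor's path carrying a one-based index (/hana/data/<SID>/mnt00001, /hana/data/<SID>/mnt00002, ...), and then feed that list to 2.6.1.1-anf-mount.yaml via with_items. The sketch below is a minimal, self-contained illustration of just that list-building step; the SID and the two NFS exports are invented placeholders, not values from this repository, and the descriptor is trimmed to the keys needed to show the indexing.

---
# Minimal sketch of the indexed mount-list pattern (illustrative values only).
- hosts: localhost
  gather_facts: false
  vars:
    db_sid: "X00"                            # placeholder SID
    hana_data_mountpoint:                    # placeholder ANF exports
      - "10.1.0.4:/X00-hana-data-1"
      - "10.1.0.5:/X00-hana-data-2"
  tasks:
    - name: "Initialize the HANA data mount list"
      ansible.builtin.set_fact:
        hana_data_scaleout_mountpoint: []

    - name: "Append one descriptor per volume with a 1-based mnt index"
      ansible.builtin.set_fact:
        hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + [descriptor] }}"
      loop: "{{ hana_data_mountpoint }}"
      loop_control:
        index_var: my_index
      vars:
        # Trimmed descriptor; the real tasks also carry opts, permissions, etc.
        descriptor:
          type: 'data'
          mount: "{{ item }}"
          path: "{{ '/hana/data/' + (db_sid | upper) + '/mnt0000' + ((my_index + 1) | string) }}"

    - name: "Show the generated descriptors"
      ansible.builtin.debug:
        var: hana_data_scaleout_mountpoint

Running this with ansible-playbook prints two entries whose paths end in /mnt00001 and /mnt00002; the role's versions then hand the full list to the shared mount include.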
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index de9834b44a..bf3d53dd6e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -503,184 +503,376 @@ - tier == 'sapos' - node_tier == 'hana' -- name: "ANF Mount: Create /hana folder" - ansible.builtin.file: - path: /hana - mode: 0755 - state: directory - group: sapsys - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - -- name: "ANF Mount: HANA data" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA log" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA shared" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[0] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] - -- name: "ANF Mount: HANA data (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'data', - 'temppath': 'hanadata', - 'folder': 'hanadata', - 'mount': '{{ hana_data_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/data', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] - -- name: "ANF Mount: HANA log (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'log', - 'temppath': 'hanalog', - 'folder': 'hanalog', - 'mount' : '{{ hana_log_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path' : '/hana/log', - 'permissions': '0755', - 'set_chattr_on_dir': false, - 'target_nodes': ['hana'], - 
'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" - when: - - tier == 'sapos' - - node_tier == 'hana' - - not db_scale_out - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 1 - - db_hosts | length ==2 - - ansible_hostname == db_hosts[1] - -- name: "ANF Mount: HANA shared (secondary)" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml - loop: - - { - 'type': 'shared', - 'temppath': 'hanashared', - 'folder': 'hanashared', - 'mount': '{{ hana_shared_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', - 'permissions': '0775', - 'set_chattr_on_dir': false, - 'target_nodes' : ['hana'], - 'create_temp_folders': true - } - vars: - primary_host: "{{ db_hosts[1] }}" +# Standard block tasks for non scale out setups +- name: "ANF Mount: Run tasks for non-scale out setups" + block: + - name: "ANF Mount: Create /hana folder" + ansible.builtin.file: + path: /hana + mode: 0755 + state: directory + group: sapsys + when: + - tier == 'sapos' + - node_tier == 'hana' + + - name: "ANF Mount: HANA data" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA log" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA shared" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'hanashared', + 'folder': 'hanashared', + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[0] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 0 + - ansible_hostname == db_hosts[0] + + - name: "ANF Mount: HANA data (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'data', + 'temppath': 'hanadata', + 'folder': 'hanadata', + 'mount': '{{ hana_data_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/data', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: HANA log (secondary)" + ansible.builtin.include_tasks: 
2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'log', + 'temppath': 'hanalog', + 'folder': 'hanalog', + 'mount' : '{{ hana_log_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path' : '/hana/log', + 'permissions': '0755', + 'set_chattr_on_dir': false, + 'target_nodes': ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: HANA shared (secondary)" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'hanashared', + 'folder': 'hanashared', + 'mount': '{{ hana_shared_mountpoint[1] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ db_hosts[1] }}" + when: + - tier == 'sapos' + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 1 + - db_hosts | length == 2 + - ansible_hostname == db_hosts[1] + + - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})" + ansible.builtin.file: + owner: '{{ hdbadm_uid }}' + group: sapsys + path: "{{ item.path }}" + state: directory + recurse: true + loop: + - { 'path': '/hana/data' } + - { 'path': '/hana/log' } + - { 'path': '/hana/shared' } + when: + - tier == 'sapos' + - node_tier == 'hana' when: - - tier == 'sapos' - - node_tier == 'hana' - not db_scale_out - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] -- name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - path: "{{ item.path }}" - state: directory - recurse: true - loop: - - { 'path': '/hana/data' } - - { 'path': '/hana/log' } - - { 'path': '/hana/shared' } +# Run this block when db_scale_out is true but db_high_availability is false +- name: "ANF Mount: Run tasks for scale out setups" + block: + - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/usr/sap/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/data/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" + ansible.builtin.file: + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/log/{{ db_sid | upper }}" + state: directory + when: + - tier == 'hana' + + - name: "ANF Mount: HANA shared - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'shared', + # change folder to match the mount folder within the share + 'folder': 'shared', + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/hana/shared', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + # Run this on all the nodes, not just primary. 
+ primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 0 + + # This runs for unique share per node + - name: "ANF Mount: usrsap - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'usrsap', + 'temppath': 'usrsap', + 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", + 'mount': '{{ hana_shared_mountpoint[0] }}', + 'opts': '{{ mnt_options }}', + 'path': '/usr/sap/{{ db_sid | upper }}', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length == 1 + # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. + - db_scale_out is defined + - db_scale_out + + - name: "ANF Mount: HANA Data - Scale out - Create mount list" + block: + - name: "Initialize HANA Data mountpoints" + ansible.builtin.set_fact: + hana_data_scaleout_mountpoint: [] + - name: "Build HANA Data mountpoints" + ansible.builtin.set_fact: + # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" + hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" + loop: "{{ hana_data_mountpoint }}" + loop_control: + index_var: my_index + # Note the object structure and the specific key:pair values. Do not modify the hard-coded entries. + vars: + dataupdate: + - { type: 'data', + temppath: 'hanadata', + folder: 'hanadata', + mount: "{{ item }}", + opts: "{{ mnt_options }}", + path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", + permissions: '0775', + set_chattr_on_dir: false, + target_nodes: ['hana'], + create_temp_folders: 'true' + } + when: + - node_tier == 'hana' + - hana_data_mountpoint is defined + + - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" + ansible.builtin.debug: + var: hana_data_scaleout_mountpoint + + - name: "ANF Mount: HANA Data - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + # Do not change this to loop:. It breaks; I don't know why, but this only seems to work with with_items: despite multiple formatting attempts. + with_items: + - "{{ hana_data_scaleout_mountpoint | list }}" + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_data_mountpoint is defined + + + + - name: "ANF Mount: HANA Log - Scale out - Create mount list" + block: + - name: "Initialize HANA Log mountpoints" + ansible.builtin.set_fact: + hana_log_scaleout_mountpoint: [] + + - name: "Build HANA log mountpoints" + ansible.builtin.set_fact: + hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}" + loop: "{{ hana_log_mountpoint }}" + loop_control: + index_var: my_index + # Note the object structure and the specific key:pair values. Do not modify the hard-coded entries. 
+ vars: + logupdate: + - { type: 'log', + temppath: 'hanalog', + folder: 'hanalog', + mount: "{{ item }}", + opts: "{{ mnt_options }}", + path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", + permissions: '0775', + set_chattr_on_dir: false, + target_nodes: ['hana'], + create_temp_folders: 'true' + } + when: + - node_tier == 'hana' + - hana_log_mountpoint is defined + + - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" + ansible.builtin.debug: + var: hana_log_scaleout_mountpoint + + - name: "ANF Mount: HANA Log - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + # Do not change this to loop:. It breaks; I don't know why, but this only seems to work with with_items: despite multiple formatting attempts. + with_items: + - "{{ hana_log_scaleout_mountpoint | list }}" + vars: + primary_host: "{{ ansible_hostname }}" + when: + - node_tier == 'hana' + - hana_log_mountpoint is defined + + - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})" + ansible.builtin.file: + owner: '{{ hdbadm_uid }}' + group: sapsys + path: "{{ item.path }}" + state: directory + recurse: true + with_items: + - "{{ hana_log_scaleout_mountpoint }}" + - "{{ hana_data_scaleout_mountpoint }}" + - { 'path': '/hana/shared' } + - { 'path': '/usr/sap/{{ db_sid | upper }}' } + when: + - tier == 'sapos' + - node_tier == 'hana' when: - - tier == 'sapos' - - node_tier == 'hana' + - db_scale_out + - not db_high_availability ... diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 88dfda93f6..be8cdea59e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -358,18 +358,18 @@ - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - not use_simple_mount - NFS_provider == 'ANF' - # only run when no scale out configuration is used. - - db_scale_out is not defined or (not db_scale_out) - + +# Update: Deprecated, as the scale-out ANF mount functionality is now integrated into 2.6.1 and 2.6.8 +# This will be removed in the next release, left here for tracing and documentation # Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used -- name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" - ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml - when: - - NFS_provider == 'ANF' - - db_scale_out is defined - - db_scale_out - - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined +# - name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" +# ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml +# when: +# - NFS_provider == 'ANF' +# - db_scale_out is defined +# - db_scale_out +# - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - name: "2.6 SAP Mounts: - Import ANF tasks" ansible.builtin.import_tasks: 2.6.8-anf-mounts-simplemount.yaml From 7262fbb94274f386dbe93b2c6b17e3f5b03f0bf3 Mon Sep 17 00:00:00 2001 From: Steffen Bo Thomsen Date: Fri, 9 Feb 2024 15:23:15 +0100 Subject: [PATCH 217/607] Add a post-configuration step to 05-DB-and-SAP-installation.yaml that has no actions within the SDAF framework itself, but empowers users to place hooks in their configuration repo that will run after the full installation and configuration of the systems. 
(#545) --- ...k_08_00_00_post_configuration_actions.yaml | 55 +++++++++++++++++++ .../pipelines/05-DB-and-SAP-installation.yaml | 23 ++++++++ 2 files changed, 78 insertions(+) create mode 100644 deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml diff --git a/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml b/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml new file mode 100644 index 0000000000..7f95dd1c33 --- /dev/null +++ b/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml @@ -0,0 +1,55 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for Post Configuration | +# | | +# +------------------------------------4--------------------------------------*/ + +--- + +- hosts: localhost + name: "Post Configuration Actions Playbook: - Initialization" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "Post Configuration Actions Playbook: - Create Progress folder" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress" + state: directory + mode: 0755 + + - name: "Post Configuration Actions Playbook: - Remove post-configuration-actions-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/post-configuration-actions-done" + state: absent + + +# /*---------------------------------------------------------------------------8 +# | | +# | Currently this playbook does nothing, it's here to ensure that | +# | custom ansible hooks in the Config Repo can be run after | +# | the full installation and configuration of VMs for a system | +# | | +# +------------------------------------4--------------------------------------*/ + + +- hosts: localhost + name: "Post Configuration Actions Playbook: - Done" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "Post Configuration Actions Playbook: - Create post-configuration-actions-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/post-configuration-actions-done" + state: touch + mode: 0755 + +... 
+# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 0b8e6a6252..af69c3654b 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -77,6 +77,11 @@ parameters: type: boolean default: false + - name: post_configuration_actions + displayName: Post Configuration Actions + type: boolean + default: false + # 20220929 MKD - ACSS Registration - name: acss_registration displayName: Register System in ACSS @@ -505,6 +510,24 @@ stages: azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.post_configuration_actions, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: Post Configuration Actions + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.acss_registration, true) }}: - template: templates\acss-registration.yaml parameters: From ebcdaf1e56e985666ce8628c9235fdb472c465cb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 9 Feb 2024 16:28:00 +0200 Subject: [PATCH 218/607] Update default_action in storage_accounts.tf --- .../terraform-units/modules/sap_library/storage_accounts.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 2695720340..b21ff30a7b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -45,7 +45,7 @@ resource "azurerm_storage_account" "storage_tfstate" { } network_rules { - default_action = "Deny" + default_action = local.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" virtual_network_subnet_ids = local.virtual_additional_network_ids ip_rules = local.deployer_public_ip_address_used ? ( [ @@ -239,7 +239,7 @@ resource "azurerm_storage_account" "storage_sapbits" { choice = "MicrosoftRouting" } network_rules { - default_action = "Deny" + default_action = local.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" virtual_network_subnet_ids = local.virtual_additional_network_ids ip_rules = local.deployer_public_ip_address_used ? 
( [ From f5b45cf7f49a168392dcce56de3eed21426fd918 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 9 Feb 2024 16:44:05 +0200 Subject: [PATCH 219/607] simplify the logic for VNets --- .../modules/sap_landscape/storage_accounts.tf | 63 +++++++------------ 1 file changed, 22 insertions(+), 41 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 7b7880e1d4..0d477d3fd5 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -150,7 +150,7 @@ resource "azurerm_storage_account" "witness_storage" { tags = var.tags network_rules { - default_action = "Deny" + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" virtual_network_subnet_ids = compact([ local.database_subnet_defined ? ( local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( @@ -159,10 +159,7 @@ resource "azurerm_storage_account" "witness_storage" { local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( null ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null + length(local.deployer_subnet_management_id) > 0 ? local.deployer_subnet_management_id : null ] ) ip_rules = compact([ @@ -299,24 +296,16 @@ resource "azurerm_storage_account" "transport" { public_network_access_enabled = var.public_network_access_enabled network_rules { - default_action = "Deny" - virtual_network_subnet_ids = compact( - [ - local.database_subnet_defined ? ( - local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( - "" - ), local.application_subnet_defined ? ( - local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( - "" - ), local.web_subnet_defined ? ( - local.web_subnet_existing ? var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( - "" - ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null - + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" + virtual_network_subnet_ids = compact([ + local.database_subnet_defined ? ( + local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( + null + ), local.application_subnet_defined ? ( + local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( + null + ), + length(local.deployer_subnet_management_id) > 0 ? local.deployer_subnet_management_id : null ] ) ip_rules = compact([ @@ -525,24 +514,16 @@ resource "azurerm_storage_account" "install" { public_network_access_enabled = var.public_network_access_enabled tags = var.tags network_rules { - default_action = "Deny" - virtual_network_subnet_ids = compact( - [ - local.database_subnet_defined ? ( - local.database_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( - "" - ), local.application_subnet_defined ? ( - local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( - "" - ), local.web_subnet_defined ? ( - local.web_subnet_existing ? var.infrastructure.vnets.sap.subnet_web.arm_id : azurerm_subnet.web[0].id) : ( - "" - ), - data.azurerm_resource_group.mgmt[0].location == (local.resource_group_exists ? ( - data.azurerm_resource_group.resource_group[0].location) : ( - azurerm_resource_group.resource_group[0].location - )) ? local.deployer_subnet_management_id : null - + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" + virtual_network_subnet_ids = compact([ + local.database_subnet_defined ? ( + local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( + null + ), local.application_subnet_defined ? ( + local.application_subnet_existing ? var.infrastructure.vnets.sap.subnet_app.arm_id : azurerm_subnet.app[0].id) : ( + null + ), + length(local.deployer_subnet_management_id) > 0 ? local.deployer_subnet_management_id : null ] ) ip_rules = compact([ From a4a9bec3dd4d6172d3bb1a2438355685bea3c95b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 11:31:11 +0200 Subject: [PATCH 220/607] Web App Updates --- Webapp/SDAF/Controllers/SystemController.cs | 3 + .../SDAF/ParameterDetails/custom_naming.json | 133 +++++++++++------- .../SDAF/ParameterDetails/custom_sizes.json | 22 +-- 3 files changed, 97 insertions(+), 61 deletions(-) diff --git a/Webapp/SDAF/Controllers/SystemController.cs b/Webapp/SDAF/Controllers/SystemController.cs index 785fef68ce..444a4ac2a6 100644 --- a/Webapp/SDAF/Controllers/SystemController.cs +++ b/Webapp/SDAF/Controllers/SystemController.cs @@ -113,6 +113,7 @@ public async Task GetById(string id, string partitionKey) { file = await _appFileService.GetByIdAsync(id + "_custom_sizes.json", partitionKey); s.custom_disk_sizes_filename = id + "_custom_sizes.json"; + } catch { @@ -266,6 +267,8 @@ public async Task DeployConfirmedAsync(string id, string var stream = new MemoryStream(file.Content); system.custom_disk_sizes_filename = id + "_custom_sizes.json"; + system.database_size = "Custom"; + system.app_tier_sizing_dictionary_key = "Custom"; string thisContent = System.Text.Encoding.UTF8.GetString(stream.ToArray()); string pathForNaming = $"/SYSTEM/{id}/{id}_custom_sizes.json"; diff --git a/Webapp/SDAF/ParameterDetails/custom_naming.json b/Webapp/SDAF/ParameterDetails/custom_naming.json index 9a3734b81b..a854d40009 100644 --- a/Webapp/SDAF/ParameterDetails/custom_naming.json +++ b/Webapp/SDAF/ParameterDetails/custom_naming.json @@ -1,33 +1,33 @@ { "availabilityset_names": { - "app": ["z1_app-avset", "z2_app-avset"], - "db": ["z1_db-avset", "z2_db-avset"], - "scs": ["z1_scs-avset", "z2_scs-avset"], - "web": ["z1_web-avset", "z2_web-avset"] + "app": [ "z1_app-avset" ], + "db": [ "z1_db-avset" ], + "scs": [ "z1_scs-avset" ], + "web": [ "z1_web-avset" ] }, "keyvault_names": { "DEPLOYER": { - "private_access": "DEVWEEUprvt86D", - "user_access": "DEVWEEUuser86D" + "private_access": "DEVWEEUprvt###", + "user_access": "DEVWEEUuser###" }, "LIBRARY": { - "private_access": "DEVWEEUSAPLIBprvt86D", - "user_access": "DEVWEEUSAPLIBuser86D" + "private_access": "DEVWEEUSAPLIBprvt###", + "user_access": "DEVWEEUSAPLIBuser###" }, "SDU": { - "private_access": "DEVWEEUSAP01X00p86D", - "user_access": "DEVWEEUSAP01X00u86D" + 
"private_access": "DEVWEEUSAP01SIDp###", + "user_access": "DEVWEEUSAP01SIDu###" }, "WORKLOAD_ZONE": { - "private_access": "DEVWEEUSAP01prvt86D", - "user_access": "DEVWEEUSAP01user86D" + "private_access": "DEVWEEUSAP01prvt###", + "user_access": "DEVWEEUSAP01user###" } }, - "ppg_names": ["-z1-ppg", "-z2-ppg"], + "ppg_names": [ "-z1-ppg" ], "prefix": { "DEPLOYER": "DEV-WEEU-", "LIBRARY": "DEV-WEEU", - "SDU": "DEV-WEEU-SAP01-X00", + "SDU": "DEV-WEEU-SAP01-SID", "WORKLOAD_ZONE": "DEV-WEEU-SAP01" }, "resource_prefixes": { @@ -45,7 +45,6 @@ "app_subnet_nsg": "", "bastion_host": "", "bastion_pip": "", - "cluster_disk": "", "database_cluster_disk": "", "db_alb": "", "db_alb_bepool": "", @@ -107,8 +106,8 @@ "scs_avset": "", "scs_clst_feip": "", "scs_clst_hp": "", - "scs_cluster_disk": "", "scs_clst_rule": "", + "scs_cluster_disk": "", "scs_ers_feip": "", "scs_ers_hp": "", "scs_ers_rule": "", @@ -138,6 +137,7 @@ "transport_volume": "", "usrsap": "", "vm": "", + "vmss": "", "vnet": "", "vnet_rg": "", "web_alb": "", @@ -168,9 +168,7 @@ "app_subnet_nsg": "appSubnet-nsg", "bastion_host": "bastion-host", "bastion_pip": "bastion-pip", - "cluster_disk": "cluster-disks", - "database_cluster_disk": "", - "db-cluster-disk": null, + "database_cluster_disk": "db-cluster-disk", "db_alb": "db-alb", "db_alb_bepool": "dbAlb-bePool", "db_alb_feip": "dbAlb-feip", @@ -233,7 +231,7 @@ "scs_clst_feip": "scsClst-feip", "scs_clst_hp": "scsClst-hp", "scs_clst_rule": "scsClst-rule", - "scs_cluster_disk" : "scs-cluster-disk", + "scs_cluster_disk": "scs-cluster-disk", "scs_ers_feip": "scsErs-feip", "scs_ers_hp": "scsErs-hp", "scs_ers_rule": "scsErs-rule", @@ -263,6 +261,7 @@ "transport_volume": "transport", "usrsap": "usrsap", "vm": "", + "vmss": "-vmss", "vnet": "-vnet", "vnet_rg": "-INFRASTRUCTURE", "web_alb": "web-alb", @@ -281,42 +280,76 @@ }, "separator": "_", "storageaccount_names": { - "DEPLOYER": "devweeudiag86d", + "DEPLOYER": "devweeudiag###", "LIBRARY": { - "library_storageaccount_name": "devweeusaplib86d", - "terraformstate_storageaccount_name": "devweeutfstate86d" + "library_storageaccount_name": "devweeusaplib###", + "terraformstate_storageaccount_name": "devweeutfstate###" }, - "SDU": "devweeusap01diag86d", + "SDU": "devweeusap01diag###", "WORKLOAD_ZONE": { - "landscape_shared_install_storage_account_name": "devweeusap01install86d", - "landscape_shared_transport_storage_account_name": "devweeusap01transport86d", - "landscape_storageaccount_name": "devweeusap01diag86d", - "witness_storageaccount_name": "devweeusap01witness86d" + "landscape_shared_install_storage_account_name": "devweeusap01install###", + "landscape_shared_transport_storage_account_name": "devweeusap01transport###", + "landscape_storageaccount_name": "devweeusap01diag###", + "witness_storageaccount_name": "devweeusap01witness###" } }, "virtualmachine_names": { - "ANCHOR_COMPUTERNAME": ["x00anchorz101l86d", "x00anchorz202l86d"], - "ANCHOR_SECONDARY_DNSNAME": ["x00anchorz101l86d", "x00anchorz202l86d"], - "ANCHOR_VMNAME": ["x00anchor_z1_01l86d", "x00anchor_z2_02l86d"], - "ANYDB_COMPUTERNAME": ["x00db01l086d", "x00db01l186d"], - "ANYDB_SECONDARY_DNSNAME": ["vx00dhdb01l086", "vx00dhdb01l186"], - "ANYDB_VMNAME": ["x00db01l086d", "x00db01l186d"], - "APP_COMPUTERNAME": ["x00app01l86d", "x00app02l86d"], - "APP_SECONDARY_DNSNAME": ["vx00app01l86", "vx00app02l86"], - "APP_VMNAME": ["x00app01l86d", "x00app02l86d"], - "DEPLOYER": ["devweeudeploy01"], - "HANA_COMPUTERNAME": ["x00dhdb01l086", "x00dhdb01l186"], - "HANA_SECONDARY_DNSNAME": ["vx00dhdb01l086", 
"vx00dhdb01l186"], - "HANA_VMNAME": ["x00dhdb01l086d", "x00dhdb01l186d"], - "ISCSI_COMPUTERNAME": ["devsap01weeuiscsi00"], - "OBSERVER_COMPUTERNAME": ["x00observer01l86d", "x00observer02l86d"], - "OBSERVER_VMNAME": ["x00observer01l86d", "x00observer02l86d"], - "SCS_COMPUTERNAME": ["x00scs01l86d", "x00scs02l86d"], - "SCS_SECONDARY_DNSNAME": ["vx00scs01l86", "vx00scs02l86"], - "SCS_VMNAME": ["x00scs01l86d", "x00scs02l86d"], - "WEB_COMPUTERNAME": ["x00web01l86d", "x00web02l86d"], - "WEB_SECONDARY_DNSNAME": ["vw01web01l86", "vw01web02l86"], - "WEB_VMNAME": ["w01web01l86d", "w01web02l86d"], + "ANCHOR_COMPUTERNAME": [ "sidanchorz100l###" ], + "ANCHOR_SECONDARY_DNSNAME": [ "sidanchorz100l###" ], + "ANCHOR_VMNAME": [ "sidanchor_z1_00l###" ], + "ANYDB_COMPUTERNAME": [ + "siddb00l0###", + "siddb01l0###", + "siddb02l0###", + "siddb03l0###" + ], + "ANYDB_SECONDARY_DNSNAME": [ + "vsidd00l014", + "vsidd01l014", + "vsidd02l014", + "vsidd03l014", + "vsiddhdb00l114", + "vsiddhdb01l114", + "vsiddhdb01l114", + "vsiddhdb03l114" + ], + "ANYDB_VMNAME": [ + "siddb_z1_00l0###", + "siddb_z1_01l0###", + "siddb_z1_02l0###", + "siddb_z1_03l0###" + ], + "APP_COMPUTERNAME": [ "sidapp00l###" ], + "APP_SECONDARY_DNSNAME": [ "vsida00l14" ], + "APP_VMNAME": [ "sidapp_z1_00l###" ], + "DEPLOYER": [ "devweeudeploy00" ], + "HANA_COMPUTERNAME": [ + "siddhdb00l014", + "siddhdb01l014", + "siddhdb02l014", + "siddhdb03l014" + ], + "HANA_SECONDARY_DNSNAME": [ + "vsiddhdb00l014", + "vsiddhdb01l014", + "vsiddhdb02l014", + "vsiddhdb03l014" + ], + "HANA_VMNAME": [ + "siddhdb_z1_00l0###", + "siddhdb_z1_01l0###", + "siddhdb_z1_02l0###", + "siddhdb_z1_03l0###" + ], + "ISCSI_COMPUTERNAME": [ "anfsap01noeuiscsi00" ], + "OBSERVER_COMPUTERNAME": [ "sidobserver00l###" ], + "OBSERVER_VMNAME": [ "sidobserver_z1_00l###" ], + "SCS_COMPUTERNAME": [ "sidscs00l###" ], + "SCS_SECONDARY_DNSNAME": [ "vsids00l14" ], + "SCS_VMNAME": [ "sidscs_z1_00l###" ], + "WEB_COMPUTERNAME": [ "sidweb00l###" ], + "WEB_SECONDARY_DNSNAME": [ "vw00l14" ], + "WEB_VMNAME": [ "web_z1_00l###" ], "WORKLOAD_VMNAME": [] } } diff --git a/Webapp/SDAF/ParameterDetails/custom_sizes.json b/Webapp/SDAF/ParameterDetails/custom_sizes.json index 701c598d55..9858e57720 100644 --- a/Webapp/SDAF/ParameterDetails/custom_sizes.json +++ b/Webapp/SDAF/ParameterDetails/custom_sizes.json @@ -1,8 +1,8 @@ { "db": { - "Default": { + "Custom": { "compute": { - "vm_size": "Standard_E20s_v4", + "vm_size": "Standard_E20ds_v4", "accelerated_networking": true }, "storage": [ @@ -55,9 +55,9 @@ } }, "app": { - "Default": { + "Custom": { "compute": { - "vm_size": "Standard_D4s_v5", + "vm_size": "Standard_D4ds_v5", "accelerated_networking": true }, "storage": [ @@ -81,7 +81,7 @@ }, "Optimized": { "compute": { - "vm_size": "Standard_D4s_v5", + "vm_size": "Standard_D4ds_v5", "accelerated_networking": true }, "storage": [ @@ -105,9 +105,9 @@ } }, "scs": { - "Default": { + "Custom": { "compute": { - "vm_size": "Standard_D4s_v5", + "vm_size": "Standard_D4ds_v5", "accelerated_networking": true }, "storage": [ @@ -131,9 +131,9 @@ } }, "scsha": { - "Default": { + "Custom": { "compute": { - "vm_size": "Standard_D4s_v5", + "vm_size": "Standard_D4ds_v5", "accelerated_networking": true }, "storage": [ @@ -181,9 +181,9 @@ } }, "web": { - "Default": { + "Custom": { "compute": { - "vm_size": "Standard_D4s_v5", + "vm_size": "Standard_D4ds_v5", "accelerated_networking": true }, "storage": [ From 4dd54d10c40e6d0e793821bb9b25ae3ecd738762 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 11:37:24 +0200 Subject: 
[PATCH 221/607] Fixing typo --- .../modules/sap_system/output_files/variables_global.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 67cdef3f0b..531669636e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -136,7 +136,7 @@ variable "save_naming_information" { description = "If defined, will save the naming information for the resources" default = false } -variable "scale_out" { description = "If true, the SAP System will be scale out" } } +variable "scale_out" { description = "If true, the SAP System will be scale out" } variable "scs_shared_disks" { description = "SCS Azure Shared Disk" } From 7dc247eea2cfb7aefe0efedeb41287b07a66c150 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 16:03:01 +0200 Subject: [PATCH 222/607] Add ANF subnet --- .../modules/sap_landscape/nsg.tf | 37 +++++++++++++++++++ .../modules/sap_landscape/variables_local.tf | 22 +++++++++++ 2 files changed, 59 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index 998a6b9cd6..fd4fac7590 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -82,6 +82,43 @@ resource "azurerm_network_security_group" "app" { ) } + + +# Creates SAP anf subnet nsg +resource "azurerm_network_security_group" "anf" { + provider = azurerm.main + count = var.NFS_provider == "ANF" ? ( + local.ANF_subnet_nsg_exists ? ( + 0) : ( + 1 + )) : ( + 0 + ) + name = local.ANF_subnet_nsg_name + resource_group_name = local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].resource_group_name + ) : ( + azurerm_virtual_network.vnet_sap[0].resource_group_name + ) + location = local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].location) : ( + azurerm_virtual_network.vnet_sap[0].location + ) +} + +# Associates anf nsg to anf subnet + +resource "azurerm_subnet_network_security_group_association" "anf" { + provider = azurerm.main + count = local.ANF_subnet_defined && !local.ANF_subnet_nsg_exists ? 1 : 0 + depends_on = [ + azurerm_subnet.anf + ] + subnet_id = local.ANF_subnet_existing ? var.infrastructure.vnets.sap.subnet_anf.arm_id : azurerm_subnet.anf[0].id + network_security_group_id = azurerm_network_security_group.anf[0].id +} + + # Associates app nsg to app subnet resource "azurerm_subnet_network_security_group_association" "app" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 9c880dccea..04ecad8781 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -458,6 +458,28 @@ locals { try(var.infrastructure.vnets.sap.subnet_anf.prefix, "")) : ( "" ) + ANF_subnet_nsg_arm_id = local.ANF_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_anf.nsg.arm_id, "")) : ( + "" + ) + ANF_subnet_nsg_exists = length(local.ANF_subnet_nsg_arm_id) > 0 + + ANF_subnet_nsg_name = local.ANF_subnet_nsg_exists ? 
( + try(split("/", local.ANF_subnet_nsg_arm_id)[8], "")) : ( + length(try(var.infrastructure.vnets.sap.subnet_anf.nsg.name, "")) > 0 ? ( + var.infrastructure.vnets.sap.subnet_anf.nsg.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.anf_subnet_nsg, + length(local.prefix) > 0 ? ( + local.prefix) : ( + var.infrastructure.environment + ), + var.naming.separator, + local.resource_suffixes.anf_subnet_nsg + ) + ) + ) + # Store the Deployer KV in workload zone KV deployer_keyvault_user_name = try(var.deployer_tfstate.deployer_kv_user_name, "") From 0b4d3a2bf255863ddbe833ec372bf7d2386edd52 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 21:16:37 +0200 Subject: [PATCH 223/607] Improve ANF zonal support --- Webapp/SDAF/Models/LandscapeModel.cs | 5 ++ Webapp/SDAF/Models/SystemModel.cs | 2 + .../ParameterDetails/LandscapeDetails.json | 52 +++++++++++++++++++ .../ParameterDetails/LandscapeTemplate.txt | 14 +++-- .../SDAF/ParameterDetails/SystemDetails.json | 9 ++++ .../SDAF/ParameterDetails/SystemTemplate.txt | 10 ++++ .../run/sap_landscape/tfvar_variables.tf | 11 ++++ .../terraform/run/sap_landscape/transform.tf | 25 ++++++++- .../run/sap_landscape/variables_local.tf | 22 -------- .../modules/sap_landscape/anf.tf | 12 +++++ 10 files changed, 135 insertions(+), 27 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 95f1411671..8a68327929 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -214,6 +214,8 @@ public bool IsValid() public int? ANF_transport_volume_size { get; set; } + public string ANF_transport_volume_zone { get; set; } + public bool? ANF_install_volume_use_existing { get; set; } public string ANF_install_volume_name { get; set; } @@ -222,6 +224,9 @@ public bool IsValid() public int? ANF_install_volume_size { get; set; } + public string ANF_install_volume_zone { get; set; } + + /*---------------------------------------------------------------------------8 | | | DNS information | diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 9bd4d4941d..c84edc0d49 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -474,6 +474,8 @@ public bool IsValid() public bool? ANF_HANA_use_AVG { get; set; } = false; + public bool? 
ANF_HANA_use_Zones { get; set; } = true;
+
     /*---------------------------------------------------------------------------8
     |                                                                            |
     |                                  Data                                      |
diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
index 03ad2d5641..2a75b224f3 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
+++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
@@ -960,6 +960,32 @@
       "Overrules": "",
       "Display": 3
     },
+    {
+      "Name": "ANF_transport_volume_zone",
+      "Required": false,
+      "Description": "Azure NetApp transport volume availability zone.",
+      "Type": "field",
+      "Options": [
+        {
+          "Text": "",
+          "Value": ""
+        },
+        {
+          "Text": "1",
+          "Value": "1"
+        },
+        {
+          "Text": "2",
+          "Value": "2"
+        },
+        {
+          "Text": "3",
+          "Value": "3"
+        }
+      ],
+      "Overrules": "",
+      "Display": 2
+    },
     {
       "Name": "ANF_install_volume_throughput",
       "Required": false,
@@ -968,6 +994,32 @@
       "Options": [],
       "Overrules": "",
       "Display": 3
+    },
+    {
+      "Name": "ANF_install_volume_zone",
+      "Required": false,
+      "Description": "Azure NetApp install volume availability zone.",
+      "Type": "field",
+      "Options": [
+        {
+          "Text": "",
+          "Value": ""
+        },
+        {
+          "Text": "1",
+          "Value": "1"
+        },
+        {
+          "Text": "2",
+          "Value": "2"
+        },
+        {
+          "Text": "3",
+          "Value": "3"
+        }
+      ],
+      "Overrules": "",
+      "Display": 2
     }
   ]
 },
diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
index c70363ca2c..9a31c49b0c 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt
@@ -376,18 +376,21 @@ $$ANF_use_existing_pool$$
 #                                                                                       #
 #########################################################################################
 
-#ANF_transport_volume_use_existing defines if an existing volume is used for transport
+# ANF_transport_volume_use_existing defines if an existing volume is used for transport
 $$ANF_transport_volume_use_existing$$
 
-#ANF_transport_volume_name is the name of the transport volume
+# ANF_transport_volume_name is the name of the transport volume
 $$ANF_transport_volume_name$$
 
-#ANF_transport_volume_throughput is the throughput for the transport volume
+# ANF_transport_volume_throughput is the throughput for the transport volume
 $$ANF_transport_volume_throughput$$
 
-#ANF_transport_volume_size is the size for the transport volume
+# ANF_transport_volume_size is the size for the transport volume
 $$ANF_transport_volume_size$$
 
+# ANF_transport_volume_zone is the zone for the transport volume
+$$ANF_transport_volume_zone$$
+
 #########################################################################################
 #                                                                                       #
 #                                 Install ANF Volume                                    #
@@ -406,6 +409,9 @@ $$ANF_install_volume_throughput$$
 #ANF_install_volume_size is the size for the install volume
 $$ANF_install_volume_size$$
 
+# ANF_install_volume_zone is the zone for the install volume
+$$ANF_install_volume_zone$$
+
 
 ###########################################################################
 #                                                                         #
diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json
index 6e30cd8a26..aac9fa9ac4 100644
--- a/Webapp/SDAF/ParameterDetails/SystemDetails.json
+++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json
@@ -1810,6 +1810,15 @@
       "Overrules": "",
       "Display": 2
     },
+    {
+      "Name": "ANF_HANA_use_Zones",
+      "Required": false,
+      "Description": "defines if the ANF volume(s) will be deployed in availability zones",
+      "Type": "checkbox",
+      "Options": [],
+      "Overrules": "",
+      "Display": 2
+    },
     {
       "Name":
"database_HANA_use_ANF_scaleout_scenario", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index dcf5303a79..7e8e8a111d 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -476,9 +476,19 @@ $$sapmnt_private_endpoint_id$$ # use_random_id_for_storageaccounts defines if the sapmnt storage account name will have a random suffix $$use_random_id_for_storageaccounts$$ +######################################################################################### +# # +# ANF # +# # +######################################################################################### + # ANF_HANA_use_AVG defines if the ANF volume will be created in an Application Volume Group $$ANF_HANA_use_AVG$$ +# ANF_HANA_use_Zones defines if the ANF volume will be created in an Availability zones +$$ANF_HANA_use_Zones$$ + + ######################################################################################### # # # HANA Data # diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index e210c6b1f9..3ecf41b26a 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -538,6 +538,11 @@ variable "ANF_transport_volume_size" { default = 128 } +variable "ANF_transport_volume_zone" { + description = "Transport volume availability zone" + default = "" + } + variable "ANF_install_volume_use_existing" { description = "Use existing install volume" default = false @@ -558,6 +563,12 @@ variable "ANF_install_volume_size" { default = 1024 } + +variable "ANF_install_volume_zone" { + description = "Install volume availability zone" + default = "" + } + variable "use_AFS_for_shared_storage" { description = "If true, will use AFS for all shared storage." 
default = false diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index dea04c3de2..0840953f91 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -461,7 +461,30 @@ locals { private_ip_address = var.utility_vm_nic_ips disk_size = var.utility_vm_os_disk_size disk_type = var.utility_vm_os_disk_type - } + ANF_settings = { + use = var.NFS_provider == "ANF" + name = var.ANF_account_name + arm_id = var.ANF_account_arm_id + pool_name = var.ANF_pool_name + use_existing_pool = var.ANF_use_existing_pool + service_level = var.ANF_service_level + size_in_tb = var.ANF_pool_size + qos_type = var.ANF_qos_type + + use_existing_transport_volume = var.ANF_transport_volume_use_existing + transport_volume_name = var.ANF_transport_volume_name + transport_volume_size = var.ANF_transport_volume_size + transport_volume_throughput = var.ANF_transport_volume_throughput + transport_volume_zone = var.ANF_transport_volume_zone + + use_existing_install_volume = var.ANF_install_volume_use_existing + install_volume_name = var.ANF_install_volume_name + install_volume_size = var.ANF_install_volume_size + install_volume_throughput = var.ANF_install_volume_throughput + install_volume_zone = var.ANF_install_volume_zone + + } + } diff --git a/deploy/terraform/run/sap_landscape/variables_local.tf b/deploy/terraform/run/sap_landscape/variables_local.tf index ee1b19ad44..3e1cc00b5c 100644 --- a/deploy/terraform/run/sap_landscape/variables_local.tf +++ b/deploy/terraform/run/sap_landscape/variables_local.tf @@ -58,28 +58,6 @@ locals { object_id = data.azurerm_client_config.current.object_id } - ANF_settings = { - use = var.NFS_provider == "ANF" - name = var.ANF_account_name - arm_id = var.ANF_account_arm_id - pool_name = var.ANF_pool_name - use_existing_pool = var.ANF_use_existing_pool - service_level = var.ANF_service_level - size_in_tb = var.ANF_pool_size - qos_type = var.ANF_qos_type - - use_existing_transport_volume = var.ANF_transport_volume_use_existing - transport_volume_name = var.ANF_transport_volume_name - transport_volume_size = var.ANF_transport_volume_size - transport_volume_throughput = var.ANF_transport_volume_throughput - - use_existing_install_volume = var.ANF_install_volume_use_existing - install_volume_name = var.ANF_install_volume_name - install_volume_size = var.ANF_install_volume_size - install_volume_throughput = var.ANF_install_volume_throughput - - } - custom_names = length(var.name_override_file) > 0 ? ( jsondecode(file(format("%s/%s", path.cwd, var.name_override_file))) ) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf b/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf index c94232a067..61e7a36bbf 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/anf.tf @@ -134,6 +134,11 @@ resource "azurerm_netapp_volume" "transport" { ) : ( azurerm_netapp_pool.workload_netapp_pool[0].name ) + zone = length(var.ANF_settings.transport_volume_zone) > 0 ? ( + var.ANF_settings.transport_volume_zone + ) : ( + null + ) throughput_in_mibps = var.ANF_settings.use_existing_pool ? ( var.ANF_settings.transport_volume_throughput @@ -243,6 +248,13 @@ resource "azurerm_netapp_volume" "install" { azurerm_netapp_pool.workload_netapp_pool[0].name ) + zone = length(var.ANF_settings.install_volume_zone) > 0 ? 
( + var.ANF_settings.install_volume_zone + ) : ( + null + ) + + throughput_in_mibps = var.ANF_settings.use_existing_pool ? ( var.ANF_settings.install_volume_throughput ) : ( From 08479eaf11e477a44800a00d1f6d8752f038b1c2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 21:44:51 +0200 Subject: [PATCH 224/607] Change parameter type from "field" to "list" in LandscapeDetails.json --- Webapp/SDAF/ParameterDetails/LandscapeDetails.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 2a75b224f3..80d0db38ab 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -999,7 +999,7 @@ "Name": "ANF_install_volume_zone", "Required": false, "Description": "Azure NetApp install volume availability zone.", - "Type": "field", + "Type": "list", "Options": [ { "Text": "", From 584ae8dc4b63263d9d1838f5c297056a628b447a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 22:22:25 +0200 Subject: [PATCH 225/607] TFState fixes --- deploy/terraform/bootstrap/sap_deployer/output.tf | 2 +- deploy/terraform/run/sap_deployer/output.tf | 2 +- .../terraform-units/modules/sap_library/variables_local.tf | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/output.tf b/deploy/terraform/bootstrap/sap_deployer/output.tf index de73fe44ca..fe36c32d3e 100644 --- a/deploy/terraform/bootstrap/sap_deployer/output.tf +++ b/deploy/terraform/bootstrap/sap_deployer/output.tf @@ -166,7 +166,7 @@ output "enable_firewall_for_keyvaults_and_storage" { output "public_network_access_enabled" { description = "Defines if the public access should be enabled for keyvaults and storage" - value = var.public_network_access_enabled || !var.use_private_endpoint + value = var.public_network_access_enabled } diff --git a/deploy/terraform/run/sap_deployer/output.tf b/deploy/terraform/run/sap_deployer/output.tf index de73fe44ca..fe36c32d3e 100644 --- a/deploy/terraform/run/sap_deployer/output.tf +++ b/deploy/terraform/run/sap_deployer/output.tf @@ -166,7 +166,7 @@ output "enable_firewall_for_keyvaults_and_storage" { output "public_network_access_enabled" { description = "Defines if the public access should be enabled for keyvaults and storage" - value = var.public_network_access_enabled || !var.use_private_endpoint + value = var.public_network_access_enabled } diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index e9ca35c39c..9940826e05 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -45,9 +45,9 @@ locals { // deployer_users_id = try(local.deployer.users.object_id, []) // Current service principal - service_principal = try(var.service_principal, {}) + service_principal = try(var.service_principal, {}) - deployer_public_ip_address = try(var.deployer_tfstate.deployer_public_ip_address, "") + deployer_public_ip_address = try(var.deployer_tfstate.deployer_public_ip_address, "") enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) From 353016fff16dbbf1f117ae0df76dc15e371a1c37 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 22:36:23 +0200 Subject: [PATCH 226/607] Bump up the 
version --- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 31e9624ea8..82baef9ad3 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -4,7 +4,7 @@ become_user_name: root # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.10.1.0" +SDAF_Version: "3.11.0.0" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index e05acb17e7..acbeef0954 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.10.1.0 +3.11.0.0 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 76ead49385..1963eb145d 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -26,7 +26,7 @@ $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName $ARM_TENANT_ID = $Env:ARM_TENANT_ID #endregion -$versionLabel = "v3.10.1.0" +$versionLabel = "v3.11.0.0" From 24220bcd44fe0ed35a6f25ea064d21f04fbba5a9 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 22:42:15 +0200 Subject: [PATCH 227/607] TFState access tests --- .../bootstrap/sap_deployer/module.tf | 2 +- deploy/terraform/run/sap_deployer/module.tf | 2 +- deploy/terraform/run/sap_landscape/module.tf | 2 +- .../modules/sap_library/variables_local.tf | 76 +++++++++---------- 4 files changed, 41 insertions(+), 41 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index 326462f9a6..e71123286c 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -41,7 +41,7 @@ module "sap_deployer" { management_dns_subscription_id = var.management_dns_subscription_id options = local.options place_delete_lock_on_resources = var.place_delete_lock_on_resources - public_network_access_enabled = var.public_network_access_enabled || !var.use_private_endpoint + public_network_access_enabled = var.public_network_access_enabled sa_connection_string = var.sa_connection_string soft_delete_retention_days = var.soft_delete_retention_days set_secret_expiry = var.set_secret_expiry diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index 44ac47ec20..aaa1b2cc35 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -41,7 +41,7 @@ module "sap_deployer" { management_dns_subscription_id = var.management_dns_subscription_id options = local.options place_delete_lock_on_resources = var.place_delete_lock_on_resources - public_network_access_enabled = var.public_network_access_enabled || !var.use_private_endpoint + public_network_access_enabled = var.public_network_access_enabled sa_connection_string = var.sa_connection_string set_secret_expiry = var.set_secret_expiry soft_delete_retention_days = var.soft_delete_retention_days diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index d401c92c29..8f19a39678 100644 --- a/deploy/terraform/run/sap_landscape/module.tf 
+++ b/deploy/terraform/run/sap_landscape/module.tf @@ -46,7 +46,7 @@ module "sap_landscape" { options = local.options peer_with_control_plane_vnet = var.peer_with_control_plane_vnet place_delete_lock_on_resources = var.place_delete_lock_on_resources - public_network_access_enabled = var.public_network_access_enabled || !var.use_private_endpoint + public_network_access_enabled = var.public_network_access_enabled register_virtual_network_to_dns = var.register_virtual_network_to_dns service_principal = var.use_spn ? local.service_principal : local.account soft_delete_retention_days = var.soft_delete_retention_days diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index 9940826e05..c179a5df96 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -6,39 +6,39 @@ locals { // Region - prefix = length(var.infrastructure.resource_group.name) > 0 ? ( - var.infrastructure.resource_group.name) : ( - trimspace(var.naming.prefix.LIBRARY) - ) + prefix = length(var.infrastructure.resource_group.name) > 0 ? ( + var.infrastructure.resource_group.name) : ( + trimspace(var.naming.prefix.LIBRARY) + ) // Resource group - resource_group_exists = length(var.infrastructure.resource_group.arm_id) > 0 - - resource_group_name = local.resource_group_exists ? ( - try(split("/", var.infrastructure.resource_group.arm_id)[4], "")) : ( - length(var.infrastructure.resource_group.name) > 0 ? ( - var.infrastructure.resource_group.name) : ( - format("%s%s%s", - var.naming.resource_prefixes.library_rg, - local.prefix, - var.naming.resource_suffixes.library_rg - ) - ) - ) - resource_group_library_location = local.resource_group_exists ? ( - data.azurerm_resource_group.library[0].location) : ( - azurerm_resource_group.library[0].location - ) + resource_group_exists = length(var.infrastructure.resource_group.arm_id) > 0 + + resource_group_name = local.resource_group_exists ? ( + try(split("/", var.infrastructure.resource_group.arm_id)[4], "")) : ( + length(var.infrastructure.resource_group.name) > 0 ? ( + var.infrastructure.resource_group.name) : ( + format("%s%s%s", + var.naming.resource_prefixes.library_rg, + local.prefix, + var.naming.resource_suffixes.library_rg + ) + ) + ) + resource_group_library_location = local.resource_group_exists ? ( + data.azurerm_resource_group.library[0].location) : ( + azurerm_resource_group.library[0].location + ) // Storage account for sapbits - sa_sapbits_exists = length(var.storage_account_sapbits.arm_id) > 0 - sa_sapbits_name = local.sa_sapbits_exists ? ( - split("/", var.storage_account_sapbits.arm_id)[8]) : ( - var.naming.storageaccount_names.LIBRARY.library_storageaccount_name - ) + sa_sapbits_exists = length(var.storage_account_sapbits.arm_id) > 0 + sa_sapbits_name = local.sa_sapbits_exists ? ( + split("/", var.storage_account_sapbits.arm_id)[8]) : ( + var.naming.storageaccount_names.LIBRARY.library_storageaccount_name + ) // Storage account for tfstate - sa_tfstate_exists = length(var.storage_account_tfstate.arm_id) > 0 + sa_tfstate_exists = length(var.storage_account_tfstate.arm_id) > 0 // Comment out code with users.object_id for the time being. 
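Note on the split("/", ...)[n] expressions realigned in the locals above: they rely on the fixed shape of an Azure resource ID, /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/<provider-namespace>/<resource-type>/<resource-name>, where index 4 is the resource group name and index 8 is the resource name. A minimal HCL sketch of the convention; the ID and names below are made-up examples, not values from this repository:

locals {
  # Hypothetical ARM resource ID, used only to illustrate the indexing.
  example_arm_id         = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MGMT-WEEU-SAP_LIBRARY/providers/Microsoft.Storage/storageAccounts/mgmtweeutfstate"

  # split("/", ...) yields ["", "subscriptions", "<subscription-id>", "resourceGroups", "<resource-group>", "providers", ...]
  example_resource_group = split("/", local.example_arm_id)[4] # "MGMT-WEEU-SAP_LIBRARY"
  example_resource_name  = split("/", local.example_arm_id)[8] # "mgmtweeutfstate"
}
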
@@ -51,19 +51,19 @@ locals { enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) - use_local_private_dns = (length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.management_dns_resourcegroup_name)) == 0) + use_local_private_dns = (length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.management_dns_resourcegroup_name)) == 0) - keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") + keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") - virtual_additional_network_ids = compact( - flatten( - [ - try(var.deployer_tfstate.subnet_mgmt_id, ""), - try(var.deployer_tfstate.subnet_webapp_id, ""), - try(var.deployer_tfstate.subnets_to_add_to_firewall_for_keyvaults_and_storage, []) - ] - ) - ) + virtual_additional_network_ids = compact( + flatten( + [ + try(var.deployer_tfstate.subnet_mgmt_id, ""), + try(var.deployer_tfstate.subnet_webapp_id, ""), + try(var.deployer_tfstate.subnets_to_add_to_firewall_for_keyvaults_and_storage, []) + ] + ) + ) From e121f86c321d892877578dea764642fb1b829c53 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 22:49:20 +0200 Subject: [PATCH 228/607] Add custom sizes to hanadb_sizes and anydb_sizes arrays --- Webapp/SDAF/Models/CustomValidators.cs | 2 + deploy/configs/anydb_sizes.json | 535 ------------------------- 2 files changed, 2 insertions(+), 535 deletions(-) delete mode 100644 deploy/configs/anydb_sizes.json diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs index acf5b80374..ba0142842f 100644 --- a/Webapp/SDAF/Models/CustomValidators.cs +++ b/Webapp/SDAF/Models/CustomValidators.cs @@ -315,6 +315,7 @@ protected override ValidationResult IsValid(object value, ValidationContext cont { string[] hanadb_sizes = new string[] { "Default", + "Custom", "S4Demo", "E20ds_v4", "E20ds_v5", @@ -340,6 +341,7 @@ protected override ValidationResult IsValid(object value, ValidationContext cont }; string[] anydb_sizes = new string[] { "Default", + "Custom", "256", "512", "1024", diff --git a/deploy/configs/anydb_sizes.json b/deploy/configs/anydb_sizes.json deleted file mode 100644 index 656ce2b849..0000000000 --- a/deploy/configs/anydb_sizes.json +++ /dev/null @@ -1,535 +0,0 @@ -{ - "db": { - "Default": { - "compute": { - "vm_size" : "Standard_E4s_v3" - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 64, - "caching" : "ReadWrite" - }, - { - "name" : "data", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 256, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 0 - }, - { - "name" : "log", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 9 - } - ] - }, - "200": { - "compute": { - "vm_size" : "Standard_E4s_v3", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 64, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 256, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : 
"", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 9 - } - ] - }, - "500": { - "compute": { - "vm_size" : "Standard_E8s_v3", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 64, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 256, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 9 - } - ] - }, - "1024": { - "compute": { - "vm_size" : "Standard_E16s_v3", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 4, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 256, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 9 - } - - ] - }, - "2048": { - "compute": { - "vm_size" : "Standard_E32s_v3", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 1024, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 9 - } - ] - }, - "5120": { - "compute": { - "vm_size" : "Standard_M64ls", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 5, - "disk_type" : "Premium_LRS", - "size_gb" : 1024, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "10240": { - "compute": { - "vm_size" : "Standard_M64s", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 
128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 5, - "disk_type" : "Premium_LRS", - "size_gb" : 2048, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "15360": { - "compute": { - "vm_size" : "Standard_M64s", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 4, - "disk_type" : "Premium_LRS", - "size_gb" : 4096, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "20480": { - "compute": { - "vm_size" : "Standard_M64s", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 5, - "disk_type" : "Premium_LRS", - "size_gb" : 4096, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 512, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "30720": { - "compute": { - "vm_size" : "Standard_M128s", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 8, - "disk_type" : "Premium_LRS", - "size_gb" : 4096, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 2048, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "40960": { - "compute": { - "vm_size" : "Standard_M128s", - "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 10, - "disk_type" : "Premium_LRS", - "size_gb" : 4096, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 2, - "disk_type" : "Premium_LRS", - "size_gb" : 2048, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - }, - "51200": { - "compute": { - "vm_size" : "Standard_M128s", 
- "swap_size_gb": 2 - }, - "storage": [ - { - "name" : "os", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite" - }, - { - "name" : "sap", - "fullname" : "", - "count" : 1, - "disk_type" : "Premium_LRS", - "size_gb" : 128, - "caching" : "ReadWrite", - "lun_start" : 0 - }, - { - "name" : "data", - "fullname" : "", - "count" : 13, - "disk_type" : "Premium_LRS", - "size_gb" : 4096, - "caching" : "None", - "write_accelerator" : false, - "lun_start" : 1 - }, - { - "name" : "log", - "fullname" : "", - "count" : 3, - "disk_type" : "Premium_LRS", - "size_gb" : 2048, - "caching" : "None", - "write_accelerator" : true, - "lun_start" : 17 - } - ] - } - } -} From 6deb43e05bf5b3623acb607f49acdc476c3a3d2e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 22:58:00 +0200 Subject: [PATCH 229/607] convert to list --- Webapp/SDAF/ParameterDetails/LandscapeDetails.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 80d0db38ab..b0999dc414 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -964,7 +964,7 @@ "Name": "ANF_transport_volume_zone", "Required": false, "Description": "Azure NetApp transport volume availability zone.", - "Type": "field", + "Type": "list", "Options": [ { "Text": "", From 3c3f4666cd1967c8c6426a2f2d685bcf56465423 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 10 Feb 2024 23:37:59 +0200 Subject: [PATCH 230/607] ANF zonal logic --- Webapp/SDAF/Models/LandscapeModel.cs | 4 ++-- deploy/terraform/run/sap_landscape/tfvar_variables.tf | 4 ++-- deploy/terraform/run/sap_landscape/transform.tf | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 8a68327929..1f1a94621b 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -214,7 +214,7 @@ public bool IsValid() public int? ANF_transport_volume_size { get; set; } - public string ANF_transport_volume_zone { get; set; } + public string[] ANF_transport_volume_zone { get; set; } public bool? ANF_install_volume_use_existing { get; set; } @@ -224,7 +224,7 @@ public bool IsValid() public int? 
ANF_install_volume_size { get; set; } - public string ANF_install_volume_zone { get; set; } + public string[] ANF_install_volume_zone { get; set; } /*---------------------------------------------------------------------------8 diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 3ecf41b26a..7b3e4d4c4f 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -540,7 +540,7 @@ variable "ANF_transport_volume_size" { variable "ANF_transport_volume_zone" { description = "Transport volume availability zone" - default = "" + default = [""] } variable "ANF_install_volume_use_existing" { @@ -566,7 +566,7 @@ variable "ANF_install_volume_size" { variable "ANF_install_volume_zone" { description = "Install volume availability zone" - default = "" + default = [""] } variable "use_AFS_for_shared_storage" { diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 0840953f91..8b5680781b 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -477,13 +477,13 @@ locals { transport_volume_name = var.ANF_transport_volume_name transport_volume_size = var.ANF_transport_volume_size transport_volume_throughput = var.ANF_transport_volume_throughput - transport_volume_zone = var.ANF_transport_volume_zone + transport_volume_zone = var.ANF_transport_volume_zone[0] use_existing_install_volume = var.ANF_install_volume_use_existing install_volume_name = var.ANF_install_volume_name install_volume_size = var.ANF_install_volume_size install_volume_throughput = var.ANF_install_volume_throughput - install_volume_zone = var.ANF_install_volume_zone + install_volume_zone = var.ANF_install_volume_zone[0] } From 28adf03184a8a2f7c3b9a5e2b2be3f15fe3f4984 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 00:22:27 +0200 Subject: [PATCH 231/607] Update appsettings.json and storage accounts configuration --- Webapp/SDAF/Services/TableStorageService.cs | 15 ++++++- Webapp/SDAF/appsettings.json | 2 +- .../modules/sap_deployer/app_service.tf | 42 +++++++++---------- .../modules/sap_library/key_vault.tf | 19 +++++++++ .../modules/sap_library/storage_accounts.tf | 2 +- .../modules/sap_library/variables_local.tf | 7 +++- 6 files changed, 61 insertions(+), 26 deletions(-) diff --git a/Webapp/SDAF/Services/TableStorageService.cs b/Webapp/SDAF/Services/TableStorageService.cs index 23a5b2992d..9bac3598a7 100644 --- a/Webapp/SDAF/Services/TableStorageService.cs +++ b/Webapp/SDAF/Services/TableStorageService.cs @@ -1,7 +1,10 @@ using AutomationForm.Models; using Azure.Data.Tables; +using Azure.Identity; using Azure.Storage.Blobs; using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.Options; +using System; using System.Threading.Tasks; namespace AutomationForm.Services @@ -18,7 +21,11 @@ public TableStorageService(IConfiguration configuration, IDatabaseSettings setti public async Task GetTableClient(string table) { - var serviceClient = new TableServiceClient(_configuration.GetConnectionString(_settings.ConnectionStringKey)); + string accountName = _configuration.GetConnectionString(_settings.ConnectionStringKey); + TableServiceClient serviceClient = new( + new Uri(accountName), + new DefaultAzureCredential()); + var tableClient = serviceClient.GetTableClient(table); await tableClient.CreateIfNotExistsAsync(); return tableClient; @@ -26,7 +33,11 @@ 
public async Task GetTableClient(string table) public async Task GetBlobClient(string container) { - var serviceClient = new BlobServiceClient(_configuration.GetConnectionString(_settings.ConnectionStringKey)); + string accountName = _configuration.GetConnectionString(_settings.ConnectionStringKey); + BlobServiceClient serviceClient = new( + new Uri(accountName), + new DefaultAzureCredential()); + var blobClient = serviceClient.GetBlobContainerClient(container); await blobClient.CreateIfNotExistsAsync(); return blobClient; diff --git a/Webapp/SDAF/appsettings.json b/Webapp/SDAF/appsettings.json index 76534d9d69..88261a95c6 100644 --- a/Webapp/SDAF/appsettings.json +++ b/Webapp/SDAF/appsettings.json @@ -1,6 +1,6 @@ { "DatabaseSettings": { - "ConnectionStringKey": "sa_tfstate_conn_str", + "ConnectionStringKey": "tfstate", "DatabaseName": "Deployment-Objects", "LandscapeCollectionName": "Landscapes", "SystemCollectionName": "Systems", diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index ac4f9169f2..a55decd556 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -67,12 +67,12 @@ resource "azurerm_service_plan" "appserviceplan" { # Create the app service with AD authentication and storage account connection string resource "azurerm_windows_web_app" "webapp" { - count = var.use_webapp ? 1 : 0 - name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.LIBRARY, var.naming.resource_suffixes.webapp_url, substr(random_id.deployer.hex, 0, 3))) - resource_group_name = local.resourcegroup_name - location = local.rg_appservice_location - service_plan_id = azurerm_service_plan.appserviceplan[0].id - https_only = true + count = var.use_webapp ? 
1 : 0 + name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.LIBRARY, var.naming.resource_suffixes.webapp_url, substr(random_id.deployer.hex, 0, 3))) + resource_group_name = local.resourcegroup_name + location = local.rg_appservice_location + service_plan_id = azurerm_service_plan.appserviceplan[0].id + https_only = true # auth_settings { # enabled = true @@ -94,23 +94,23 @@ resource "azurerm_windows_web_app" "webapp" { } sticky_settings { - app_setting_names = ["MICROSOFT_PROVIDER_AUTHENTICATION_SECRET"] - connection_string_names = ["sa_tfstate_conn_str"] + app_setting_names = ["MICROSOFT_PROVIDER_AUTHENTICATION_SECRET"] + connection_string_names = ["sa_tfstate_conn_str"] } auth_settings_v2 { - auth_enabled = true - unauthenticated_action = "RedirectToLoginPage" - default_provider = "AzureActiveDirectory" + auth_enabled = true + unauthenticated_action = "RedirectToLoginPage" + default_provider = "AzureActiveDirectory" active_directory_v2 { - client_id = var.app_registration_app_id - tenant_auth_endpoint = "https://sts.windows.net/${data.azurerm_client_config.deployer.tenant_id}/v2.0" - www_authentication_disabled = false - client_secret_setting_name = "MICROSOFT_PROVIDER_AUTHENTICATION_SECRET" - allowed_applications = [var.app_registration_app_id] - allowed_audiences = [] - allowed_groups = [] - allowed_identities = [] + client_id = var.app_registration_app_id + tenant_auth_endpoint = "https://sts.windows.net/${data.azurerm_client_config.deployer.tenant_id}/v2.0" + www_authentication_disabled = false + client_secret_setting_name = "MICROSOFT_PROVIDER_AUTHENTICATION_SECRET" + allowed_applications = [var.app_registration_app_id] + allowed_audiences = [] + allowed_groups = [] + allowed_identities = [] } login { token_store_enabled = false @@ -140,9 +140,9 @@ resource "azurerm_windows_web_app" "webapp" { identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] } connection_string { - name = "sa_tfstate_conn_str" + name = "tfstate" type = "Custom" - value = var.use_private_endpoint ? format("@Microsoft.KeyVault(SecretUri=https://%s.privatelink.vaultcore.azure.net/secrets/sa-connection-string/)", local.user_keyvault_name) : format("@Microsoft.KeyVault(SecretUri=https://%s.vault.azure.net/secrets/sa-connection-string/)", local.user_keyvault_name) + value = var.use_private_endpoint ? format("@Microsoft.KeyVault(SecretUri=https://%s.privatelink.vaultcore.azure.net/secrets/tfstate/)", local.user_keyvault_name) : format("@Microsoft.KeyVault(SecretUri=https://%s.vault.azure.net/secrets/tfstate/)", local.user_keyvault_name) } lifecycle { diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 0e1534c3a0..deb39f6692 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -62,6 +62,25 @@ resource "azurerm_key_vault_secret" "sa_connection_string" { null ) } + +resource "azurerm_key_vault_secret" "tfstate" { + provider = azurerm.deployer + count = length(var.key_vault.kv_spn_id) > 0 ? 1 : 0 + depends_on = [azurerm_private_endpoint.kv_user] + name = "tfstate" + value = var.use_private_endpoint ? 
( + format("%s.privatelink.blob.core.windows.net:/%s/%s", local.sa_sapbits_name,local.sa_sapbits_name,var.storage_account_sapbits.sapbits_blob_container.name)) : ( + format("%s.blob.core.windows.net:/%s/%s", local.sa_sapbits_name,local.sa_sapbits_name,var.storage_account_sapbits.sapbits_blob_container.name) + ) + key_vault_id = var.key_vault.kv_spn_id + expiration_date = try(var.deployer_tfstate.set_secret_expiry, false) ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) +} + + + resource "azurerm_private_dns_a_record" "kv_user" { provider = azurerm.deployer count = var.use_private_endpoint && var.use_custom_dns_a_registration ? 1 : 0 diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index b21ff30a7b..ef63efd30a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -356,7 +356,7 @@ resource "azurerm_storage_container" "storagecontainer_sapbits" { azurerm_private_dns_a_record.storage_sapbits_pep_a_record_registry ] name = var.storage_account_sapbits.sapbits_blob_container.name - storage_account_name = local.sa_sapbits_exists ? ( + storage_account_name = local.sa_sapbits_exists ? ( data.azurerm_storage_account.storage_sapbits[0].name) : ( azurerm_storage_account.storage_sapbits[0].name ) diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index c179a5df96..2a2940cb97 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -34,9 +34,14 @@ locals { sa_sapbits_exists = length(var.storage_account_sapbits.arm_id) > 0 sa_sapbits_name = local.sa_sapbits_exists ? ( split("/", var.storage_account_sapbits.arm_id)[8]) : ( - var.naming.storageaccount_names.LIBRARY.library_storageaccount_name + length(var.storage_account_sapbits.name) > 0 ? ( + var.storage_account_sapbits.name) : ( + var.naming.storageaccount_names.LIBRARY.library_storageaccount_name + ) ) + + // Storage account for tfstate sa_tfstate_exists = length(var.storage_account_tfstate.arm_id) > 0 From 71efc1e97b85120a69e8a5c0334f8dd8ef45802f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 00:33:42 +0200 Subject: [PATCH 232/607] Update key_vault.tf to support private endpoint and storage account URL --- .../terraform-units/modules/sap_library/key_vault.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index deb39f6692..6a83481ea8 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -69,8 +69,8 @@ resource "azurerm_key_vault_secret" "tfstate" { depends_on = [azurerm_private_endpoint.kv_user] name = "tfstate" value = var.use_private_endpoint ? ( - format("%s.privatelink.blob.core.windows.net:/%s/%s", local.sa_sapbits_name,local.sa_sapbits_name,var.storage_account_sapbits.sapbits_blob_container.name)) : ( - format("%s.blob.core.windows.net:/%s/%s", local.sa_sapbits_name,local.sa_sapbits_name,var.storage_account_sapbits.sapbits_blob_container.name) + format("https://%s.privatelink.blob.core.windows.net", local.sa_tfstate_exists ? 
(data.azurerm_storage_account.storage_tfstate[0].name) : (azurerm_storage_account.storage_tfstate[0].name))) : ( + format("https://%s.blob.core.windows.net", local.sa_tfstate_exists ? (data.azurerm_storage_account.storage_tfstate[0].name) : (azurerm_storage_account.storage_tfstate[0].name)) ) key_vault_id = var.key_vault.kv_spn_id expiration_date = try(var.deployer_tfstate.set_secret_expiry, false) ? ( From 02f401128629bd224eaa6657f4f5f2975ce4b694 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 00:35:51 +0200 Subject: [PATCH 233/607] Remove connection_string from ignore_changes in app_service.tf --- .../terraform-units/modules/sap_deployer/app_service.tf | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index a55decd556..9506e0e055 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -148,7 +148,6 @@ resource "azurerm_windows_web_app" "webapp" { lifecycle { ignore_changes = [ app_settings, - connection_string, zip_deploy_file, tags ] From d3e5548139477d117ddf5135338c4ea997b386dd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 00:42:49 +0200 Subject: [PATCH 234/607] Replace "blob" with "table" in GetConnectionString method --- Webapp/SDAF/Services/TableStorageService.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Webapp/SDAF/Services/TableStorageService.cs b/Webapp/SDAF/Services/TableStorageService.cs index 9bac3598a7..ec613f931e 100644 --- a/Webapp/SDAF/Services/TableStorageService.cs +++ b/Webapp/SDAF/Services/TableStorageService.cs @@ -21,7 +21,7 @@ public TableStorageService(IConfiguration configuration, IDatabaseSettings setti public async Task GetTableClient(string table) { - string accountName = _configuration.GetConnectionString(_settings.ConnectionStringKey); + string accountName = _configuration.GetConnectionString(_settings.ConnectionStringKey).Replace("blob", "table"); TableServiceClient serviceClient = new( new Uri(accountName), new DefaultAzureCredential()); From d9351cbd0fe480f061acaaa40910f09eaedcb7ec Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 13:03:25 +0200 Subject: [PATCH 235/607] Add private endpoint for table storage --- .../run/sap_library/tfvar_variables.tf | 7 ++-- .../modules/sap_library/dns.tf | 34 +++++++++++++------ .../modules/sap_library/storage_accounts.tf | 2 +- 3 files changed, 29 insertions(+), 14 deletions(-) diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index 80c79c3484..a54f5fe9b6 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -288,9 +288,10 @@ variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf 
b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index 0d4737955c..5eae5a3a7a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -29,27 +29,19 @@ resource "azurerm_private_dns_zone" "blob" { ) } -resource "azurerm_private_dns_zone" "vault" { +resource "azurerm_private_dns_zone" "table" { provider = azurerm.main count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_zone_names.table_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name ) } -data "azurerm_private_dns_zone" "vault" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name -} - - resource "azurerm_private_dns_zone" "file" { provider = azurerm.main count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 @@ -62,3 +54,25 @@ resource "azurerm_private_dns_zone" "file" { azurerm_resource_group.library[0].name ) } + +resource "azurerm_private_dns_zone" "vault" { + provider = azurerm.main + count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + depends_on = [ + azurerm_resource_group.library + ] + name = var.dns_zone_names.vault_dns_zone_name + resource_group_name = local.resource_group_exists ? ( + split("/", var.infrastructure.resource_group.arm_id)[4]) : ( + azurerm_resource_group.library[0].name + ) +} + +data "azurerm_private_dns_zone" "vault" { + provider = azurerm.dnsmanagement + count = !local.use_local_private_dns && var.use_private_endpoint ? 
1 : 0 + name = var.dns_zone_names.vault_dns_zone_name + resource_group_name = var.management_dns_resourcegroup_name +} + + diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index ef63efd30a..e8f3420513 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -328,7 +328,7 @@ resource "azurerm_private_endpoint" "storage_sapbits" { azurerm_storage_account.storage_sapbits[0].id ) subresource_names = [ - "Blob" + "Blob", "Table" ] } From 2c442d08ee1a3262ba81e347317c7879c6e86613 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 18:09:08 +0200 Subject: [PATCH 236/607] debug --- .../modules/sap_library/storage_accounts.tf | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index e8f3420513..846cf0c89e 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -27,7 +27,7 @@ resource "azurerm_storage_account" "storage_tfstate" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - public_network_access_enabled = var.storage_account_sapbits.public_network_access_enabled + public_network_access_enabled = true #var.storage_account_sapbits.public_network_access_enabled enable_https_traffic_only = true @@ -62,6 +62,12 @@ resource "azurerm_storage_account" "storage_tfstate" { ignore_changes = [tags] } + tags = { + "enable_firewall_for_keyvaults_and_storage" = local.enable_firewall_for_keyvaults_and_storage + "public_network_access_enabled" = var.storage_account_sapbits.public_network_access_enabled + } + ) + } From e27b979ea3b4218dc56dc693bba2cbfbcd1d5754 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 18:12:39 +0200 Subject: [PATCH 237/607] Refactor storage account tags --- .../modules/sap_library/storage_accounts.tf | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 846cf0c89e..b34efbb3d6 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -62,11 +62,10 @@ resource "azurerm_storage_account" "storage_tfstate" { ignore_changes = [tags] } - tags = { - "enable_firewall_for_keyvaults_and_storage" = local.enable_firewall_for_keyvaults_and_storage - "public_network_access_enabled" = var.storage_account_sapbits.public_network_access_enabled - } - ) + tags = { + "enable_firewall_for_keyvaults_and_storage" = local.enable_firewall_for_keyvaults_and_storage + "public_network_access_enabled" = var.storage_account_sapbits.public_network_access_enabled + } } From 1ea757f394d5b966fca27e4a03978bb407c5ef36 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 18:32:59 +0200 Subject: [PATCH 238/607] Add private endpoint for table storage --- .../modules/sap_library/storage_accounts.tf | 67 ++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf 
b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index b34efbb3d6..b46bcb60a7 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -187,6 +187,63 @@ resource "azurerm_private_endpoint" "storage_tfstate" { } } +resource "azurerm_private_endpoint" "table_tfstate" { + provider = azurerm.main + count = var.use_private_endpoint && !local.sa_tfstate_exists ? 1 : 0 + name = format("%s%s-table%s", + var.naming.resource_prefixes.storage_private_link_tf, + local.prefix, + var.naming.resource_suffixes.storage_private_link_tf + ) + resource_group_name = local.resource_group_exists ? ( + data.azurerm_resource_group.library[0].name) : ( + azurerm_resource_group.library[0].name + ) + location = local.resource_group_exists ? ( + data.azurerm_resource_group.library[0].location) : ( + azurerm_resource_group.library[0].location + ) + + subnet_id = var.deployer_tfstate.subnet_mgmt_id + + custom_network_interface_name = var.short_named_endpoints_nics ? format("%s%s%s%s", + var.naming.resource_prefixes.storage_private_link_tf, + length(local.prefix) > 0 ? ( + local.prefix) : ( + var.infrastructure.environment + ), + var.naming.resource_suffixes.storage_private_link_tf, + var.naming.resource_suffixes.nic + ) : null + + private_service_connection { + name = format("%s%s%s", var.naming.resource_prefixes.storage_private_svc_tf, + local.prefix, + var.naming.resource_suffixes.storage_private_svc_tf + ) + is_manual_connection = false + private_connection_resource_id = local.sa_tfstate_exists ? ( + var.storage_account_tfstate.arm_id) : ( + azurerm_storage_account.storage_tfstate[0].id + ) + subresource_names = [ + "table" + ] + } + + dynamic "private_dns_zone_group" { + for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + content { + name = var.dns_zone_names.blob_dns_zone_name + private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.table[0].id : data.azurerm_private_dns_zone.table[0].id] + } + } + + lifecycle { + ignore_changes = [tags] + } +} + // Creates the storage container inside the storage account for sapsystem resource "azurerm_storage_container" "storagecontainer_tfstate" { provider = azurerm.main @@ -333,7 +390,7 @@ resource "azurerm_private_endpoint" "storage_sapbits" { azurerm_storage_account.storage_sapbits[0].id ) subresource_names = [ - "Blob", "Table" + "Blob" ] } @@ -417,6 +474,14 @@ data "azurerm_private_dns_zone" "storage" { } +data "azurerm_private_dns_zone" "table" { + provider = azurerm.dnsmanagement + count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + name = var.dns_zone_names.table_dns_zone_name + resource_group_name = var.management_dns_resourcegroup_name + +} + data "azurerm_network_interface" "storage_tfstate" { count = var.use_private_endpoint && !local.sa_tfstate_exists ? 
1 : 0 name = azurerm_private_endpoint.storage_tfstate[count.index].network_interface[0].name From a0fb683c9f987f83d21298297f296a2bbd8895c0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 20:22:21 +0200 Subject: [PATCH 239/607] add tags --- .../terraform-units/modules/sap_library/storage_accounts.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index b46bcb60a7..ca7d70cb9b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -58,9 +58,9 @@ resource "azurerm_storage_account" "storage_tfstate" { ) } - lifecycle { - ignore_changes = [tags] - } + # lifecycle { + # ignore_changes = [tags] + # } tags = { "enable_firewall_for_keyvaults_and_storage" = local.enable_firewall_for_keyvaults_and_storage From 0ea94fb3e55c980a44cc9aa5e459b2df0d072b08 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 11 Feb 2024 21:13:40 +0200 Subject: [PATCH 240/607] Update publish profile and disable basic authentication for web app --- Webapp/SDAF/SDAFWebApp.csproj.user | 2 +- .../modules/sap_deployer/app_service.tf | 14 ++++++++------ .../modules/sap_library/key_vault.tf | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/Webapp/SDAF/SDAFWebApp.csproj.user b/Webapp/SDAF/SDAFWebApp.csproj.user index a8a7cd905d..0799d9e00f 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj.user +++ b/Webapp/SDAF/SDAFWebApp.csproj.user @@ -11,6 +11,6 @@ True False True - C:\Work\Repos\SDAF-ACSS\Webapp\SDAF\Properties\PublishProfiles\mgmt-weeu-sapdeployment32e - Web Deploy1.pubxml + C:\Work\Repos\sap-automation\Webapp\SDAF\Properties\PublishProfiles\cpln-noeu-sapdeployment597 - Web Deploy1.pubxml \ No newline at end of file diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 9506e0e055..b47c2e790c 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -67,12 +67,14 @@ resource "azurerm_service_plan" "appserviceplan" { # Create the app service with AD authentication and storage account connection string resource "azurerm_windows_web_app" "webapp" { - count = var.use_webapp ? 1 : 0 - name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.LIBRARY, var.naming.resource_suffixes.webapp_url, substr(random_id.deployer.hex, 0, 3))) - resource_group_name = local.resourcegroup_name - location = local.rg_appservice_location - service_plan_id = azurerm_service_plan.appserviceplan[0].id - https_only = true + count = var.use_webapp ? 
1 : 0 + name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.LIBRARY, var.naming.resource_suffixes.webapp_url, substr(random_id.deployer.hex, 0, 3))) + resource_group_name = local.resourcegroup_name + location = local.rg_appservice_location + service_plan_id = azurerm_service_plan.appserviceplan[0].id + https_only = true + webdeploy_publish_basic_authentication_enabled = false + ftp_publish_basic_authentication_enabled = false # auth_settings { # enabled = true diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 6a83481ea8..7470f37608 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -69,7 +69,7 @@ resource "azurerm_key_vault_secret" "tfstate" { depends_on = [azurerm_private_endpoint.kv_user] name = "tfstate" value = var.use_private_endpoint ? ( - format("https://%s.privatelink.blob.core.windows.net", local.sa_tfstate_exists ? (data.azurerm_storage_account.storage_tfstate[0].name) : (azurerm_storage_account.storage_tfstate[0].name))) : ( + format("https://%s.blob.core.windows.net", local.sa_tfstate_exists ? (data.azurerm_storage_account.storage_tfstate[0].name) : (azurerm_storage_account.storage_tfstate[0].name))) : ( format("https://%s.blob.core.windows.net", local.sa_tfstate_exists ? (data.azurerm_storage_account.storage_tfstate[0].name) : (azurerm_storage_account.storage_tfstate[0].name)) ) key_vault_id = var.key_vault.kv_spn_id From 1165c419522bb481ea573da1e53683689a2a789a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 10:26:03 +0200 Subject: [PATCH 241/607] ANF subnet should not have NSGs --- .../modules/sap_landscape/nsg.tf | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index fd4fac7590..2ec18be8ab 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -83,42 +83,6 @@ resource "azurerm_network_security_group" "app" { } - -# Creates SAP anf subnet nsg -resource "azurerm_network_security_group" "anf" { - provider = azurerm.main - count = var.NFS_provider == "ANF" ? ( - local.ANF_subnet_nsg_exists ? ( - 0) : ( - 1 - )) : ( - 0 - ) - name = local.ANF_subnet_nsg_name - resource_group_name = local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].resource_group_name - ) : ( - azurerm_virtual_network.vnet_sap[0].resource_group_name - ) - location = local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].location) : ( - azurerm_virtual_network.vnet_sap[0].location - ) -} - -# Associates anf nsg to anf subnet - -resource "azurerm_subnet_network_security_group_association" "anf" { - provider = azurerm.main - count = local.ANF_subnet_defined && !local.ANF_subnet_nsg_exists ? 1 : 0 - depends_on = [ - azurerm_subnet.anf - ] - subnet_id = local.ANF_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_anf.arm_id : azurerm_subnet.anf[0].id - network_security_group_id = azurerm_network_security_group.anf[0].id -} - - # Associates app nsg to app subnet resource "azurerm_subnet_network_security_group_association" "app" { provider = azurerm.main From 51623ba9d4aa212fb5de439b6a5bff868d31e29e Mon Sep 17 00:00:00 2001 From: Harm Jan Stam Date: Mon, 12 Feb 2024 10:18:22 +0100 Subject: [PATCH 242/607] Fix ansible lint errors and upgrade ansible-lint to 24.2.0 (#546) Lint fixes --- .ansible-lint | 3 +-- .../workflows/github-actions-ansible-lint.yml | 2 +- deploy/ansible/.ansible-lint | 27 ------------------- .../ansible/playbook_04_00_00_db_install.yaml | 2 +- .../tasks/2.6.1-anf-mounts.yaml | 2 -- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 2 -- .../2.6-sap-mounts/tasks/main.yaml | 1 - deploy/scripts/validate_dict.yml | 13 +++++---- 8 files changed, 9 insertions(+), 43 deletions(-) delete mode 100644 deploy/ansible/.ansible-lint diff --git a/.ansible-lint b/.ansible-lint index e04e512877..6ceb406df7 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,7 +1,5 @@ exclude_paths: - deploy/pipelines - - deploy/ansible/BOM-catalog - - deploy/ansible/filter_plugins - deploy/terraform - .github @@ -12,6 +10,7 @@ warn_list: skip_list: - no-handler # Ignored because there is no easier way to do - key-order[task] # Refactor needed + - key-order[play] # Refactor needed - name[casing] # Following are ignored because of readability: - no-changed-when - role-name diff --git a/.github/workflows/github-actions-ansible-lint.yml b/.github/workflows/github-actions-ansible-lint.yml index 0c0cf58a18..fb61500c1b 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -16,7 +16,7 @@ jobs: - name: Install Ansible and Ansible-Lint run: | python -m pip install --upgrade pip - pip install ansible-core ansible-lint==6.20.3 jmespath netaddr + pip install ansible-core ansible-lint==24.2.0 jmespath netaddr - name: Install Ansible Collections run: | diff --git a/deploy/ansible/.ansible-lint b/deploy/ansible/.ansible-lint deleted file mode 100644 index 2f0555ae00..0000000000 --- a/deploy/ansible/.ansible-lint +++ /dev/null @@ -1,27 +0,0 @@ -exclude_paths: - - deploy/pipelines - - deploy/ansible/BOM-catalog - - deploy/ansible/filter_plugins - - deploy/terraform - - .github - -warn_list: - # FIXME: Experimental violations should be fixed in separate PR - - experimental - -skip_list: - - no-handler # Ignored because there is no easier way to do - - key-order[task] # Refactor needed - - name[casing] # Following are ignored because of readability: - - no-changed-when - - role-name - - var-naming - - yaml[braces] - - yaml[colons] - - yaml[commas] - - yaml[line-length] - - yaml[octal-values] - - yaml[indentation] - - name[template] - - command-instead-of-shell # csh is not supported in commands - - jinja[spacing] diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 2d9f3df6d7..e21fa466f1 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -659,7 +659,7 @@ vars: suffix: "_DB" tier: 'db2' - prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/' + {{ db2_instance_type }}, '').split(':')[1] }}" + prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/' + db2_instance_type, '').split(':')[1] }}" path: "INSTALL/DISTRIBUTED/{{ db2_instance_type }}/DB" this_sid: "{{ sap_sid }}" diff --git 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 8348b84e88..91ccb56bf8 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -863,8 +863,6 @@ - node_tier == 'hana' - hana_data_mountpoint is defined - - - name: "ANF Mount: HANA Log - Scale out - Create mount list" block: - name: "Initialize HANA Log mountpoints" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index bf3d53dd6e..fa41192925 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -808,8 +808,6 @@ - node_tier == 'hana' - hana_data_mountpoint is defined - - - name: "ANF Mount: HANA Log - Scale out - Create mount list" block: - name: "Initialize HANA Log mountpoints" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index be8cdea59e..a82f135f8d 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -358,7 +358,6 @@ - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - not use_simple_mount - NFS_provider == 'ANF' - # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 # This will be removed in the next release, left here for tracing and documentation diff --git a/deploy/scripts/validate_dict.yml b/deploy/scripts/validate_dict.yml index 248147ea59..13de7ee3cc 100755 --- a/deploy/scripts/validate_dict.yml +++ b/deploy/scripts/validate_dict.yml @@ -1,7 +1,6 @@ --- - -- name: "Determine list of dict keys" - set_fact: +- name: Determine list of dict keys + ansible.builtin.set_fact: dict_keys: "{{ dict | dict2items | @@ -12,8 +11,8 @@ }}" dict_name: "{{ ' (Check name: ' + dict.name + ')' if dict.name is defined }}" -- name: "Check required keys" - assert: +- name: Check required keys + ansible.builtin.assert: that: "required_key in dict_keys" fail_msg: "Expected to find key '{{ required_key }}' in '{{ reference }}'{{ dict_name }}" loop: "{{ required }}" @@ -21,8 +20,8 @@ loop_var: required_key ignore_errors: true -- name: "Check optional keys" - assert: +- name: Check optional keys + ansible.builtin.assert: that: "key in required + optional" fail_msg: "Unexpected key '{{ key }} in '{{ reference }}'{{ dict_name }}" loop: "{{ dict_keys | flatten }}" From 97ae08359b49cc9c95e81e895a92ddd8f38f5a5b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 11:46:59 +0200 Subject: [PATCH 243/607] Update database load balancer IP address --- .../tasks/4.4.1.1-mssql-alwayson-config.yaml | 10 +++++----- .../templates/Set-AzLBHealthProbeSQL.j2 | 2 +- .../roles-sap-os/2.4-hosts-file/tasks/main.yaml | 4 ++-- .../tasks/5.5.4.1-cluster-RedHat.yml | 2 +- .../tasks/5.5.4.1-cluster-Suse.yml | 2 +- .../5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml | 2 +- .../5.7-db2-pacemaker/tasks/5.7.3.0-cluster-Suse.yml | 2 +- deploy/ansible/vars/ansible-input-api.yaml | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml 
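The nested moustache fix in playbook_04_00_00_db_install.yaml above is worth spelling out: inside {{ ... }} Jinja2 is already in expression context, so wrapping a variable in a second pair of braces ('/' + {{ db2_instance_type }}) is a template error; the variable is referenced bare. A minimal standalone sketch of the corrected pattern, where the dblha value is an illustrative assumption and not taken from a real BOM:

- name: "Derive the DB2 product id prefix (sketch)"
  ansible.builtin.set_fact:
    db2_prefix: "{{ bom.product_ids.dblha.replace('.', '/').replace('/' + db2_instance_type, '').split(':')[1] }}"
  vars:
    db2_instance_type: "ABAP"
    bom:
      product_ids:
        dblha: "NW_ABAP_DB:S4HANA.CORE.HDB.ABAP"   # illustrative value only

With these inputs the expression yields S4HANA/CORE/HDB: the dots become path separators first, then the instance-type segment is stripped.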
b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml index 1267d2970e..0dd285f2ba 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml @@ -114,18 +114,18 @@ - name: "calculate LB IP with subnet prefix" ansible.builtin.set_fact: - db_lb_ipv4: "{{ db_lb_ip }}/{{ db_subnet_prefix }}" + database_loadbalancer_ipv4: "{{ database_loadbalancer_ip }}/{{ db_subnet_prefix }}" - name: "calculate subnet mask" ansible.builtin.set_fact: - db_subnet_mask: "{{ db_lb_ipv4 | ansible.utils.ipaddr('host/prefix') | ansible.utils.ipaddr('netmask') }}" + db_subnet_mask: "{{ database_loadbalancer_ipv4 | ansible.utils.ipaddr('host/prefix') | ansible.utils.ipaddr('netmask') }}" - name: "print subnet mask" ansible.builtin.debug: msg: - "Subnet prefix: {{ db_subnet_prefix }}" - - "LB IP: {{ db_lb_ip }}" - - "LB IP with subnet prefix: {{ db_lb_ipv4 }}" + - "LB IP: {{ database_loadbalancer_ip }}" + - "LB IP with subnet prefix: {{ database_loadbalancer_ipv4 }}" - "Subnet mask: {{ db_subnet_mask }}" # restart SQL Server service @@ -144,7 +144,7 @@ InstanceName: "{{ mssql_instance_name }}" AvailabilityGroup: "{{ mssql_ag_name }}" Port: 1433 - IpAddress: "{{ db_lb_ip }}/{{ db_subnet_mask }}" + IpAddress: "{{ database_loadbalancer_ip }}/{{ db_subnet_mask }}" PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" PsDscRunAsCredential_password: "{{ domain_service_password }}" register: sql_ag_listener_creation diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/templates/Set-AzLBHealthProbeSQL.j2 b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/templates/Set-AzLBHealthProbeSQL.j2 index 7946778fb2..1afa18ada0 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/templates/Set-AzLBHealthProbeSQL.j2 +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/templates/Set-AzLBHealthProbeSQL.j2 @@ -101,6 +101,6 @@ function Set-DatabaseHealthProbe { } # Set database healthprobe for SQL Server always on cluster -Set-DatabaseHealthProbe -SQLAGName "{{ mssql_ag_name }}" -SQLAGListenerName "{{ mssql_ag_listener_name }}" -ProbePort "{{ mssql_lb_probeport }}" -IPAddress "{{ db_lb_ip}}" -SubnetMask "{{ db_subnet_mask }}" +Set-DatabaseHealthProbe -SQLAGName "{{ mssql_ag_name }}" -SQLAGListenerName "{{ mssql_ag_listener_name }}" -ProbePort "{{ mssql_lb_probeport }}" -IPAddress "{{ database_loadbalancer_ip }}" -SubnetMask "{{ db_subnet_mask }}" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 52d80900ec..841f6f9aea 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -88,7 +88,7 @@ create: true state: present block: | - {{ '%-19s' | format(db_lb_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} + {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} marker: "# {mark} DB Entries {{ db_virtual_hostname }}" when: - database_high_availability @@ -101,7 +101,7 @@ create: true state: present block: | - {{ '%-19s' | format(db_lb_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' 
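The two set_fact steps above turn the load balancer's CIDR prefix into a dotted netmask by chaining the ansible.utils.ipaddr filter. The same derivation can be exercised in isolation; a sketch, assuming the ansible.utils collection and the netaddr Python package are installed (the address is a placeholder):

- name: "Derive a dotted netmask from a prefix length (sketch)"
  ansible.builtin.set_fact:
    example_netmask: "{{ '10.110.96.4/28' | ansible.utils.ipaddr('netmask') }}"   # yields 255.255.255.240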
+ sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} + {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} marker: "# {mark} DB Entries {{ db_virtual_hostname }}" when: - database_high_availability diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 711587ae0b..0245fcec20 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -77,7 +77,7 @@ failed_when: netcat.rc > 1 - name: "5.5.4.1 HANA Cluster configuration - Ensure the Virtual IP resource for the Load Balancer Front End IP is created" - ansible.builtin.shell: pcs resource create vip_{{ db_sid | upper }}_{{ db_instance_number }} IPaddr2 ip="{{ db_lb_ip }}" + ansible.builtin.shell: pcs resource create vip_{{ db_sid | upper }}_{{ db_instance_number }} IPaddr2 ip="{{ database_loadbalancer_ip }}" register: vip failed_when: vip.rc > 1 diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index 5b19674f47..b08b603ff6 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -92,7 +92,7 @@ meta target-role="Started" operations \$id="rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }}-operations" op monitor interval="10s" timeout="20s" - params ip="{{ db_lb_ip }}" + params ip="{{ database_loadbalancer_ip }}" register: sap_hana_rsc_ip failed_when: sap_hana_rsc_ip.rc > 1 # socat is recommended in place of netcat on Azure: https://www.suse.com/support/kb/doc/?id=000019536 diff --git a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml index 68c8a6a2f0..369ef34c2b 100644 --- a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml @@ -60,7 +60,7 @@ when: ansible_distribution_major_version in ["8", "9"] - name: "Ensure the Virtual IP resource for the Load Balancer Front End IP is created" - ansible.builtin.command: pcs resource create vip_db2{{ db_sid | lower }}_{{ db_sid | upper }} IPaddr2 ip='{{ db_lb_ip }}' + ansible.builtin.command: pcs resource create vip_db2{{ db_sid | lower }}_{{ db_sid | upper }} IPaddr2 ip='{{ database_loadbalancer_ip }}' register: vip failed_when: vip.rc > 1 diff --git a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-Suse.yml b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-Suse.yml index fb54e450c3..ec571d25ba 100644 --- a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-Suse.yml @@ -57,7 +57,7 @@ ansible.builtin.command: >- crm configure primitive rsc_ip_db2{{ db_sid | lower }}_{{ db_sid | upper }} IPaddr2 op monitor interval="10s" timeout="20s" - params ip="{{ db_lb_ip }}" + params ip="{{ database_loadbalancer_ip }}" register: sap_db2_rsc_ip failed_when: sap_db2_rsc_ip.rc > 1 diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 82baef9ad3..14b1fc1a4a 
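The db_lb_ip to database_loadbalancer_ip rename lands across several roles at once, so inventories written against older releases may still define only the legacy name. A small compatibility shim of the kind this change implies could look like the following sketch (variable names as used above, the task itself is not part of the patch):

- name: "Map legacy db_lb_ip onto database_loadbalancer_ip (sketch)"
  ansible.builtin.set_fact:
    database_loadbalancer_ip: "{{ db_lb_ip }}"
  when:
    - database_loadbalancer_ip is not defined
    - db_lb_ip is defined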
100644
--- a/deploy/ansible/vars/ansible-input-api.yaml
+++ b/deploy/ansible/vars/ansible-input-api.yaml
@@ -135,7 +135,7 @@ app_instance_number: "00"
 sidadm_uid: 2003
 hdbadm_uid: 2200
 scs_lb_ip: "" # SAP HA
-db_lb_ip: "" # SAP HA
+database_loadbalancer_ip: "" # SAP HA
 reinstall: false # Provide a way to override the idempotency flags for SAP tasks
 ora_release: 19

From 70d6a5cea7846ef82e442c1f3f2be7bd1575c010 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 12 Feb 2024 14:06:35 +0200
Subject: [PATCH 244/607] Update sap-parameters.yml.tmpl file

---
 .../tasks/4.4.1.1-mssql-alwayson-config.yaml        | 2 +-
 .../sap_system/output_files/sap-parameters.yml.tmpl | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml
index 0dd285f2ba..9ca5160aa4 100644
--- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml
+++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml
@@ -114,7 +114,7 @@

 - name: "calculate LB IP with subnet prefix"
   ansible.builtin.set_fact:
-    database_loadbalancer_ipv4: "{{ database_loadbalancer_ip }}/{{ db_subnet_prefix }}"
+    database_loadbalancer_ipv4: "{{ database_loadbalancer_ip }}/{{ db_subnet_prefix }}"

 - name: "calculate subnet mask"
   ansible.builtin.set_fact:
diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl
index df64d79d53..c8cbfdfd1e 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl
+++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl
@@ -74,10 +74,10 @@ db_instance_number: "${db_instance_number}"
 #   ORACLE
 #   SQLSERVER

-platform: ${platform}
+platform: ${platform}

 # Scale out defines if the database is to be deployed in a scale out configuration
-scale_out: ${scale_out}
+db_scale_out: ${scale_out}

 # db_high_availability is a boolean flag indicating if the
 # SAP database servers are deployed using high availability
@@ -85,6 +85,9 @@ db_high_availability: ${database_high_availability}
 database_high_availability: ${database_high_availability}
 database_cluster_type: ${database_cluster_type}
 # database_loadbalancer_ip is the IP address of the load balancer for the database virtual machines
+database_loadbalancer_ip: ${database_loadbalancer_ip}
+
+# Backwards compatibility
 db_lb_ip: ${database_loadbalancer_ip}

 # database_cluster_ip is the IP address of the load balancer for the database cluster in Windows

From 954e398f39b54768b62f953fe6e48189d625e239 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 12 Feb 2024 14:16:54 +0200
Subject: [PATCH 245/607] Add a time out

---
 .../tasks/2.6.1.1-anf-mount.yaml | 33 ++++++++++++++-----
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
index d0f5e3525e..8e0950daff 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
@@ -107,15 +107,30 @@
       - is_created_now.changed
       - item.set_chattr_on_dir

-- name: "ANF Mount: ({{ item.path }} on {% if item.create_temp_folders
%}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %})" - ansible.posix.mount: - src: "{% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %}" - path: "{{ item.path }}" - fstype: "nfs4" - opts: "{{ item.opts }}" - state: mounted - when: - - node_tier in item.target_nodes or item.target_nodes == ['all'] +- name: "ANF Mount: ({{ item.path }} " + block: + - name: "ANF Mount: ({{ item.path }} on {% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %})" + ansible.posix.mount: + src: "{% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %}" + path: "{{ item.path }}" + fstype: "nfs4" + opts: "{{ item.opts }}" + state: mounted + when: + - node_tier in item.target_nodes or item.target_nodes == ['all'] + rescue: + - name: "ANF Mount: Sleep for 10 seconds and continue with play" + ansible.builtin.wait_for: + timeout: + - name: "ANF Mount: ({{ item.path }} on {% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %})" + ansible.posix.mount: + src: "{% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %}" + path: "{{ item.path }}" + fstype: "nfs4" + opts: "{{ item.opts }}" + state: mounted + when: + - node_tier in item.target_nodes or item.target_nodes == ['all'] - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: From 05766e5f5b091016ebc05fa4b72ec52e126eba50 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 14:36:06 +0200 Subject: [PATCH 246/607] Refactor primary_host variable in ANF mounts tasks --- .../2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 91ccb56bf8..993c354d7e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -785,8 +785,7 @@ 'create_temp_folders': true } vars: - # Run this on all the nodes, not just primary. 
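The block/rescue pair above retries a transient NFS mount failure exactly once after a pause. The same intent can be written more compactly with until/retries, at the cost of the explicit rescue branch; a sketch, assuming the same item structure as the surrounding tasks:

- name: "ANF Mount: mount with automatic retries (sketch)"
  ansible.posix.mount:
    src: "{% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %}"
    path: "{{ item.path }}"
    fstype: "nfs4"
    opts: "{{ item.opts }}"
    state: mounted
  register: anf_mount_result
  until: anf_mount_result is succeeded
  retries: 3
  delay: 10
  when:
    - node_tier in item.target_nodes or item.target_nodes == ['all']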
- primary_host: "{{ ansible_hostname }}" + primary_host: "{{ first_server_temp | first }}" when: - node_tier == 'hana' - hana_shared_mountpoint is defined @@ -809,7 +808,7 @@ 'create_temp_folders': true } vars: - primary_host: "{{ ansible_hostname }}" + primary_host: "{{ first_server_temp | first }}" when: - node_tier == 'hana' - hana_shared_mountpoint is defined @@ -858,7 +857,7 @@ with_items: - "{{ hana_data_scaleout_mountpoint | list }}" vars: - primary_host: "{{ ansible_hostname }}" + primary_host: "{{ first_server_temp | first }}" when: - node_tier == 'hana' - hana_data_mountpoint is defined @@ -903,7 +902,7 @@ with_items: - "{{ hana_log_scaleout_mountpoint | list }}" vars: - primary_host: "{{ ansible_hostname }}" + primary_host: "{{ first_server_temp | first }}" when: - node_tier == 'hana' - hana_log_mountpoint is defined From d0fcf4d5aa1c0da33e2db70da1b6947afb38fe41 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 14:47:47 +0200 Subject: [PATCH 247/607] Update timeout value in ANF Mount task --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index 8e0950daff..3d52f53828 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -121,7 +121,8 @@ rescue: - name: "ANF Mount: Sleep for 10 seconds and continue with play" ansible.builtin.wait_for: - timeout: + timeout: 10 + - name: "ANF Mount: ({{ item.path }} on {% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %})" ansible.posix.mount: src: "{% if item.create_temp_folders %}{{ item.mount }}/{{ item.folder }}{% else %}{{ item.mount }}{% endif %}" From a006934cd6a04c7e80be4c0c52f98340f64c6a3f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 15:09:40 +0200 Subject: [PATCH 248/607] Update ANF mount configuration --- .../2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 10 +++++----- .../2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 993c354d7e..8cef1effe1 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -782,10 +782,10 @@ 'permissions': '0775', 'set_chattr_on_dir': false, 'target_nodes' : ['hana'], - 'create_temp_folders': true + 'create_temp_folders': false } vars: - primary_host: "{{ first_server_temp | first }}" + primary_host: "{{ ansible_hostname }}" when: - node_tier == 'hana' - hana_shared_mountpoint is defined @@ -808,7 +808,7 @@ 'create_temp_folders': true } vars: - primary_host: "{{ first_server_temp | first }}" + primary_host: "{{ ansible_hostname }}" when: - node_tier == 'hana' - hana_shared_mountpoint is defined @@ -857,7 +857,7 @@ with_items: - "{{ hana_data_scaleout_mountpoint | list }}" vars: - primary_host: "{{ first_server_temp | first }}" + primary_host: "{{ ansible_hostname }}" when: - node_tier == 'hana' - hana_data_mountpoint is defined @@ -902,7 +902,7 @@ with_items: - "{{ hana_log_scaleout_mountpoint | list }}" vars: - primary_host: "{{ first_server_temp | first }}" + 
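The primary_host flip in these hunks is more than cosmetic: with ansible_hostname every node considers itself the primary and runs the one-time shared-folder preparation, whereas first_server_temp | first elects a single node for the whole play. A sketch of the election and the gating it enables, assuming first_server_temp holds the play's ordered host list:

- name: "Resolve the primary host for shared directory work (sketch)"
  ansible.builtin.set_fact:
    primary_host: "{{ first_server_temp | first }}"

- name: "Run one-time preparation only on the elected primary"
  ansible.builtin.debug:
    msg: "{{ ansible_hostname }} is the elected primary"
  when: ansible_hostname == primary_host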
primary_host: "{{ ansible_hostname }}" when: - node_tier == 'hana' - hana_log_mountpoint is defined diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index 3d52f53828..87ca25490e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -14,7 +14,7 @@ - name: "ANF Mount: HANA {{ item.type }}" block: - - name: "ANF Mount: Create /{{ item.temppath }}" + - name: "ANF Mount: Create temporary directory /{{ item.temppath }}" ansible.builtin.file: path: "/{{ item.temppath }}" mode: "{{ item.permissions }}" @@ -23,7 +23,7 @@ - name: "ANF Mount: ({{ item.type }})" block: - - name: "ANF Mount: Filesystems on ANF ({{ item.temppath }})" + - name: "ANF Mount: Filesystems on ANF ({{ item.temppath }}) (temporary)" ansible.posix.mount: src: "{{ item.mount }}" path: "/{{ item.temppath }}" @@ -40,13 +40,13 @@ name: rpcbind state: restarted - - name: "ANF Mount: Create Directories ({{ item.temppath }})" + - name: "ANF Mount: Create Directories ({{ item.temppath }}) on temporary mount" ansible.builtin.file: path: "/{{ item.temppath }}/{{ item.folder }}" state: directory mode: 0755 - - name: "ANF Mount: Unmount file systems ({{ item.temppath }})" + - name: "ANF Mount: Unmount file systems ({{ item.temppath }}) from temporary mount" ansible.posix.mount: src: "{{ item.mount }}" path: "/{{ item.temppath }}" From 6bb4e58be044779f82333b1ce4b17aa435c9eccf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 16:06:13 +0200 Subject: [PATCH 249/607] Add TF_VAR_agent_pat and TF_VAR_agent_pool environment variables*** --- deploy/pipelines/01-deploy-control-plane.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 91f4da086d..35f2ce9574 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -358,6 +358,8 @@ stages: TF_IN_AUTOMATION: true TF_LOG: $(TF_LOG) TF_VAR_agent_ado_url: $(System.CollectionUri) + TF_VAR_agent_pat: $(PAT) + TF_VAR_agent_pool: $(POOL) TF_VAR_ansible_core_version: $(ansible_core_version) TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID) TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) From d08baefe7d3dd3f10359bf0c75353b8253d15df6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 16:06:25 +0200 Subject: [PATCH 250/607] Refactor ANF Mount tasks for scale out and non-scale out setups --- .../tasks/2.6.1-anf-mounts.yaml | 42 ++++++++++++------- .../tasks/2.6.1.1-anf-mount.yaml | 9 ++++ 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 8cef1effe1..15a7a92169 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -558,6 +558,11 @@ - tier == 'sapos' - node_tier == 'hana' +# /*---------------------------------------------------------------------------8 +# | | +# | ANF Mount: Run tasks for non-scale out setups | +# | | +# +------------------------------------4--------------------------------------*/ # Standard block tasks for non scale out setups - name: "ANF Mount: Run tasks for non-scale out setups" block: @@ -735,10 +740,15 
@@ when: - not db_scale_out +# /*---------------------------------------------------------------------------8 +# | | +# | ANF Mount: Run tasks for scale out setups | +# | | +# +------------------------------------4--------------------------------------*/ # Run this block set when db_Scale_out is true but db_high_availability is false - name: "ANF Mount: Run tasks for scale out setups" block: - - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" + - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" ansible.builtin.file: owner: "{{ db_sid | lower }}adm" group: sapsys @@ -748,28 +758,28 @@ when: - tier == 'hana' - - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" + - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/hana/data/{{ db_sid | upper }}" - state: directory + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/data/{{ db_sid | upper }}" + state: directory when: - tier == 'hana' - - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" + - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/hana/log/{{ db_sid | upper }}" - state: directory + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/hana/log/{{ db_sid | upper }}" + state: directory when: - tier == 'hana' - - name: "ANF Mount: HANA shared - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + - name: "ANF Mount: HANA shared - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml loop: - { 'type': 'shared', @@ -782,7 +792,7 @@ 'permissions': '0775', 'set_chattr_on_dir': false, 'target_nodes' : ['hana'], - 'create_temp_folders': false + 'create_temp_folders': true } vars: primary_host: "{{ ansible_hostname }}" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index 87ca25490e..c5302e8550 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -40,6 +40,14 @@ name: rpcbind state: restarted + - name: "ANF Mount: Filesystems on ANF ({{ item.temppath }}) (temporary)" + ansible.posix.mount: + src: "{{ item.mount }}" + path: "/{{ item.temppath }}" + fstype: "nfs4" + opts: "{{ item.opts }}" + state: mounted + - name: "ANF Mount: Create Directories ({{ item.temppath }}) on temporary mount" ansible.builtin.file: path: "/{{ item.temppath }}/{{ item.folder }}" @@ -93,6 +101,7 @@ - name: "ANF Mount: Create SAP Directories ({{ item.path }})" ansible.builtin.debug: var: is_created_now + verbosity: 2 - name: "ANF Mount: Change attribute only when we create SAP Directories ({{ item.temppath }})" become: true From d7f24acb0d000a6b4b4af9974d47843770f08fb2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 16:06:34 +0200 Subject: [PATCH 251/607] Add DOTNET_ROOT export for RHEL --- .../modules/sap_deployer/templates/configure_deployer.sh.tmpl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 44ff5224c6..324a9a06bd 100644 --- 
a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -693,6 +693,8 @@ else export DOTNET_ROOT="$${DOTNET_INSTALL_DIR}" ;; (rhel*) + echo "export DOTNET_ROOT=$(pwd)/.dotnet" | tee -a /tmp/deploy_server.sh + export DOTNET_ROOT=$(pwd)/.dotnet ;; esac From b1db55a953862ce4b712282a29c3a6e63a8f8bd7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 16:08:33 +0200 Subject: [PATCH 252/607] Update DOTNET_ROOT environment variable in configure_deployer.sh.tmpl --- .../modules/sap_deployer/templates/configure_deployer.sh.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 324a9a06bd..e6811fdb08 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -693,8 +693,8 @@ else export DOTNET_ROOT="$${DOTNET_INSTALL_DIR}" ;; (rhel*) - echo "export DOTNET_ROOT=$(pwd)/.dotnet" | tee -a /tmp/deploy_server.sh - export DOTNET_ROOT=$(pwd)/.dotnet + echo "export DOTNET_ROOT=$${DOTNET_INSTALL_DIR}" | tee -a /tmp/deploy_server.sh + export DOTNET_ROOT="$${DOTNET_INSTALL_DIR}" ;; esac From 954e398f39b54768b62f953fe6e48189d625e239 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 18:20:46 +0200 Subject: [PATCH 253/607] Update parameter name and platform condition in playbook_00_validate_parameters.yaml --- deploy/ansible/playbook_00_validate_parameters.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index e8ee37e142..4ccef8d314 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -254,7 +254,7 @@ fail_msg: "{{ item_to_check.error }}" loop: - { - parameter: "db_lb_ip", + parameter: "database_loadbalancer_ip", error: "A highly available SCS deployment requires that scs_lb_ip is provided", } - { @@ -278,7 +278,7 @@ when: # - db_high_availability - (database_high_availability and database_cluster_type == "AFA") - - platform == "HANA" + - platform in ["HANA", "DB2"] - name: "0.0 Validations - sharedHome variables" ansible.builtin.debug: From c12b987ffb2e57a96db791fd83cb5c998c7178eb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 12 Feb 2024 19:25:21 +0200 Subject: [PATCH 254/607] RHEL testing --- .../templates/configure_deployer.sh.tmpl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index e6811fdb08..9787bf9462 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -311,7 +311,7 @@ else case "$(get_distro_name)" in (ubuntu) - echo "we are inside ubuntu" + echo "we are inside ubuntu block" rel=$(lsb_release -a | grep Release | cut -d':' -f2 | xargs) if [ "$rel" == "22.04" ]; then ansible_version="$${ansible_version:-2.15}" @@ 
-320,7 +320,7 @@ else fi ;; (sles) - echo "we are inside sles" + echo "we are inside SLES block" ansible_version="2.11" ansible_major="$${ansible_version%%.*}" ansible_minor=$(echo "$${ansible_version}." | cut -d . -f 2) @@ -334,7 +334,7 @@ else sudo python3 -m pip install virtualenv; ;; (rhel) - echo "we are inside RHEL" + echo "we are inside RHEL block" ansible_version="2.11" ansible_major="$${ansible_version%%.*}" ansible_minor=$(echo "$${ansible_version}." | cut -d . -f 2) @@ -391,6 +391,7 @@ else ) ;; (rhel) + echo "we are inside RHEL block" cli_pkgs+=( azure-cli ) @@ -410,17 +411,18 @@ else ;; esac + # Install required packages as determined above + set +o errexit + pkg_mgr_install "$${required_pkgs[@]}" + + set -o errexit + # Upgrade packages pkg_mgr_upgrade # Ensure our package metadata cache is up to date pkg_mgr_refresh - # Install required packages as determined above - set +o errexit - pkg_mgr_install "$${required_pkgs[@]}" - - set -o errexit # Prepare Azure SAP Automated Deployment folder structure From 887b04378c647708ee441c9f991c9c6f0dc81c70 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 13 Feb 2024 17:31:44 +0200 Subject: [PATCH 255/607] Fix SSL certificate generation and keystore file copying*** --- .../4.2.1.9-db2_generate_distribute_ssl.yml | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml index e28d2056b6..ccfb49ae0f 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml @@ -6,6 +6,7 @@ db2_ssl_stash_file: sapdb2{{ db_sid | lower }}_ssl_comm.sth db2_ssl_label: sap_db2_{{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}_ssl_comm_000 + - name: "DB2 Primary DB: Generate SSL" when: ansible_hostname == primary_instance_name become: true @@ -50,24 +51,17 @@ - "{{ db2_ssl_keydb_file }}" - "{{ db2_ssl_stash_file }}" + - name: "DB2 Primary DB: Flag to show that copied" + ansible.builtin.set_fact: + keystore_copied: true + + - name: "DB2 Primary DB: Update SSL certificate in db2cli.ini" ansible.builtin.lineinfile: path: /sapmnt/{{ sap_sid | upper }}/global/db6/db2cli.ini regexp: '^SSLServerCertificate=' line: SSLServerCertificate=/usr/sap/{{ db_sid | upper }}/SYS/global/SSL_client/{{ db2_ssl_label }}.arm -- name: "DB2: Copy keystore files from Controller to Secondary node" - when: ansible_hostname == secondary_instance_name - ansible.builtin.copy: - src: /tmp/keystore_files/{{ item }} - dest: /db2/db2{{ db_sid | lower }}/keystore/ - mode: 0600 - owner: db2{{ db_sid | lower }} - group: db{{ db_sid | lower }}adm - loop: - - "{{ db2_ssl_keydb_file }}" - - "{{ db2_ssl_stash_file }}" - - name: "DB2 DB - Set SSL parameters" become: true become_user: db2{{ db_sid | lower }} @@ -80,3 +74,18 @@ executable: /bin/csh environment: PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin" + +- name: "DB2: Copy keystore files from Controller to Secondary node" + when: + - ansible_hostname == secondary_instance_name + - hostvars[primary_instance_name]['keystore_copied'] is defined + - hostvars[primary_instance_name]['keystore_copied'] + ansible.builtin.copy: + src: /tmp/keystore_files/{{ item }} + dest: /db2/db2{{ db_sid | lower }}/keystore/ + mode: 0600 + owner: db2{{ db_sid | lower }} + 
group: db{{ db_sid | lower }}adm + loop: + - "{{ db2_ssl_keydb_file }}" + - "{{ db2_ssl_stash_file }}" From dd1bed880bdd33411db058149bbda331d1f708ae Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 13 Feb 2024 17:56:09 +0200 Subject: [PATCH 256/607] Add VM name into hosts file --- deploy/terraform/run/sap_system/module.tf | 7 ++++ deploy/terraform/run/sap_system/output.tf | 4 +- .../modules/sap_system/anydb_node/outputs.tf | 14 ++++++- .../modules/sap_system/app_tier/outputs.tf | 37 +++++++++++++++++++ .../modules/sap_system/hdb_node/outputs.tf | 10 +++++ .../output_files/ansible_inventory.tmpl | 6 +++ .../sap_system/output_files/inventory.tf | 24 ++++++++++++ .../output_files/variables_global.tf | 4 ++ 8 files changed, 103 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 405f0023b4..a75ace3f4a 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -342,6 +342,10 @@ module "output_files" { database_server_ips = upper(try(local.database.platform, "HANA")) == "HANA" ? (module.hdb_node.database_server_ips ) : (module.anydb_node.database_server_ips ) + database_server_vm_names = upper(try(local.database.platform, "HANA")) == "HANA" ? ( + module.hdb_node.database_server_vm_names) : ( + module.anydb_node.database_server_vm_names + ) database_server_secondary_ips = upper(try(local.database.platform, "HANA")) == "HANA" ? (module.hdb_node.database_server_secondary_ips ) : (module.anydb_node.database_server_secondary_ips ) @@ -372,6 +376,7 @@ module "output_files" { app_tier_os_types = module.app_tier.app_tier_os_types application_server_ips = module.app_tier.application_server_ips application_server_secondary_ips = module.app_tier.application_server_secondary_ips + app_vm_names = module.app_tier.app_vm_names ers_instance_number = var.ers_instance_number ers_server_loadbalancer_ip = module.app_tier.ers_server_loadbalancer_ip pas_instance_number = var.pas_instance_number @@ -384,11 +389,13 @@ module "output_files" { scs_server_loadbalancer_ip = module.app_tier.scs_server_loadbalancer_ip scs_server_ips = module.app_tier.scs_server_ips scs_server_secondary_ips = module.app_tier.scs_server_secondary_ips + scs_vm_names = module.app_tier.scs_vm_names use_local_credentials = module.common_infrastructure.use_local_credentials use_msi_for_clusters = var.use_msi_for_clusters use_secondary_ips = var.use_secondary_ips webdispatcher_server_ips = module.app_tier.webdispatcher_server_ips webdispatcher_server_secondary_ips = module.app_tier.webdispatcher_server_secondary_ips + webdispatcher_server_vm_names = module.app_tier.webdispatcher_server_vm_names ######################################################################################### # Mounting information # diff --git a/deploy/terraform/run/sap_system/output.tf b/deploy/terraform/run/sap_system/output.tf index 993c8ec813..ecba87b054 100644 --- a/deploy/terraform/run/sap_system/output.tf +++ b/deploy/terraform/run/sap_system/output.tf @@ -150,9 +150,9 @@ output "hanadb_vm_ids" { value = module.hdb_node.hanadb_vm_ids } -output "database_server_vm_ips" { +output "database_server_vm_ids" { description = "VM IDs for the AnyDB Servers" - value = module.anydb_node.database_server_vm_ips + value = module.anydb_node.database_server_vm_ids } output "db_vm_ips" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf 
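For the copy above to succeed, the keystore files first have to reach the controller's /tmp/keystore_files staging directory from the primary node; that step is outside this patch, but its shape would be roughly the following sketch (same file variables as above):

- name: "DB2: Fetch keystore files from Primary node to Controller (sketch)"
  ansible.builtin.fetch:
    src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}"
    dest: "/tmp/keystore_files/{{ item }}"
    flat: true
  when: ansible_hostname == primary_instance_name
  loop:
    - "{{ db2_ssl_keydb_file }}"
    - "{{ db2_ssl_stash_file }}"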
index 505ca9b6b5..eff55991dc 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ -49,7 +49,7 @@ output "database_server_secondary_ips" { value = local.enable_deployment && var.use_secondary_ips ? try(azurerm_network_interface.anydb_db[*].private_ip_addresses[1], []) : [] } -output "database_server_vm_ips" { +output "database_server_vm_ids" { description = "AnyDB Virtual machine resource IDs" value = local.enable_deployment ? ( coalesce(azurerm_linux_virtual_machine.dbserver[*].id, @@ -60,6 +60,18 @@ output "database_server_vm_ips" { ) } +output "database_server_vm_names" { + description = "AnyDB Virtual machine names" + value = local.enable_deployment ? ( + coalesce(azurerm_linux_virtual_machine.dbserver[*].name, + azurerm_windows_virtual_machine.dbserver[*].name + ) + ) : ( + [""] + ) + } + + output "database_disks" { description = "AnyDB Virtual machine disks" value = local.enable_deployment ? local.db_disks_ansible : [] diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf index 3de617b15b..d6fdc7543c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf @@ -93,6 +93,18 @@ output "scs_vm_ids" { ) } +output "scs_vm_names" { + description = "SCS virtual machine names" + value = local.enable_deployment ? ( + concat( + azurerm_windows_virtual_machine.scs[*].name, + azurerm_linux_virtual_machine.scs[*].name + ) + ) : ( + [] + ) + } + ############################################################################### # # # Application Servers # @@ -125,6 +137,19 @@ output "app_vm_ids" { ) } +output "app_vm_names" { + description = "Application virtual machine names" + value = local.enable_deployment ? ( + concat( + azurerm_windows_virtual_machine.app[*].name, + azurerm_linux_virtual_machine.app[*].name + ) + ) : ( + [] + ) + } + + ############################################################################### # # # Web Dispatchers # @@ -162,6 +187,18 @@ output "webdispatcher_server_vm_ids" { ) } +output "webdispatcher_server_vm_names" { + description = "Web dispatcher virtual machine resource names" + value = local.enable_deployment ? ( + concat( + azurerm_windows_virtual_machine.web[*].name, + azurerm_linux_virtual_machine.web[*].name + ) + ) : ( + [] + ) + } + ############################################################################### # # # DNS Information # diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index 3c27c4a7ee..8cefe1ad41 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -73,6 +73,16 @@ output "hanadb_vm_ids" { value = local.enable_deployment ? azurerm_linux_virtual_machine.vm_dbnode[*].id : [] } +output "database_server_vm_names" { + description = "HANA Virtual machine names" + value = local.enable_deployment ? 
( + azurerm_linux_virtual_machine.vm_dbnode[*].name + ) : ( + [""] + ) + } + + output "database_disks" { description = "Disks used by the database tier" diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl index 39d9f20a48..d9e615cf0e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl @@ -9,6 +9,7 @@ ${sid}_DB: virtual_host : ${virt_dbnodes[idx]} become_user : ${db_become_user} os_type : ${db_os_type} + vm_name : ${db_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -30,6 +31,7 @@ ${sid}_SCS: virtual_host : ${virt_scs_servers[idx]} become_user : ${scs_become_user} os_type : ${scs_os_type} + vm_name : ${scs_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -51,6 +53,7 @@ ${sid}_ERS: virtual_host : ${virt_ers_servers[idx]} become_user : ${scs_become_user} os_type : ${scs_os_type} + vm_name : ${ers_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -73,6 +76,7 @@ ${sid}_PAS: virtual_host : ${virt_pas_servers[idx]} become_user : ${app_become_user} os_type : ${app_os_type} + vm_name : ${pas_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -94,6 +98,7 @@ ${sid}_APP: virtual_host : ${virt_app_servers[idx]} become_user : ${app_become_user} os_type : ${app_os_type} + vm_name : ${app_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -115,6 +120,7 @@ ${sid}_WEB: virtual_host : ${virt_web_servers[idx]} become_user : ${web_become_user} os_type : ${web_os_type} + vm_name : ${web_vmnodes[idx]} ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index aa50c2933f..49cee1d89a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -8,6 +8,7 @@ resource "local_file" "ansible_inventory_new_yml" { content = templatefile(format("%s%s", path.module, "/ansible_inventory.tmpl"), { ips_dbnodes = var.database_server_ips dbnodes = var.platform == "HANA" ? var.naming.virtualmachine_names.HANA_COMPUTERNAME : var.naming.virtualmachine_names.ANYDB_COMPUTERNAME + db_vmnodes = var.database_server_vm_names virt_dbnodes = var.use_secondary_ips ? ( var.platform == "HANA" ? var.naming.virtualmachine_names.HANA_SECONDARY_DNSNAME : var.naming.virtualmachine_names.ANYDB_SECONDARY_DNSNAME ) : ( @@ -34,6 +35,10 @@ resource "local_file" "ansible_inventory_new_yml" { slice(var.naming.virtualmachine_names.APP_COMPUTERNAME, 0, 1)) : ( [] ), + pas_vmnodes = length(var.application_server_ips) > 0 ? ( + slice(var.app_vm_names, 0, 1)) : ( + [] + ), virt_pas_servers = var.use_secondary_ips ? ( length(var.application_server_ips) > 0 ? slice(var.naming.virtualmachine_names.APP_SECONDARY_DNSNAME, 0, 1) : []) : ( @@ -45,6 +50,11 @@ resource "local_file" "ansible_inventory_new_yml" { [] ), + app_vmnodes = length(var.application_server_ips) > 0 ? ( + slice(var.app_vm_names, 1, length(var.app_vm_names))) : ( + [] + ), + virt_app_servers = var.use_secondary_ips ? ( length(var.application_server_ips) > 1 ? 
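Once the template renders, each inventory host now carries the Azure VM resource name alongside its connection address, so later plays can address the Azure resource without re-deriving it from naming conventions. A sketch of reading it back from the generated inventory:

- name: "Show the Azure VM name behind an inventory host (sketch)"
  ansible.builtin.debug:
    msg: "VM resource name: {{ hostvars[inventory_hostname].vm_name | default('unknown') }}"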
slice(var.naming.virtualmachine_names.APP_SECONDARY_DNSNAME, 1, length(var.application_server_ips)) : []) : ( length(var.application_server_ips) > 1 ? slice(var.naming.virtualmachine_names.APP_COMPUTERNAME, 1, length(var.application_server_ips)) : [] @@ -54,6 +64,10 @@ resource "local_file" "ansible_inventory_new_yml" { slice(var.naming.virtualmachine_names.SCS_COMPUTERNAME, 0, 1)) : ( [] ), + scs_vmnodes = length(var.scs_server_ips) > 0 ? ( + slice(var.scs_vm_names, 0, 1)) : ( + [] + ), virt_scs_servers = var.use_secondary_ips ? ( length(var.scs_server_ips) > 0 ? slice(var.naming.virtualmachine_names.SCS_SECONDARY_DNSNAME, 0, 1) : []) : ( @@ -64,6 +78,10 @@ resource "local_file" "ansible_inventory_new_yml" { slice(var.naming.virtualmachine_names.SCS_COMPUTERNAME, 1, length(var.scs_server_ips))) : ( [] ), + scs_vmnodes = length(var.scs_server_ips) > 0 ? ( + slice(var.scs_vm_names, 1, length(var.scs_vm_names))) : ( + [] + ), virt_ers_servers = var.use_secondary_ips ? ( length(var.scs_server_ips) > 1 ? slice(var.naming.virtualmachine_names.SCS_SECONDARY_DNSNAME, 1, length(var.scs_server_ips)) : []) : ( @@ -74,6 +92,12 @@ resource "local_file" "ansible_inventory_new_yml" { slice(var.naming.virtualmachine_names.WEB_COMPUTERNAME, 0, length(var.webdispatcher_server_ips))) : ( [] ), + + web_vmnodes = length(var.webdispatcher_server_ips) > 0 ? ( + slice(var.webdispatcher_server_vm_names, 0, length(var.webdispatcher_server_ips))) : ( + [] + ), + virt_web_servers = var.use_secondary_ips ? ( length(var.webdispatcher_server_ips) > 0 ? slice(var.naming.virtualmachine_names.WEB_SECONDARY_DNSNAME, 0, length(var.webdispatcher_server_ips)) : []) : ( length(var.webdispatcher_server_ips) > 0 ? slice(var.naming.virtualmachine_names.WEB_COMPUTERNAME, 0, length(var.webdispatcher_server_ips)) : [] diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 531669636e..d0f8b02475 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -12,6 +12,7 @@ variable "app_server_count" { } variable "app_subnet_netmask" { description = "netmask for the SAP application subnet" } variable "app_tier_os_types" { description = "Defines the app tier os types" } +variable "app_vm_names" { description = "List of VM names for the Application Servers" } variable "application_server_ips" { description = "List of IP addresses for the Application Servers" } variable "application_server_secondary_ips" { description = "List of secondary IP addresses for the Application Servers" } variable "authentication_type" { @@ -50,6 +51,7 @@ variable "db_server_count" { variable "database_server_ips" { description = "List of IP addresses for the database servers" } variable "database_server_secondary_ips" { description = "List of secondary IP addresses for the database servers" } variable "database_shared_disks" { description = "Database Azure Shared Disk" } +variable "database_server_vm_names" { description = "List of VM names for the database servers" } variable "db_sid" { description = "Database SID" } variable "database_subnet_netmask" { description = "netmask for the database subnet" } variable "disks" { description = "List of disks" } @@ -163,6 +165,7 @@ variable "scs_server_count" { } variable "scs_server_ips" { description = "List of IP addresses for the SCS Servers" } variable 
"scs_server_secondary_ips" { description = "List of secondary IP addresses for the SCS Servers" } +variable "scs_vm_names" { description = "List of VM names for the SCS Servers" } variable "shared_home" { description = "If defined provides shared-home support" } variable "sid_keyvault_user_id" { description = "Defines the names for the resources" } variable "tfstate_resource_id" { description = "Resource ID for tf state file" } @@ -194,3 +197,4 @@ variable "web_sid" { } variable "webdispatcher_server_ips" { description = "List of IP addresses for the Web dispatchers" } variable "webdispatcher_server_secondary_ips" { description = "List of secondary IP addresses for the Web dispatchers" } +variable "webdispatcher_server_vm_names" { description = "List of VM names for the Web dispatchers" } From 148e68c0cf0377e3b0e4a061f110b84a63fd8a64 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 13 Feb 2024 18:01:56 +0200 Subject: [PATCH 257/607] Add db2_instance_type and update inventory.tf --- deploy/ansible/vars/ansible-input-api.yaml | 1 + .../modules/sap_system/output_files/inventory.tf | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 14b1fc1a4a..2215d455ef 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -101,6 +101,7 @@ db2hadr_port1: 51012 db2hadr_port2: 51013 # Name of the database connect user for ABAP. Default value is 'sap'. db2_abap_connect_user: "" +db2_instance_type: "ABAP" tmp_directory: "/var/tmp" url_internet: "https://azure.status.microsoft/en-us/status" # URL to use for internet access checks" diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 49cee1d89a..b88f9775cd 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -78,7 +78,7 @@ resource "local_file" "ansible_inventory_new_yml" { slice(var.naming.virtualmachine_names.SCS_COMPUTERNAME, 1, length(var.scs_server_ips))) : ( [] ), - scs_vmnodes = length(var.scs_server_ips) > 0 ? ( + ers_vmnodes = length(var.scs_server_ips) > 0 ? 
( slice(var.scs_vm_names, 1, length(var.scs_vm_names))) : ( [] ), From 067a2d64ab201429d9e9c9fc85e2abbb3900a099 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 13 Feb 2024 18:27:53 +0200 Subject: [PATCH 258/607] Add DB2 encryption support and SSL certificate generation --- .../4.2.1.9-db2_generate_distribute_ssl.yml | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml index ccfb49ae0f..8ca31dec3d 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml @@ -1,4 +1,29 @@ --- + +- name: "DB2: Variable for keystore files" + ansible.builtin.set_fact: + keystore_files: + - sapdb2{{ db_sid | lower }}_db_encr.p12 + - sapdb2{{ db_sid | lower }}_db_encr.sth + +- name: "DB2: Stat if the keystore files exist on Primary node" + ansible.builtin.stat: + path: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" + loop: "{{ keystore_files }}" + register: keystore_files_stat + when: ansible_hostname == primary_instance_name + +- name: "DB2: Determine if the database is encrypted" + ansible.builtin.set_fact: + db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" + when: ansible_hostname == primary_instance_name + +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + when: ansible_hostname == primary_instance_name + - name: "DB2: variables for SSL certificate" ansible.builtin.set_fact: db2_ssl_cn: "{{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}.{{ sap_fqdn }}" @@ -8,7 +33,9 @@ - name: "DB2 Primary DB: Generate SSL" - when: ansible_hostname == primary_instance_name + when: + - ansible_hostname == primary_instance_name + - db_encrypted become: true become_user: db2{{ db_sid | lower }} block: @@ -31,7 +58,9 @@ LD_LIBRARY_PATH: /db2/db2{{ db_sid | lower }}/sqllib/lib64:/db2/db2{{ db_sid | lower }}/sqllib/lib64/gskit:/db2/db2{{ db_sid | lower }}/sqllib/lib - name: "DB2 Primary DB - Copy SSL Certificate and Keystore files" - when: ansible_hostname == primary_instance_name + when: + - ansible_hostname == primary_instance_name + - db_encrypted block: - name: "DB2 Primary DB - Copy SSL certificate to SSL_client directory" ansible.builtin.copy: @@ -63,6 +92,9 @@ line: SSLServerCertificate=/usr/sap/{{ db_sid | upper }}/SYS/global/SSL_client/{{ db2_ssl_label }}.arm - name: "DB2 DB - Set SSL parameters" + when: + - hostvars[primary_instance_name]['db_encrypted'] is defined + - hostvars[primary_instance_name]['db_encrypted'] become: true become_user: db2{{ db_sid | lower }} ansible.builtin.shell: | From d3f5998b50ad975d26f56a5e875a2c1433d98c82 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 13 Feb 2024 18:40:31 +0200 Subject: [PATCH 259/607] Fix database instance number in variables_local.tf and inventory.tf --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- .../modules/sap_system/output_files/inventory.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index f9c8826b62..2cbe719fc0 100644 --- 
a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -159,7 +159,7 @@ locals { flatten([ for port in local.lb_ports[split(".", local.hdb_version)[0]] : { sid = var.sap_sid - port = tonumber(port) + (tonumber(try(var.database.instance.instance_number, 0)) * 100) + port = tonumber(port) + (tonumber(try(var.database.instance.number, 0)) * 100) } ])) : ( null diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index b88f9775cd..aea9308da3 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -181,7 +181,7 @@ resource "local_file" "sap-parameters_yml" { database_cluster_type = var.database_cluster_type database_high_availability = var.database_high_availability database_cluster_ip = try(format("%s/%s", var.database_cluster_ip, var.database_subnet_netmask), "") - db_instance_number = try(var.database.instance.instance_number, "00") + db_instance_number = try(var.database.instance.number, "00") database_loadbalancer_ip = var.database_loadbalancer_ip db_sid = var.db_sid disks = var.disks From f52d65db90553400bfa9a9b9e32b8ef76e72d795 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Tue, 13 Feb 2024 10:48:45 -0800 Subject: [PATCH 260/607] Update repository tasks for RedHat OS family (#549) --- deploy/ansible/roles-os/1.3-repository/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml index 9b87bd6b5e..5b7bb12108 100644 --- a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml +++ b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml @@ -51,7 +51,7 @@ # Doing it this way to handle also Oracle Distros - name: "1.3 Repository: - Prepare the repositories." ansible.builtin.include_tasks: "1.3.0-preparation-RedHat.yaml" - when: ansible_os_family | upper != 'SUSE' + when: ansible_os_family | upper == 'REDHAT' - name: "1.3 Repository: - Manage the repositories." ansible.builtin.include_tasks: "1.3.1-repositories-Suse.yaml" @@ -60,7 +60,7 @@ # Doing it this way to handle also Oracle Distros - name: "1.3 Repository: - Prepare the repositories." ansible.builtin.include_tasks: "1.3.1-repositories-RedHat.yaml" - when: ansible_os_family | upper != 'SUSE' + when: ansible_os_family | upper == 'REDHAT' - name: "1.3 Repository: - Manage the repositories." @@ -70,7 +70,7 @@ # Doing it this way to handle also Oracle Distros - name: "1.3 Repository: - Prepare the repositories." 
ansible.builtin.include_tasks: "1.3.2-custom-repositories-RedHat.yaml" - when: ansible_os_family | upper != 'SUSE' + when: ansible_os_family | upper == 'REDHAT' # - name: "1.3 Repos: Install EPEL repo" # ansible.builtin.yum_repository: From 91f2156a273408c996d5adbde9c76615e62a063d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 09:40:10 +0200 Subject: [PATCH 261/607] Add support for encrypted database connections --- .../4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml index 4dc6d5139a..48ab4ca212 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml @@ -197,6 +197,7 @@ when: - db2_instance_type == 'ABAP' - db2_ssl_label is defined + - db_encrypted become: true become_user: db2{{ db_sid | lower }} block: From cf38ddbc3d6a1becb836a87e09a29268033c0547 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 11:30:21 +0200 Subject: [PATCH 262/607] Add 'oinstall' group for Oracle ASM on observer --- deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index cf525ca402..951e1b6ec7 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -54,10 +54,10 @@ - { group: 'dgdba', gid: '{{ dgdba_gid }}' } - { group: 'kmdba', gid: '{{ kmdba_gid }}' } - { group: 'racdba', gid: '{{ racdba_gid }}' } + - { group: 'oinstall', gid: '{{ oinstall_gid }}' } when: - node_tier == "oracle-asm" - - name: "2.5.1 SAP Users: - Create SAP Groups for Oracle ASM on observer" ansible.builtin.group: name: "{{ item.group }}" From 623c8ef530514fb48d0f77723cfae8cf51b06e12 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 11:58:12 +0200 Subject: [PATCH 263/607] Remove 'dba' group from 'oracle' user --- deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index 951e1b6ec7..d20177b42f 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -112,7 +112,7 @@ ansible.builtin.user: name: "oracle" group: "oinstall" - groups: asmoper,asmadmin,asmdba,dba,oper,backupdba,dgdba,kmdba,racdba + groups: asmoper,asmadmin,asmdba,oper,backupdba,dgdba,kmdba,racdba append: true shell: /bin/csh # create_home: true From 4ea2a05970d2ddb375c9e93356f3c2b9fb3dec18 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 12:08:42 +0200 Subject: [PATCH 264/607] Fix group duplication in main.yaml --- deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index d20177b42f..bc5765fc5a 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -54,7 +54,7 @@ - { group: 
'dgdba', gid: '{{ dgdba_gid }}' } - { group: 'kmdba', gid: '{{ kmdba_gid }}' } - { group: 'racdba', gid: '{{ racdba_gid }}' } - - { group: 'oinstall', gid: '{{ oinstall_gid }}' } + - { group: 'oinstall', gid: '{{ oinstall_gid }}' } when: - node_tier == "oracle-asm" @@ -68,6 +68,7 @@ - { group: 'kmdba', gid: '{{ kmdba_gid }}' } - { group: 'racdba', gid: '{{ racdba_gid }}' } - { group: 'backupdba', gid: '{{ backupdba_gid }}' } + - { group: 'oinstall', gid: '{{ oinstall_gid }}' } when: - node_tier == "observer" @@ -112,7 +113,7 @@ ansible.builtin.user: name: "oracle" group: "oinstall" - groups: asmoper,asmadmin,asmdba,oper,backupdba,dgdba,kmdba,racdba + groups: asmoper,asmadmin,asmdba,asmoper,backupdba,dgdba,kmdba,racdba append: true shell: /bin/csh # create_home: true From 968f104a8c3239c58d9a5aa59e202f106b510674 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 12:34:54 +0200 Subject: [PATCH 265/607] Update import task filenames in 2.6 SAP Mounts role --- ...{oracle-asm-prereq.yaml => 2.6.3-oracle-asm-prereq.yaml} | 0 .../{oracle-multi-sid.yaml => 2.6.3-oracle-multi-sid.yaml} | 0 .../{oracle-observer.yaml => 2.6.3-oracle-observer.yaml} | 0 deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 6 +++--- .../tasks/{oracle-asm.yaml => oracle-asm.old} | 0 .../tasks/{oracle-nfs-mounts.yaml => oracle-nfs-mounts.old} | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/{oracle-asm-prereq.yaml => 2.6.3-oracle-asm-prereq.yaml} (100%) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/{oracle-multi-sid.yaml => 2.6.3-oracle-multi-sid.yaml} (100%) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/{oracle-observer.yaml => 2.6.3-oracle-observer.yaml} (100%) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/{oracle-asm.yaml => oracle-asm.old} (100%) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/{oracle-nfs-mounts.yaml => oracle-nfs-mounts.old} (100%) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm-prereq.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm-prereq.yaml rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-multi-sid.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-multi-sid.yaml similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-multi-sid.yaml rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-multi-sid.yaml diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-observer.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-observer.yaml rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index a82f135f8d..9bd0451ee9 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -301,7 +301,7 @@ # Import this task only if the tier is ora for oracle-asm. 
- name: "2.6 SAP Mounts: - Import Oracle ASM pre-requisite tasks" - ansible.builtin.import_tasks: "oracle-asm-prereq.yaml" + ansible.builtin.import_tasks: "2.6.3-oracle-asm-prereq.yaml" when: - node_tier == "oracle-asm" @@ -313,12 +313,12 @@ # - tier == "ora" - name: "2.6 SAP Mounts: - Import Oracle observer tasks" - ansible.builtin.import_tasks: "oracle-observer.yaml" + ansible.builtin.import_tasks: "2.6.3-oracle-observer.yaml" when: - node_tier == "observer" - name: "2.6 SAP Mounts: - Import Oracle shared home tasks" - ansible.builtin.import_tasks: "oracle-multi-sid.yaml" + ansible.builtin.import_tasks: ""2.6.3-oracle-multi-sid.yaml" when: - node_tier == "oracle-multi-sid" - node_tier != "oracle-asm" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm.old similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm.yaml rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-asm.old diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-nfs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-nfs-mounts.old similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-nfs-mounts.yaml rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/oracle-nfs-mounts.old From 2d28ec915afc2bc1c3e60dce475ca19a62ca7f99 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 12:38:28 +0200 Subject: [PATCH 266/607] Refactor AFS Mount: install task in 2.6.3-oracle-observer.yaml --- .../tasks/2.6.3-oracle-observer.yaml | 52 ++++++++----------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml index 17af55ade8..563452f9c7 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml @@ -86,35 +86,25 @@ - usr_sap_install_mountpoint is not defined -- name: "AFS Mount: Mount Install" - block: - - - name: "AFS Mount: Mount Install folder when using AFS ({{ usr_sap_install_mountpoint }}/{{ bom_base_name }})" - ansible.posix.mount: - src: "{{ usr_sap_install_mountpoint }}/{{ bom_base_name }}" - path: "{{ target_media_location }}" - fstype: 'nfs4' - opts: "vers=4,minorversion=1,sec=sys" - state: mounted - when: - - node_tier == 'observer' - - use_AFS - rescue: - - name: "AFS Mount: Re-Mount Install folder when using AFS" - ansible.builtin.debug: - msg: "Trying to remount Install Folder" - - - name: "AFS Mount: Pause for 5 seconds" - ansible.builtin.pause: - seconds: 15 +- name: "AFS Mount: install" + ansible.builtin.include_tasks: 2.6.0.1-afs-mount.yaml + loop: + - { + 'type': 'install', + 'temppath': 'sapinstall', + 'folder': '{{ bom_base_name }}', + 'mount': '{{ usr_sap_install_mountpoint }}', + 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'path': '/usr/sap/install', + 'permissions': '0777', + 'set_chattr_on_dir': false, + 'target_nodes': ['all'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ nfs_server }}" + when: + - tier == 'sapos' + - usr_sap_install_mountpoint is defined + - node_tier == 'observer' - - name: Mount Install folder when using AFS - ansible.posix.mount: - src: "{{ usr_sap_install_mountpoint }}/{{ bom_base_name }}" - path: "{{ target_media_location }}" - fstype: 'nfs4' - opts: 
"vers=4,minorversion=1,sec=sys" - state: mounted - when: - - node_tier == 'observer' - - use_AFS From 73b9ebfa4935f11d068971d48d3f4ba81b27d7ae Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 12:40:55 +0200 Subject: [PATCH 267/607] change the when condition --- .../4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml index 48ab4ca212..726781485b 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml @@ -197,7 +197,8 @@ when: - db2_instance_type == 'ABAP' - db2_ssl_label is defined - - db_encrypted + - hostvars[primary_instance_name]['db_encrypted'] is defined + - hostvars[primary_instance_name]['db_encrypted'] become: true become_user: db2{{ db_sid | lower }} block: From 64194505d89e7f62720c965eeecba6cdf0d7d41b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 12:51:55 +0200 Subject: [PATCH 268/607] Fix import_tasks path in 2.6 SAP Mounts role --- deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 9bd0451ee9..d82014b1f0 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -318,7 +318,7 @@ - node_tier == "observer" - name: "2.6 SAP Mounts: - Import Oracle shared home tasks" - ansible.builtin.import_tasks: ""2.6.3-oracle-multi-sid.yaml" + ansible.builtin.import_tasks: "2.6.3-oracle-multi-sid.yaml" when: - node_tier == "oracle-multi-sid" - node_tier != "oracle-asm" From 5f81c37adc4dbef05a426d371f0988987c3c4e2e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 13:10:03 +0200 Subject: [PATCH 269/607] Import SAP mount tasks based on defined mountpoints and NFS providers --- .../2.6-sap-mounts/tasks/main.yaml | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index d82014b1f0..ad7d0a5cfa 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -293,6 +293,31 @@ - node_tier != 'oracle-multi-sid' - sap_mnt is undefined +# Import this task only if the any of the AFS mountpoints are defined +- name: "2.6 SAP Mounts: - Import AFS tasks" + ansible.builtin.import_tasks: 2.6.0-afs-mounts.yaml + when: + - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined + - not use_simple_mount + - NFS_provider in ['AFS', 'NONE'] + +- name: "2.6 SAP Mounts: - Import AFS simple mount tasks" + ansible.builtin.import_tasks: 2.6.7-afs-mounts-simplemount.yaml + when: + - use_simple_mount is defined and use_simple_mount + - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined + - NFS_provider == 'AFS' + + +# Import this task only if the sap_mnt is defined, i.e. 
ANF is used +- name: "2.6 SAP Mounts: - Import ANF tasks" + ansible.builtin.import_tasks: 2.6.1-anf-mounts.yaml + when: + - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined + - not use_simple_mount + - NFS_provider == 'ANF' + + # Import this task only if the tier is ora. - name: "2.6 SAP Mounts: - Import Oracle tasks" ansible.builtin.import_tasks: "2.6.2-oracle-mounts.yaml" @@ -335,29 +360,6 @@ when: - node_tier == "sybase" -# Import this task only if the any of the AFS mountpoints are defined -- name: "2.6 SAP Mounts: - Import AFS tasks" - ansible.builtin.import_tasks: 2.6.0-afs-mounts.yaml - when: - - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - - not use_simple_mount - - NFS_provider in ['AFS', 'NONE'] - -- name: "2.6 SAP Mounts: - Import AFS simple mount tasks" - ansible.builtin.import_tasks: 2.6.7-afs-mounts-simplemount.yaml - when: - - use_simple_mount is defined and use_simple_mount - - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - - NFS_provider == 'AFS' - - -# Import this task only if the sap_mnt is defined, i.e. ANF is used -- name: "2.6 SAP Mounts: - Import ANF tasks" - ansible.builtin.import_tasks: 2.6.1-anf-mounts.yaml - when: - - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - - not use_simple_mount - - NFS_provider == 'ANF' # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 # This will be removed in the next release, left here for tracing and documentation From 32b02852b94cf4db8adbad402b6fcab6a964101f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 20:45:35 +0200 Subject: [PATCH 270/607] Update oracle.install.asm.OSASM value in gridinstallresponsefile --- .../templates/gridinstallresponsefile | 2 +- .../ORACLE_19c_00_ASM_db_v1_install.rsp.j2 | 62 +++++++++---------- .../templates/gridinstallresponsefile | 2 +- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/templates/gridinstallresponsefile b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/templates/gridinstallresponsefile index 53c0b51304..19770e235b 100644 --- a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/templates/gridinstallresponsefile +++ b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/templates/gridinstallresponsefile @@ -108,7 +108,7 @@ oracle.install.asm.OSOPER=oinstall # The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This # must be different than the previous two. #------------------------------------------------------------------------------- -oracle.install.asm.OSASM=dba +oracle.install.asm.OSASM=asmdba ################################################################################ # # diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/ORACLE_19c_00_ASM_db_v1_install.rsp.j2 b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/ORACLE_19c_00_ASM_db_v1_install.rsp.j2 index e39854771a..5a5fe7518d 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/ORACLE_19c_00_ASM_db_v1_install.rsp.j2 +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/ORACLE_19c_00_ASM_db_v1_install.rsp.j2 @@ -16,7 +16,7 @@ #------------------------------------------------------------------------------ -# Do not change the following system generated value. +# Do not change the following system generated value. 
#------------------------------------------------------------------------------ oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_schema_v19.0.0 @@ -29,7 +29,7 @@ oracle.install.responseFileVersion=/oracle/install/rspfmt_dbinstall_response_sch oracle.install.option=INSTALL_DB_AND_CONFIG #------------------------------------------------------------------------------- -# Specify the Unix group to be set for the inventory directory. +# Specify the Unix group to be set for the inventory directory. #------------------------------------------------------------------------------- UNIX_GROUP_NAME=oinstall @@ -41,17 +41,17 @@ UNIX_GROUP_NAME=oinstall INVENTORY_LOCATION=/oracle/oraInventory #------------------------------------------------------------------------------- -# Specify the complete path of the Oracle Base. +# Specify the complete path of the Oracle Base. #------------------------------------------------------------------------------- ORACLE_BASE=/oracle/{{ db_sid| upper}} #------------------------------------------------------------------------------- -# Specify the installation edition of the component. -# -# The value should contain only one of these choices. - -# - EE : Enterprise Edition - +# Specify the installation edition of the component. +# +# The value should contain only one of these choices. + +# - EE : Enterprise Edition + # - SE2 : Standard Edition 2 @@ -74,33 +74,33 @@ oracle.install.db.InstallEdition=EE #------------------------------------------------------------------------------ # The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges. #------------------------------------------------------------------------------- -oracle.install.db.OSDBA_GROUP=dba +oracle.install.db.OSDBA_GROUP=asmdba #------------------------------------------------------------------------------ # The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges. # The value to be specified for OSOPER group is optional. #------------------------------------------------------------------------------ -oracle.install.db.OSOPER_GROUP=oper +oracle.install.db.OSOPER_GROUP=asmoper #------------------------------------------------------------------------------ # The OSBACKUPDBA_GROUP is the OS group which is to be granted SYSBACKUP privileges. #------------------------------------------------------------------------------ -oracle.install.db.OSBACKUPDBA_GROUP=oper +oracle.install.db.OSBACKUPDBA_GROUP=asmoper #------------------------------------------------------------------------------ # The OSDGDBA_GROUP is the OS group which is to be granted SYSDG privileges. #------------------------------------------------------------------------------ -oracle.install.db.OSDGDBA_GROUP=dba +oracle.install.db.OSDGDBA_GROUP=asmdba #------------------------------------------------------------------------------ # The OSKMDBA_GROUP is the OS group which is to be granted SYSKM privileges. #------------------------------------------------------------------------------ -oracle.install.db.OSKMDBA_GROUP=dba +oracle.install.db.OSKMDBA_GROUP=asmdba #------------------------------------------------------------------------------ # The OSRACDBA_GROUP is the OS group which is to be granted SYSRAC privileges. 
#------------------------------------------------------------------------------ -oracle.install.db.OSRACDBA_GROUP=dba +oracle.install.db.OSRACDBA_GROUP=asmdba ################################################################################ # # # Root script execution configuration # @@ -133,7 +133,7 @@ oracle.install.db.rootconfig.configMethod= oracle.install.db.rootconfig.sudoPath= #-------------------------------------------------------------------------------------- -# Specify the name of the user who is in the sudoers list. +# Specify the name of the user who is in the sudoers list. # Applicable only when SUDO configuration method was chosen. # Note:For Single Instance database installations,the sudo user name must be the username of the user installing the database. #-------------------------------------------------------------------------------------- @@ -147,9 +147,9 @@ oracle.install.db.rootconfig.sudoUserName=oracle #------------------------------------------------------------------------------ # Value is required only if the specified install option is INSTALL_DB_SWONLY -# +# # Specify the cluster node names selected during the installation. -# +# # Example : oracle.install.db.CLUSTER_NODES=node1,node2 #------------------------------------------------------------------------------ oracle.install.db.CLUSTER_NODES= @@ -163,8 +163,8 @@ oracle.install.db.CLUSTER_NODES= #------------------------------------------------------------------------------- # Specify the type of database to create. # It can be one of the following: -# - GENERAL_PURPOSE -# - DATA_WAREHOUSE +# - GENERAL_PURPOSE +# - DATA_WAREHOUSE # GENERAL_PURPOSE: A starter database designed for general purpose use or transaction-heavy applications. # DATA_WAREHOUSE : A starter database optimized for data warehousing applications. #------------------------------------------------------------------------------- @@ -172,7 +172,7 @@ oracle.install.db.config.starterdb.type=GENERAL_PURPOSE #------------------------------------------------------------------------------- -# Specify the Starter Database Global Database Name. +# Specify the Starter Database Global Database Name. #------------------------------------------------------------------------------- oracle.install.db.config.starterdb.globalDBName=orcl.sap.com @@ -195,7 +195,7 @@ oracle.install.db.config.PDBName= #------------------------------------------------------------------------------- # Specify the Starter Database character set. -# +# # One of the following # AL32UTF8, WE8ISO8859P15, WE8MSWIN1252, EE8ISO8859P2, # EE8MSWIN1250, NE8ISO8859P10, NEE8ISO8859P4, BLT8MSWIN1257, @@ -208,7 +208,7 @@ oracle.install.db.config.PDBName= oracle.install.db.config.starterdb.characterSet=AL32UTF8 #------------------------------------------------------------------------------ -# This variable should be set to true if Automatic Memory Management +# This variable should be set to true if Automatic Memory Management # in Database is desired. # If Automatic Memory Management is not desired, and memory allocation # is to be done manually, then set it to false. @@ -217,7 +217,7 @@ oracle.install.db.config.starterdb.memoryOption=false #------------------------------------------------------------------------------- # Specify the total memory allocation for the database. Value(in MB) should be -# at least 256 MB, and should not exceed the total physical memory available +# at least 256 MB, and should not exceed the total physical memory available # on the system. 
# Example: oracle.install.db.config.starterdb.memoryLimit=512 #------------------------------------------------------------------------------- @@ -312,7 +312,7 @@ oracle.install.db.config.starterdb.emAdminPassword= ############################################################################### #------------------------------------------------------------------------------ -# This variable is to be set to false if database recovery is not required. Else +# This variable is to be set to false if database recovery is not required. Else # this can be set to true. #------------------------------------------------------------------------------- oracle.install.db.config.starterdb.enableRecovery=true @@ -327,16 +327,16 @@ oracle.install.db.config.starterdb.storageType=ASM_STORAGE #------------------------------------------------------------------------------- # Specify the database file location which is a directory for datafiles, control -# files, redo logs. +# files, redo logs. # -# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE #------------------------------------------------------------------------------- oracle.install.db.config.starterdb.fileSystemStorage.dataLocation= #------------------------------------------------------------------------------- # Specify the recovery location. # -# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE +# Applicable only when oracle.install.db.config.starterdb.storage=FILE_SYSTEM_STORAGE #------------------------------------------------------------------------------- oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= @@ -347,7 +347,7 @@ oracle.install.db.config.starterdb.fileSystemStorage.recoveryLocation= #------------------------------------------------------------------------------- oracle.install.db.config.asm.diskGroup=DATA #------------------------------------------------------------------------------- -# Specify the password for ASMSNMP user of the ASM instance. +# Specify the password for ASMSNMP user of the ASM instance. # -# Applicable only when oracle.install.db.config.starterdb.storage=ASM_STORAGE -#------------------------------------------------------------------------------- \ No newline at end of file +# Applicable only when oracle.install.db.config.starterdb.storage=ASM_STORAGE +#------------------------------------------------------------------------------- diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/gridinstallresponsefile b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/gridinstallresponsefile index 323bc122fa..c7a5728f97 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/gridinstallresponsefile +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/templates/gridinstallresponsefile @@ -108,7 +108,7 @@ oracle.install.asm.OSOPER=oinstall # The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This # must be different than the previous two. 
#------------------------------------------------------------------------------- -oracle.install.asm.OSASM=dba +oracle.install.asm.OSASM=asmdba ################################################################################ # # From 13e88ba73e7ccca69622c479ded22d393f9c20cb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 14 Feb 2024 21:20:25 +0200 Subject: [PATCH 271/607] Add new groups for dba and oper --- deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index bc5765fc5a..eccb18e3f8 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -51,6 +51,8 @@ - { group: 'asmadmin', gid: '{{ asmadmin_gid }}' } - { group: 'asmdba', gid: '{{ asmdba_gid }}' } - { group: 'backupdba', gid: '{{ backupdba_gid }}' } + - { group: 'dba', gid: '{{ dba_gid }}' } + - { group: 'oper', gid: '{{ oper_gid }}' } - { group: 'dgdba', gid: '{{ dgdba_gid }}' } - { group: 'kmdba', gid: '{{ kmdba_gid }}' } - { group: 'racdba', gid: '{{ racdba_gid }}' } From 0f91757a4725cf4b7c8e592359a71e66128ec2ca Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 15 Feb 2024 10:32:22 +0200 Subject: [PATCH 272/607] Add SAP user and Oracle ASM user creation tasks*** --- .../roles-sap-os/2.5-sap-users/tasks/main.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index eccb18e3f8..93b4370432 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -40,6 +40,8 @@ - { group: 'sapsys', gid: '{{ sapsys_gid }}' } - { group: 'sapinst', gid: '{{ sapinst_gid }}' } + + # Create Groups for Oracle ASM. - name: "2.5.1 SAP Users: - Create SAP Groups for Oracle ASM" ansible.builtin.group: @@ -74,6 +76,16 @@ when: - node_tier == "observer" +- name: "2.5.1 SAP Users: - Create SAP User for Oracle ASM" + ansible.builtin.user: + name: oracle + uid: "{{ oracle_uid }}" + group: oinstall + groups: dba,racdba,oper,backupdba,dgdba,kmdba + when: + - node_tier == "oracle-asm" + + # *=====================================4=======================================8 # # Create Users From 365d76eee44bd681b94aa939ce244d41eea74030 Mon Sep 17 00:00:00 2001 From: Morgan Deegan <54906896+mkdeegan@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:59:23 -0800 Subject: [PATCH 273/607] OS aware path separator in PowerShell script (#551) * OS aware path separator * OS aware path separator * OS aware path separator --------- Co-authored-by: Morgan Deegan --- deploy/scripts/New-SDAFDevopsProject.ps1 | 45 +++++++++++------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 1963eb145d..e8878cde33 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -1,3 +1,6 @@ +# Write-Host "..............." 
-ForegroundColor Cyan + + function Show-Menu($data) { Write-Host "================ $Title ================" $i = 1 @@ -13,17 +16,17 @@ function Show-Menu($data) { #region Initialize # Initialize variables from Environment variables -$ADO_Organization = $Env:SDAF_ADO_ORGANIZATION -$ADO_Project = $Env:SDAF_ADO_PROJECT -$Control_plane_code = $Env:SDAF_CONTROL_PLANE_CODE -$Workload_zone_code = $Env:SDAF_WORKLOAD_ZONE_CODE - -$Control_plane_subscriptionID = $Env:SDAF_ControlPlaneSubscriptionID -$Workload_zone_subscriptionID = $Env:SDAF_WorkloadZoneSubscriptionID -$ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName +$ADO_Organization = $Env:SDAF_ADO_ORGANIZATION +$ADO_Project = $Env:SDAF_ADO_PROJECT +$ARM_TENANT_ID = $Env:ARM_TENANT_ID +$Control_plane_code = $Env:SDAF_CONTROL_PLANE_CODE +$Control_plane_subscriptionID = $Env:SDAF_ControlPlaneSubscriptionID +$ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName +$Workload_zone_code = $Env:SDAF_WORKLOAD_ZONE_CODE +$Workload_zone_subscriptionID = $Env:SDAF_WorkloadZoneSubscriptionID $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName -$ARM_TENANT_ID = $Env:ARM_TENANT_ID +if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } #endregion $versionLabel = "v3.11.0.0" @@ -62,10 +65,7 @@ else { Write-Host "" Write-Host "" -if (Test-Path .\start.md) { - Write-Host "Removing start.md" - Remove-Item .\start.md -} +if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { $Title = "Select the authentication method to use" @@ -745,9 +745,9 @@ else { Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' - $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access .\manifest.json --query "appId").Replace('"', "") + $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "") - Remove-Item manifest.json + if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) } @@ -1006,16 +1006,14 @@ if (!$AlreadySet -or $ResetPAT ) { Write-Host "Creating agent pool" $Pool_Name -ForegroundColor Green Set-Content -Path pool.json -Value (ConvertTo-Json @{name = $Pool_Name; autoProvision = $true }) - az devops invoke --area distributedtask --resource pools --http-method POST --api-version "7.1-preview" --in-file .\pool.json --query-parameters authorizePipelines=true --query id --output none --only-show-errors + az devops invoke --area distributedtask --resource pools --http-method POST --api-version "7.1-preview" --in-file ".${pathSeparator}pool.json" --query-parameters authorizePipelines=true --query id --output none --only-show-errors $POOL_ID = (az pipelines pool list --query "[?name=='$Pool_Name'].id | [0]" --output tsv) Write-Host "Agent pool" $Pool_Name "created" $queue_id = (az pipelines queue list 
--query "[?name=='$Pool_Name'].id | [0]" --output tsv) } - if (Test-Path .\pool.json) { - Remove-Item .\pool.json - } + if (Test-Path ".${pathSeparator}pool.json") { Write-Host "Removing pool.json" ; Remove-Item ".${pathSeparator}pool.json" } # Create header with PAT $base64AuthInfo = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes((":{0}" -f $PAT))) @@ -1096,12 +1094,12 @@ if ($WIKI_NAME_FOUND.Length -gt 0) { Write-Host "Wiki SDAF already exists" $eTag = (az devops wiki page show --path 'Next steps' --wiki SDAF --query eTag ) if ($eTag -ne $null) { - $page_id = (az devops wiki page update --path 'Next steps' --wiki SDAF --file-path .\start.md --only-show-errors --version $eTag --query page.id) + $page_id = (az devops wiki page update --path 'Next steps' --wiki SDAF --file-path ".${pathSeparator}start.md" --only-show-errors --version $eTag --query page.id) } } else { az devops wiki create --name SDAF --output none --only-show-errors - az devops wiki page create --path 'Next steps' --wiki SDAF --file-path .\start.md --output none --only-show-errors + az devops wiki page create --path 'Next steps' --wiki SDAF --file-path ".${pathSeparator}start.md" --output none --only-show-errors } $page_id = (az devops wiki page show --path 'Next steps' --wiki SDAF --query page.id ) @@ -1110,7 +1108,4 @@ $wiki_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/ Write-Host "URL: " $wiki_url Start-Process $wiki_url -if (Test-Path .\start.md) { - Write-Host "Removing start.md" - Remove-Item .\start.md -} +if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } From e547c812aa16cd916b5acb266946099f1681a519 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 16 Feb 2024 13:36:00 +0200 Subject: [PATCH 274/607] Don't create DNS zone for table if not using the Web App --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index 5eae5a3a7a..c0b41cda3d 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id),"")> 0 ? 1 : 0 depends_on = [ azurerm_resource_group.library ] From 7115be2ac9ff1b20ae471d22b4c8399a02635b00 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 16 Feb 2024 13:51:49 +0200 Subject: [PATCH 275/607] typo --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index c0b41cda3d..d9196a16e1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id),"")> 0 ? 
1 : 0
+  count      = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id,""))> 0 ? 1 : 0
   depends_on = [
     azurerm_resource_group.library
   ]

From af9706af51d91c4223bb3937807596efd6a6a454 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Fri, 16 Feb 2024 14:47:24 +0200
Subject: [PATCH 276/607] Update subscription_id in providers.tf

---
 deploy/terraform/run/sap_system/providers.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf
index 7d19f9ab51..504c8662ad 100644
--- a/deploy/terraform/run/sap_system/providers.tf
+++ b/deploy/terraform/run/sap_system/providers.tf
@@ -43,7 +43,7 @@ provider "azurerm" {
 provider "azurerm" {
   features {}
   alias           = "dnsmanagement"
-  subscription_id = coalesce(var.management_dns_subscription_id, length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : "")
+  subscription_id = coalesce(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, var.management_dns_subscription_id, length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : "")
   client_id       = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_id : null
   client_secret   = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_secret : null
   tenant_id       = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.tenant_id : null

From 81779865198e3b2a4e8393fe2003aca31d1afdb0 Mon Sep 17 00:00:00 2001
From: Harm Jan Stam
Date: Fri, 16 Feb 2024 19:02:18 +0100
Subject: [PATCH 277/607] Suppress scs installation lines when return code is 0 (#550)

---
 deploy/ansible/playbook_05_00_00_sap_scs_install.yaml | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml b/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml
index 37f275a4bb..c3a7811996 100644
--- a/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml
+++ b/deploy/ansible/playbook_05_00_00_sap_scs_install.yaml
@@ -196,6 +196,9 @@
 #                                                                              #
 # -------------------------------------+---------------------------------------8
 - name: "SCS Installation Playbook: - Standalone SCS Setup"
+  when:
+    - not scs_high_availability
+    - "'scs' in supported_tiers"
   block:
     - name: "SCS Installation Playbook: Define this SID"
      ansible.builtin.set_fact:
@@ -388,11 +391,6 @@
       tags:
         - 5.0.0-scs-install

-  when:
-    - not scs_high_availability
-    - "'scs' in supported_tiers"
-
-
 # /*---------------------------------------------------------------------------8
 # |                                                                            |
 # |         Playbook for SAP SCS HA and Pacemaker Resources                    |
@@ -588,6 +586,7 @@
       when:
        - scs_installation.stdout_lines is defined
        - scs_installation.rc is defined
+        - scs_installation.rc > 0
        - '"ERROR" in item'

     - name: "SCS HA Installation Playbook: - Show errors from SCS installation"
@@ -603,8 +602,8 @@
       when:
        - ers_installation.stdout_lines is defined
        - ers_installation.rc is defined
-        - '"ERROR" in item'
        - ers_installation.rc > 0
+        - '"ERROR" in item'

     - name: "SCS HA Installation Playbook: - Show errors from ERS installation"
       ansible.builtin.debug:

From 1e0fb208264b993e420a59d64e08803b3937ae9c Mon Sep 17 00:00:00 2001
From: Harm Jan Stam
Date: Fri, 16 Feb 2024 19:06:46 +0100
Subject: [PATCH 278/607] Only create/copy SSL cert when DB2 SSL communication is used (#552)

* Only create/copy SSL cert when DB2 SSL communication is used
Database encryption and SSL communication are separate things and can be
configured with the BOM parameters individually. By creating a fact that
contains the existence of the encryption and/or SSL keystore the relevant
tasks can be imported.

* Remove db2_instance_type and db2_abap_connect_user variables

db2_instance_type has a default of ABAP in the code but because it's a
set_fact you can't override it from a vars_file. db2_abap_connect_user isn't
used in the codebase or the BOM templates.
---
 .../tasks/4.2.1.0-db2_ha_install_primary.yml  | 26 ++++++++--
 .../tasks/4.2.1.3-db2_restore_secondary.yml   | 31 +----------
 .../tasks/4.2.1.4-db2_haparameters.yaml       |  6 +--
 .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 27 +----------
 .../4.2.1.9-db2_generate_distribute_ssl.yml   | 47 ++-----------------
 .../4.2.1-db2-hainstall/tasks/main.yml        | 30 +++++-------
 .../2.5-sap-users/tasks/main.yaml             |  2 -
 .../tasks/2.6.3-oracle-observer.yaml          |  1 -
 deploy/ansible/vars/ansible-input-api.yaml    |  3 --
 9 files changed, 43 insertions(+), 130 deletions(-)

diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml
index 3db6b2e8dc..b5d9b88798 100644
--- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml
+++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.0-db2_ha_install_primary.yml
@@ -15,8 +15,6 @@
 # TODO: Considerations
 ---
-
-# Set BOM facts for SAP DB2 Install - Refer to sap-automation/deploy/ansible/BOM-catalog/ERP6_EHP8_LNX_DB2UDB_11_5_v0001ms
 - name: "SAP DB2 Install: Preparation"
   block:
     - name: "SAP DB2 Install: Set BOM facts"
@@ -146,8 +144,6 @@
           msg: "SAP DB2 Installation succeeded."

 # TBC - Add another check to remove the contents of /sapmnt/ if installation fails
-
-
     - name: "DB2 Install: flag"
       ansible.builtin.file:
         path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sap_deployment_db2.txt"
@@ -172,6 +168,28 @@
         state: touch
         mode: 0755
       when: db2_lock_escalation.rc == 0
+
+    - name: "DB2: Stat if the encryption keystore exists"
+      ansible.builtin.stat:
+        path: /db2/db2{{ db_sid | lower }}/keystore/sapdb2{{ db_sid | lower }}_db_encr.p12
+      register: db2_encryption_keystore_file_stat
+
+    - name: "DB2: Stat if the SSL keystore exists"
+      ansible.builtin.stat:
+        path: /db2/db2{{ db_sid | lower }}/keystore/sapdb2{{ db_sid | lower }}_ssl_comm.kdb
+      register: db2_ssl_keystore_file_stat
+
+    - name: "DB2: create db_encrypted and ssl_communication variables"
+      ansible.builtin.set_fact:
+        db_encrypted: "{{ db2_encryption_keystore_file_stat.stat.exists }}"
+        ssl_communication: "{{ db2_ssl_keystore_file_stat.stat.exists }}"
+
+    - name: "DB2: Debug encrypted database and SSL communication"
+      ansible.builtin.debug:
+        msg:
+          - "Database encrypted: {{ db_encrypted }}"
+          - "SSL communication: {{ ssl_communication }}"
+
     - name: "DB2 Install: check if ARM Deployment done"
       ansible.builtin.stat:
         path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_db_arm.txt"
diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml
index d7bd544f59..f0eb0f3394 100644
--- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml
+++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.3-db2_restore_secondary.yml
@@ -78,8 +78,7 @@
   failed_when: db2_restore_result.rc > 2
   when:
     - db2_started.rc == 0
-    -
hostvars[primary_instance_name]['db_encrypted'] is defined - - not hostvars[primary_instance_name]['db_encrypted'] + - not hostvars[primary_instance_name]['db_encrypted'] | default(false) # ######### ########### End of Restore without Encryption #################################### # ##################### Start of Restore with Encryption ################################## @@ -93,35 +92,9 @@ failed_when: db2_restore_result.rc > 2 when: - db2_started.rc == 0 - - hostvars[primary_instance_name]['db_encrypted'] is defined - - hostvars[primary_instance_name]['db_encrypted'] + - hostvars[primary_instance_name]['db_encrypted'] | default(false) # ######### ########### End of Restore with Encryption #################################### when: - ansible_hostname == secondary_instance_name become: true become_user: db2{{ db_sid | lower }} - -# #################Start of Restore with Encryption############################ -# Restore with Encryption - commented now till we decide on the DB2 encrpytion Strategy -# - name: Import Master Key -# shell: >- -# gsk8capicmd_64 -cert -import -db {{ db2standby_deployment_source_keystorefile }}.p12 -target \ -# /db2/db2{{ db2deploy_sid | lower }}/keystore/sapdb2{{ db2deploy_sid | lower }}_db_encr.p12gsk8capicmd_64 -cert -import \ -# -db {{ db2standby_deployment_source_keystorefile }}.p12 -target /db2/db2{{ db2deploy_sid | lower }}/keystore/sapdb2 \ -# {{ db2deploy_sid | lower }}_db_encr.p12 -# become: true -# become_user: db2id2 -# when: -# - db2standby_deployment_encryption == 'true' - -# - name: Restore with encryption -# shell: >- -# db2 restore database {{ db2deploy_sid }} from {{ db2standby_deployment_backupdir }} \ -# encrypt cipher {{ db2standby_deployment_ciphertype }} key length {{ db2standby_deployment_cipherkeylength }} master key label \ -# sap_db2{{ db2deploy_sid | lower }}_{{ db2standby_deployment_standbyhostname }}_dbencr_000 -# become: true -# become_user: db2id2 -# when: -# - db2standby_deployment_encryption == 'true' - -# ######### END of Restore with Encryption ################################ diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml index 726781485b..57ed0a7ec7 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.4-db2_haparameters.yaml @@ -194,11 +194,7 @@ # ################### End of Section for Secondary DB ###################### - name: "DB2 DB - HADR SSL Configuration" - when: - - db2_instance_type == 'ABAP' - - db2_ssl_label is defined - - hostvars[primary_instance_name]['db_encrypted'] is defined - - hostvars[primary_instance_name]['db_encrypted'] + when: db2_ssl_label is defined become: true become_user: db2{{ db_sid | lower }} block: diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 376cdd0f18..6189f9c773 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -13,35 +13,16 @@ - sapdb2{{ db_sid | lower }}_db_encr.p12 - sapdb2{{ db_sid | lower }}_db_encr.sth -- name: "DB2: Stat if the keystore files exist on Primary node" - ansible.builtin.stat: - path: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" - loop: "{{ keystore_files 
}}" - register: keystore_files_stat - when: ansible_hostname == primary_instance_name - -- name: "DB2: Determine if the database is encrypted" - ansible.builtin.set_fact: - db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" - when: ansible_hostname == primary_instance_name - -- name: "DB2: Debug if the database is encrypted" - ansible.builtin.debug: - msg: - - "Database is encrypted: {{ db_encrypted }}" - when: ansible_hostname == primary_instance_name - - name: "DB2: Fetch keystore files from Primary node to Controller" + when: ansible_hostname == primary_instance_name ansible.builtin.fetch: src: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" dest: /tmp/keystore_files/ flat: true loop: "{{ keystore_files }}" - when: - - ansible_hostname == primary_instance_name - - db_encrypted - name: "DB2: Copy keystore files from Controller to Secondary node" + when: ansible_hostname == secondary_instance_name ansible.builtin.copy: src: /tmp/keystore_files/{{ item }} dest: /db2/db2{{ db_sid | lower }}/keystore/ @@ -49,7 +30,3 @@ owner: db2{{ db_sid | lower }} group: db{{ db_sid | lower }}adm loop: "{{ keystore_files }}" - when: - - ansible_hostname == secondary_instance_name - - hostvars[primary_instance_name]['db_encrypted'] is defined - - hostvars[primary_instance_name]['db_encrypted'] diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml index 8ca31dec3d..b6548de8d9 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.9-db2_generate_distribute_ssl.yml @@ -1,29 +1,4 @@ --- - -- name: "DB2: Variable for keystore files" - ansible.builtin.set_fact: - keystore_files: - - sapdb2{{ db_sid | lower }}_db_encr.p12 - - sapdb2{{ db_sid | lower }}_db_encr.sth - -- name: "DB2: Stat if the keystore files exist on Primary node" - ansible.builtin.stat: - path: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" - loop: "{{ keystore_files }}" - register: keystore_files_stat - when: ansible_hostname == primary_instance_name - -- name: "DB2: Determine if the database is encrypted" - ansible.builtin.set_fact: - db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" - when: ansible_hostname == primary_instance_name - -- name: "DB2: Debug if the database is encrypted" - ansible.builtin.debug: - msg: - - "Database is encrypted: {{ db_encrypted }}" - when: ansible_hostname == primary_instance_name - - name: "DB2: variables for SSL certificate" ansible.builtin.set_fact: db2_ssl_cn: "{{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}.{{ sap_fqdn }}" @@ -31,11 +6,8 @@ db2_ssl_stash_file: sapdb2{{ db_sid | lower }}_ssl_comm.sth db2_ssl_label: sap_db2_{{ custom_db_virtual_hostname | default(db_virtual_hostname, true) }}_ssl_comm_000 - - name: "DB2 Primary DB: Generate SSL" - when: - - ansible_hostname == primary_instance_name - - db_encrypted + when: ansible_hostname == primary_instance_name become: true become_user: db2{{ db_sid | lower }} block: @@ -58,9 +30,7 @@ LD_LIBRARY_PATH: /db2/db2{{ db_sid | lower }}/sqllib/lib64:/db2/db2{{ db_sid | lower }}/sqllib/lib64/gskit:/db2/db2{{ db_sid | lower }}/sqllib/lib - name: "DB2 Primary DB - Copy SSL Certificate and Keystore files" - when: - - ansible_hostname == primary_instance_name - - db_encrypted + when: ansible_hostname == 
primary_instance_name
   block:
     - name: "DB2 Primary DB - Copy SSL certificate to SSL_client directory"
       ansible.builtin.copy:
@@ -80,11 +50,6 @@
         - "{{ db2_ssl_keydb_file }}"
         - "{{ db2_ssl_stash_file }}"
 
-    - name: "DB2 Primary DB: Flag to show that copied"
-      ansible.builtin.set_fact:
-        keystore_copied: true
-
-
     - name: "DB2 Primary DB: Update SSL certificate in db2cli.ini"
       ansible.builtin.lineinfile:
         path: /sapmnt/{{ sap_sid | upper }}/global/db6/db2cli.ini
@@ -92,9 +57,6 @@
         line: SSLServerCertificate=/usr/sap/{{ db_sid | upper }}/SYS/global/SSL_client/{{ db2_ssl_label }}.arm
 
 - name: "DB2 DB - Set SSL parameters"
-  when:
-    - hostvars[primary_instance_name]['db_encrypted'] is defined
-    - hostvars[primary_instance_name]['db_encrypted']
   become: true
   become_user: db2{{ db_sid | lower }}
   ansible.builtin.shell: |
@@ -108,10 +70,7 @@
     PATH: "{{ ansible_env.PATH }}:/db2/db2{{ db_sid | lower }}/sqllib/gskit/bin"
 
 - name: "DB2: Copy keystore files from Controller to Secondary node"
-  when:
-    - ansible_hostname == secondary_instance_name
-    - hostvars[primary_instance_name]['keystore_copied'] is defined
-    - hostvars[primary_instance_name]['keystore_copied']
+  when: ansible_hostname == secondary_instance_name
   ansible.builtin.copy:
     src: /tmp/keystore_files/{{ item }}
     dest: /db2/db2{{ db_sid | lower }}/keystore/
diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml
index 4a3008312b..4e5458435a 100644
--- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml
+++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/main.yml
@@ -18,6 +18,7 @@
 # +------------------------------------4--------------------------------------*/
 
 - name: "DB2 Primary System Setup"
+  when: ansible_hostname == primary_instance_name
   block:
     - name: "DB2 Primary System Install"
       ansible.builtin.import_tasks: 4.2.1.0-db2_ha_install_primary.yml
@@ -25,13 +26,14 @@
     - name: "DB2 - Take offline backup of Primary DB"
       ansible.builtin.import_tasks: 4.2.1.1-db2_primary_backup.yml
 
-    - name: "DB2 - Keystore Setup on Primary node"
+    - name: "DB2 - Fetch Keystore from Primary node"
+      when: hostvars[primary_instance_name]['db_encrypted'] | default(false)
       ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml
 
-    # SSL communication via SWPM is only available if you're using AS ABAP
+    # SSL communication via SWPM is only available if you're using AS ABAP and configured UseDb2SSLClientServerComm in the BOM
     # Setting up SSL for AS JAVA requires manual actions with the J2EE Config tool
     - name: "DB2 - Generate SSL on Primary node"
-      when: db2_instance_type == 'ABAP'
+      when: hostvars[primary_instance_name]['ssl_communication'] | default(false)
      ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml
   always:
     - name: "DB2 Primary System Install: result"
@@ -40,28 +42,24 @@
         verbosity: 4
 
     - name: "DB2 Primary System Install: result"
-      ansible.builtin.fail:
-        msg: db2_installation.stdout_lines
       when:
         - db2_installation.rc is defined
         - db2_installation.rc != 0
-
-  when:
-    - ansible_hostname == primary_instance_name
+      ansible.builtin.fail:
+        msg: db2_installation.stdout_lines
 
 - name: "DB2 Secondary System Setup"
+  when: ansible_hostname == secondary_instance_name
   block:
-
     - name: "DB2 Secondary System Install"
       ansible.builtin.import_tasks: 4.2.1.2-db2_ha_install_secondary.yml
 
-    - name: "DB2 - Keystore Setup on Secondary node"
+    - name: "DB2 - Copy Keystore to Secondary node"
+      when: hostvars[primary_instance_name]['db_encrypted'] | default(false) 
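+      # db_encrypted is a fact set on the primary node (derived from the
+      # presence of the DB2 keystore files); reading it via hostvars with
+      # "| default(false)" folds the old two-step guard ("is defined" plus
+      # truthiness) into a single condition that is safely false when the
+      # fact was never set.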
ansible.builtin.import_tasks: 4.2.1.8-db2_copy_keystore_files.yml - # SSL communication via SWPM is only available if you're using AS ABAP - # Setting up SSL voor AS JAVA requires manual actions with the J2EE Config tool - name: "DB2 - Distribute SSL certificate to Secondary node" - when: db2_instance_type == 'ABAP' + when: hostvars[primary_instance_name]['ssl_communication'] | default(false) ansible.builtin.import_tasks: 4.2.1.9-db2_generate_distribute_ssl.yml - name: "DB2 - Restore Secondary with backup of Primary DB" @@ -73,13 +71,11 @@ verbosity: 4 - name: "DB2 Primary System Install: result" - ansible.builtin.fail: - msg: db2_installation.stdout_lines when: - db2_installation.rc is defined - db2_installation.rc != 0 - when: - - ansible_hostname == secondary_instance_name + ansible.builtin.fail: + msg: db2_installation.stdout_lines - name: "DB2 - Apply high availability database parameters on Primary & Secondary System" ansible.builtin.import_tasks: 4.2.1.4-db2_haparameters.yaml diff --git a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index 93b4370432..fb70b77aef 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -40,8 +40,6 @@ - { group: 'sapsys', gid: '{{ sapsys_gid }}' } - { group: 'sapinst', gid: '{{ sapinst_gid }}' } - - # Create Groups for Oracle ASM. - name: "2.5.1 SAP Users: - Create SAP Groups for Oracle ASM" ansible.builtin.group: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml index 563452f9c7..6fc2d14e2a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml @@ -107,4 +107,3 @@ - tier == 'sapos' - usr_sap_install_mountpoint is defined - node_tier == 'observer' - diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 2215d455ef..8704dc2ebc 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -99,9 +99,6 @@ db2sidadm_uid: 3004 db2sapsid_uid: 3005 # Uid of the database connect user db2hadr_port1: 51012 db2hadr_port2: 51013 -# Name of the database connect user for ABAP. Default value is 'sap'. 
-db2_abap_connect_user: "" -db2_instance_type: "ABAP" tmp_directory: "/var/tmp" url_internet: "https://azure.status.microsoft/en-us/status" # URL to use for internet access checks" From 0e97859119271ee9304522d0df2316cc1e16d9e4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 18 Feb 2024 22:33:42 +0200 Subject: [PATCH 279/607] Add the ability to provide custom subnets and ip addresses to Azure Firewall --- .../bootstrap/sap_deployer/module.tf | 2 ++ deploy/terraform/run/sap_deployer/module.tf | 2 ++ .../run/sap_deployer/tfvar_variables.tf | 4 ++-- .../modules/sap_deployer/firewall.tf | 23 +++++++++++++++++++ .../modules/sap_deployer/variables_global.tf | 12 +++++++++- 5 files changed, 40 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index e71123286c..661b63985d 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -35,6 +35,8 @@ module "sap_deployer" { enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults firewall_deployment = local.firewall_deployment + firewall_rule_subnets = local.firewall_rule_subnets + firewall_allowed_ipaddresses = local.firewall_allowed_ipaddresses infrastructure = local.infrastructure key_vault = local.key_vault management_dns_resourcegroup_name = var.management_dns_resourcegroup_name diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index aaa1b2cc35..63d4142613 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -35,6 +35,8 @@ module "sap_deployer" { enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults firewall_deployment = local.firewall_deployment + firewall_rule_subnets = local.firewall_rule_subnets + firewall_allowed_ipaddresses = local.firewall_allowed_ipaddresses infrastructure = local.infrastructure key_vault = local.key_vault management_dns_resourcegroup_name = var.management_dns_resourcegroup_name diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 1afd9c534b..3d555ceba8 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -120,12 +120,12 @@ variable "firewall_deployment" { variable "firewall_rule_subnets" { description = "List of subnets that are part of the firewall rule" - default = null + default = [] } variable "firewall_allowed_ipaddresses" { description = "List of allowed IP addresses to be part of the firewall rule" - default = null + default = [] } #######################################4#######################################8 diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf index e5bb87f1ee..4312cd4243 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf @@ -147,6 +147,7 @@ resource "azurerm_firewall_network_rule_collection" "firewall-azure" { ) priority = random_integer.priority.result action = "Allow" + rule { name = "Azure-Cloud" source_addresses = ["*"] @@ -161,4 +162,26 @@ resource 
"azurerm_firewall_network_rule_collection" "firewall-azure" { destination_addresses = ["*"] protocols = ["Any"] } + dynamic "rule" { + for_each = range(length(var.firewall_rule_subnets) > 0 ? 1 : 0) + content { + name = "CustomSubnets" + source_addresses = var.firewall_rule_subnets + destination_ports = ["*"] + destination_addresses = ["*"] + protocols = ["Any"] + } + } + + dynamic "rule" { + for_each = range(length(var.firewall_allowed_ipaddresses) > 0 ? 1 : 0) + content { + name = "CustomIpAddresses" + source_addresses = var.firewall_allowed_ipaddresses + destination_ports = ["*"] + destination_addresses = ["*"] + protocols = ["Any"] + } + } + } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf index 94af1945e2..411b058ffd 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf @@ -10,7 +10,6 @@ variable "bastion_deployment" { description = "Value indicating if Azur variable "bastion_sku" { description = "The SKU of the Bastion Host. Accepted values are Basic or Standard" } variable "bootstrap" { description = "Defines the phase of deployment" } variable "configure" { description = "Value indicating if deployer should be configured" } -variable "firewall_deployment" { description = "Boolean flag indicating if an Azure Firewall should be deployed" } variable "infrastructure" { description = "Dictionary of information about the common infrastructure" } variable "naming" { description = "Defines the names for the resources" } variable "options" { description = "Dictionary of miscallaneous parameters" } @@ -20,6 +19,17 @@ variable "tf_version" { description = "Terraform version to ins variable "use_private_endpoint" { description = "Boolean value indicating if private endpoint should be used for the deployment" } variable "use_service_endpoint" { description = "Boolean value indicating if service endpoints should be used for the deployment" } +######################################################################################### +# # +# Firewall # +# # +######################################################################################### + + +variable "firewall_deployment" { description = "Boolean flag indicating if an Azure Firewall should be deployed" } +variable "firewall_rule_subnets" { description = "List of subnets that are part of the firewall rule" } +variable "firewall_allowed_ipaddresses" { description = "List of allowed IP addresses to be part of the firewall rule" } + ######################################################################################### # # # KeyVault # From 970b9f34cc5d850d433eabf4d42e195192cd6085 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 18 Feb 2024 22:38:08 +0200 Subject: [PATCH 280/607] Resolve the login issue when running the removal --- deploy/pipelines/10-remover-terraform.yaml | 46 +++++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/10-remover-terraform.yaml b/deploy/pipelines/10-remover-terraform.yaml index 9d43b3f720..de532a7a30 100644 --- a/deploy/pipelines/10-remover-terraform.yaml +++ b/deploy/pipelines/10-remover-terraform.yaml @@ -301,8 +301,28 @@ stages: fi else echo -e "$green --- Running on deployer ---$reset" - source /etc/profile.d/deploy_server.sh - export ARM_USE_MSI=true + + if [ $LOGON_USING_SPN == "true" ]; then + echo "Using SPN" + + export 
ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + else + export ARM_USE_MSI=true + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_TENANT_ID + az login --identity --allow-no-subscriptions --output none + fi fi echo -e "$green--- Set variables ---$reset" @@ -649,6 +669,28 @@ stages: fi else echo -e "$green --- Running on deployer ---$reset" + + if [ $LOGON_USING_SPN == "true" ]; then + echo "Using SPN" + + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + else + export ARM_USE_MSI=true + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_TENANT_ID + az login --identity --allow-no-subscriptions --output none + fi fi echo -e "$green--- Run the remover script that destroys the SAP workload zone (landscape) ---$reset" From ee375cbcc0f68682458360f0b5220c057efacce0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 10:33:19 +0200 Subject: [PATCH 281/607] Add skip_provider_registration flag to azurerm provider --- deploy/terraform/run/sap_landscape/providers.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 0edbd14d22..98de52cc7c 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -16,6 +16,7 @@ provider "azurerm" { features {} subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null use_msi = var.use_spn ? 
false : true + skip_provider_registration = true } provider "azurerm" { From da4bd84f92ae400d8f1071156cba31157155c555 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 10:50:22 +0200 Subject: [PATCH 282/607] Update dns_zone_names in tfvar_variables.tf --- .../terraform/bootstrap/sap_deployer/tfvar_variables.tf | 8 +++++--- deploy/terraform/bootstrap/sap_library/tfvar_variables.tf | 7 ++++--- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 7 ++++--- deploy/terraform/run/sap_landscape/tfvar_variables.tf | 7 ++++--- .../modules/sap_deployer/variables_global.tf | 7 ++++--- .../modules/sap_landscape/variables_global.tf | 7 ++++--- .../modules/sap_library/variables_global.tf | 7 ++++--- .../sap_system/common_infrastructure/variables_global.tf | 7 ++++--- .../modules/sap_system/output_files/variables_global.tf | 7 ++++--- 9 files changed, 37 insertions(+), 27 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index c5a5c5c1b0..d610731e84 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -440,9 +440,11 @@ variable "dns_zone_names" { type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + } } diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 10d5c3b7d4..4c2b36c2ed 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -273,9 +273,10 @@ variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 3d555ceba8..7a7e6b14f8 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -443,9 +443,10 @@ variable "dns_zone_names" { type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 7b3e4d4c4f..002b548359 100644 --- 
a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -471,9 +471,10 @@ variable "dns_zone_names" { type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf index 411b058ffd..f11374a850 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf @@ -99,9 +99,10 @@ variable "dns_zone_names" { type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index 697018fc9f..57b46ad539 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -250,9 +250,10 @@ variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf index f006eeb4cf..8c0f609719 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf @@ -73,9 +73,10 @@ variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 586ec08bb0..0b2ac0df03 100644 --- 
a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -246,9 +246,10 @@ variable "dns_zone_names" { type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index d0f8b02475..adc1f13c80 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -59,9 +59,10 @@ variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" + "file_dns_zone_name" = "privatelink.file.core.windows.net" + "blob_dns_zone_name" = "privatelink.blob.core.windows.net" + "table_dns_zone_name" = "privatelink.table.core.windows.net" + "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" } } variable "dns" { From adad6fc383104bfd1990daf928c32d7ec766688c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 10:53:47 +0200 Subject: [PATCH 283/607] Update New-SDAFDevopsProject.ps1 script --- deploy/scripts/New-SDAFDevopsProject.ps1 | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index e8878cde33..e4d13be380 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -31,8 +31,6 @@ if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } $versionLabel = "v3.11.0.0" - - az logout az account clear @@ -46,9 +44,15 @@ else { # Check if access to the Azure DevOps organization is available and prompt for PAT if needed # Exact permissions required, to be validated, and included in the Read-Host text. 
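+# A sketch of the intended non-interactive flow (illustrative only, not part
+# of the upstream change): export the PAT before calling the script so the
+# interactive Read-Host prompt below is skipped.
+#   $Env:AZURE_DEVOPS_EXT_PAT = "<personal-access-token>"
+#   ./New-SDAFDevopsProject.ps1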
+
+if ($Env:AZURE_DEVOPS_EXT_PAT.Length -gt 0) {
+  az devops login --organization $ADO_Organization
+}
+
 $checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
 if ($checkPAT.Length -eq 0) {
   $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization"
+  az devops login --organization $ADO_Organization
   $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
   if ($verifyPAT.Length -eq 0) {
     Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press <enter> to exit"
From f42200e669fc04852288142a3bec4b5f4a202f27 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 19 Feb 2024 12:24:38 +0200
Subject: [PATCH 284/607] Account for change in variable type

---
 .../terraform-units/modules/sap_deployer/firewall.tf | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf
index 4312cd4243..f507c1fc04 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf
@@ -163,7 +163,7 @@ resource "azurerm_firewall_network_rule_collection" "firewall-azure" {
       protocols             = ["Any"]
     }
   dynamic "rule" {
-    for_each = range(length(var.firewall_rule_subnets) > 0 ? 1 : 0)
+    for_each = range(length(try(var.firewall_rule_subnets, [])) > 0 ? 1 : 0)
     content {
       name                  = "CustomSubnets"
       source_addresses      = var.firewall_rule_subnets
@@ -174,7 +174,7 @@ resource "azurerm_firewall_network_rule_collection" "firewall-azure" {
     }
 
   dynamic "rule" {
-    for_each = range(length(var.firewall_allowed_ipaddresses) > 0 ? 1 : 0)
+    for_each = range(length(try(var.firewall_allowed_ipaddresses, [])) > 0 ? 1 : 0)
     content {
       name                  = "CustomIpAddresses"
       source_addresses      = var.firewall_allowed_ipaddresses
From b8ce0caa92571e2ab61657f3c445bf7d9cd833f4 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 19 Feb 2024 12:55:44 +0200
Subject: [PATCH 285/607] Refactor authentication logic in New-SDAFDevopsProject.ps1 script

---
 deploy/scripts/New-SDAFDevopsProject.ps1 | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1
index e4d13be380..24dc2cac36 100644
--- a/deploy/scripts/New-SDAFDevopsProject.ps1
+++ b/deploy/scripts/New-SDAFDevopsProject.ps1
@@ -46,13 +46,25 @@ else {
 # Check if access to the Azure DevOps organization is available and prompt for PAT if needed
 # Exact permissions required, to be validated, and included in the Read-Host text. 
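 # The try/catch below keeps a failing "az devops login" (for example an
 # expired or invalid PAT) from terminating the script, so execution can fall
 # through to the interactive PAT prompt that follows.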
 
 if ($Env:AZURE_DEVOPS_EXT_PAT.Length -gt 0) {
-  az devops login --organization $ADO_Organization
+  Write-Host "Using the provided Personal Access Token (PAT) to authenticate to the Azure DevOps organization $ADO_Organization" -ForegroundColor Yellow
+  try {
+    az devops login --organization $ADO_Organization
+  }
+  catch {
+    <#Do this if a terminating exception happens#>
+  }
+
 }
 
 $checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
 if ($checkPAT.Length -eq 0) {
   $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization"
-  az devops login --organization $ADO_Organization
+  try {
+    az devops login --organization $ADO_Organization
+  }
+  catch {
+    <#Do this if a terminating exception happens#>
+  }
   $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
   if ($verifyPAT.Length -eq 0) {
     Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press <enter> to exit"
From 66ea55505ce471785969abd540d113939507c58f Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 19 Feb 2024 12:58:45 +0200
Subject: [PATCH 286/607] Fix error handling in New-SDAFDevopsProject.ps1 script

---
 deploy/scripts/New-SDAFDevopsProject.ps1 | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1
index 24dc2cac36..8a168087a9 100644
--- a/deploy/scripts/New-SDAFDevopsProject.ps1
+++ b/deploy/scripts/New-SDAFDevopsProject.ps1
@@ -51,7 +51,7 @@ if ($Env:AZURE_DEVOPS_EXT_PAT.Length -gt 0) {
     az devops login --organization $ADO_Organization
   }
   catch {
-    <#Do this if a terminating exception happens#>
+    $_
   }
 }
 
@@ -63,7 +63,7 @@ if ($checkPAT.Length -eq 0) {
     az devops login --organization $ADO_Organization
   }
   catch {
-    <#Do this if a terminating exception happens#>
+    $_
   }
   $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
   if ($verifyPAT.Length -eq 0) {
From f903e9ad1e5a025b169291177e51b88a45865d08 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 19 Feb 2024 14:15:30 +0200
Subject: [PATCH 287/607] Update use_webapp condition in sap_library module.tf
 and storage_accounts.tf

---
 deploy/terraform/run/sap_library/module.tf                  | 2 +-
 .../terraform-units/modules/sap_library/storage_accounts.tf | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/deploy/terraform/run/sap_library/module.tf b/deploy/terraform/run/sap_library/module.tf
index ae44dda7ef..95df96a702 100644
--- a/deploy/terraform/run/sap_library/module.tf
+++ b/deploy/terraform/run/sap_library/module.tf
@@ -26,7 +26,7 @@ module "sap_library" {
   storage_account_tfstate                    = local.storage_account_tfstate
   use_custom_dns_a_registration              = var.use_custom_dns_a_registration
   use_private_endpoint                       = var.use_private_endpoint
-  use_webapp                                 = var.use_webapp
+  use_webapp                                 = var.use_webapp || length(try(data.terraform_remote_state.deployer[0].outputs.webapp_id,"")) > 0
   Agent_IP                                   = var.Agent_IP
 }
 
diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
index ca7d70cb9b..5ce47fcadd 100644
--- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
+++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf
@@ -189,7 +189,7 @@ resource "azurerm_private_endpoint" "storage_tfstate" {
 
 resource "azurerm_private_endpoint" "table_tfstate" 
{ provider = azurerm.main - count = var.use_private_endpoint && !local.sa_tfstate_exists ? 1 : 0 + count = var.use_private_endpoint && !local.sa_tfstate_exists && var.use_webapp ? 1 : 0 name = format("%s%s-table%s", var.naming.resource_prefixes.storage_private_link_tf, local.prefix, @@ -232,7 +232,7 @@ resource "azurerm_private_endpoint" "table_tfstate" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration && var.use_webapp ? 1 : 0) content { name = var.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.table[0].id : data.azurerm_private_dns_zone.table[0].id] From 81818849e366d04bb886298cdbefdce69f781c2b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 16:36:34 +0200 Subject: [PATCH 288/607] Add agent IP to deployment script --- deploy/scripts/install_deployer.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/scripts/install_deployer.sh b/deploy/scripts/install_deployer.sh index 69975d8ce5..bbf6648062 100755 --- a/deploy/scripts/install_deployer.sh +++ b/deploy/scripts/install_deployer.sh @@ -139,6 +139,8 @@ export TF_DATA_DIR="${param_dirname}"/.terraform this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 export TF_VAR_Agent_IP=$this_ip +echo "Agent IP: $this_ip" + ok_to_proceed=false new_deployment=false From a2901712c1b60ffca5419832491dcc659596a2de Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 16:57:52 +0200 Subject: [PATCH 289/607] Refactor environment variable names and update pool name --- deploy/scripts/New-SDAFDevopsProject.ps1 | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 8a168087a9..c3ddfcd65a 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -16,14 +16,14 @@ function Show-Menu($data) { #region Initialize # Initialize variables from Environment variables -$ADO_Organization = $Env:SDAF_ADO_ORGANIZATION -$ADO_Project = $Env:SDAF_ADO_PROJECT -$ARM_TENANT_ID = $Env:ARM_TENANT_ID -$Control_plane_code = $Env:SDAF_CONTROL_PLANE_CODE -$Control_plane_subscriptionID = $Env:SDAF_ControlPlaneSubscriptionID -$ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName -$Workload_zone_code = $Env:SDAF_WORKLOAD_ZONE_CODE -$Workload_zone_subscriptionID = $Env:SDAF_WorkloadZoneSubscriptionID +$ADO_Organization = $Env:SDAF_ADO_ORGANIZATION +$ADO_Project = $Env:SDAF_ADO_PROJECT +$ARM_TENANT_ID = $Env:ARM_TENANT_ID +$Control_plane_code = $Env:SDAF_CONTROL_PLANE_CODE +$Control_plane_subscriptionID = $Env:SDAF_ControlPlaneSubscriptionID +$ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName +$Workload_zone_code = $Env:SDAF_WORKLOAD_ZONE_CODE +$Workload_zone_subscriptionID = $Env:SDAF_WorkloadZoneSubscriptionID $Workload_zoneSubscriptionName = $Env:SDAF_WorkloadZoneSubscriptionName if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } @@ -162,7 +162,13 @@ else { $ControlPlanePrefix = "SDAF-" + $Control_plane_code $WorkloadZonePrefix = "SDAF-" + $Workload_zone_code -$Pool_Name = $ControlPlanePrefix + "-POOL" +if ($Env:SDAF_POOL_NAME.Length -eq 0) { + $Pool_Name = $ControlPlanePrefix + "-POOL" +} +else { + $Pool_Name = $Env:SDAF_POOL_NAME +} + $ApplicationName = $ControlPlanePrefix + "-configuration-app" From 
085086d61f2ee1c3dacba834bdd19cc4ed6f680b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 18:10:28 +0200 Subject: [PATCH 290/607] Update default values for firewall_rule_subnets and firewall_allowed_ipaddresses --- deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index d610731e84..a3d07e4723 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -120,12 +120,12 @@ variable "firewall_deployment" { variable "firewall_rule_subnets" { description = "List of subnets that are part of the firewall rule" - default = null + default = [] } variable "firewall_allowed_ipaddresses" { description = "List of allowed IP addresses to be part of the firewall rule" - default = null + default = [] } #######################################4#######################################8 From 9f8a2b7583d6984712c1f4df26f589f8ad936375 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 18:15:08 +0200 Subject: [PATCH 291/607] Update availability set ID in app tier VM resource --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index e7582b8717..fb589ed5a7 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -132,7 +132,7 @@ resource "azurerm_linux_virtual_machine" "app" { availability_set_id = var.application_tier.app_use_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? ( var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : ( - azurerm_availability_set.app[count.index % max(length(var.application_tier.avset_arm_ids), 1)].id + azurerm_availability_set.app[count.index % max(local.app_zone_count, 1)].id )) : ( null ) From daa5c41d1bcffae23cc3c9fda2dca10f87447f10 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 18:57:11 +0200 Subject: [PATCH 292/607] Change cache path --- deploy/scripts/advanced_state_management.sh | 8 +++++--- deploy/scripts/helpers/script_helpers.sh | 7 ++++--- deploy/scripts/install_library.sh | 8 +++++--- deploy/scripts/install_workloadzone.sh | 7 ++++--- deploy/scripts/installer.sh | 7 ++++--- deploy/scripts/remover.sh | 8 +++++--- 6 files changed, 27 insertions(+), 18 deletions(-) diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh index 69eb5bcbc9..d78c4de1d4 100755 --- a/deploy/scripts/advanced_state_management.sh +++ b/deploy/scripts/advanced_state_management.sh @@ -196,11 +196,13 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/ system_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -if [ ! -d "$HOME/.terraform.d/plugin-cache" ] +#Plugins +if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ] then - mkdir "$HOME/.terraform.d/plugin-cache" + mkdir /opt/terraform/.terraform.d/plugin-cache fi -export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" +export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache + set_executing_user_environment_variables "none" diff --git a/deploy/scripts/helpers/script_helpers.sh b/deploy/scripts/helpers/script_helpers.sh index 49594bea44..3bda197bb9 100755 --- a/deploy/scripts/helpers/script_helpers.sh +++ b/deploy/scripts/helpers/script_helpers.sh @@ -391,11 +391,12 @@ function validate_dependencies { return 2 #No such file or directory fi # Set Terraform Plug in cache - if [ ! -d "$HOME/.terraform.d/plugin-cache" ] + if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] then - mkdir -p "$HOME/.terraform.d/plugin-cache" + mkdir /opt/terraform/.terraform.d/plugin-cache fi - export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache + az --version >stdout.az 2>&1 az=$(grep "azure-cli" stdout.az) diff --git a/deploy/scripts/install_library.sh b/deploy/scripts/install_library.sh index 260031d909..f4f74ce0c9 100755 --- a/deploy/scripts/install_library.sh +++ b/deploy/scripts/install_library.sh @@ -165,11 +165,13 @@ generic_config_information="${automation_config_directory}"config library_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -if [ ! -d "$HOME/.terraform.d/plugin-cache" ] +Plugins +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] then - mkdir "$HOME/.terraform.d/plugin-cache" + mkdir /opt/terraform/.terraform.d/plugin-cache fi -export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" +export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache + param_dirname=$(pwd) diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 0ad47b625c..53f3bb6b8a 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -556,11 +556,12 @@ ok_to_proceed=false new_deployment=false #Plugins -if [ ! -d "$HOME/.terraform.d/plugin-cache" ] +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] then - mkdir -p "$HOME/.terraform.d/plugin-cache" + mkdir /opt/terraform/.terraform.d/plugin-cache fi -export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" +export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache + root_dirname=$(pwd) echo " subscription_id=${STATE_SUBSCRIPTION}" diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 178802fa95..f93dc461c1 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -156,12 +156,13 @@ if [ 1 == $called_from_ado ] ; then echo "Agent IP: $this_ip" fi + #Plugins -if [ ! -d "$HOME/.terraform.d/plugin-cache" ] +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] then - mkdir "$HOME/.terraform.d/plugin-cache" + mkdir /opt/terraform/.terraform.d/plugin-cache fi -export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" +export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache parallelism=10 diff --git a/deploy/scripts/remover.sh b/deploy/scripts/remover.sh index 2c49319a0a..3a3fb38bcb 100755 --- a/deploy/scripts/remover.sh +++ b/deploy/scripts/remover.sh @@ -225,10 +225,12 @@ echo "Deployment region code: $region_code" key=$(echo "${parameterfile_name}" | cut -d. -f1) #Plugins -if [ ! -d "$HOME/.terraform.d/plugin-cache" ]; then - mkdir -p "$HOME/.terraform.d/plugin-cache" +if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ] +then + mkdir /opt/terraform/.terraform.d/plugin-cache fi -export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache" +export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache + init "${automation_config_directory}" "${generic_config_information}" "${system_config_information}" var_file="${parameterfile_dirname}"/"${parameterfile}" From ef264f279ba94b7a05f712f599f2cb795d7cd66d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 19:08:24 +0200 Subject: [PATCH 293/607] Refactor Terraform plugin cache setup --- deploy/scripts/advanced_state_management.sh | 6 +----- deploy/scripts/helpers/script_helpers.sh | 5 +---- deploy/scripts/install_library.sh | 5 +---- deploy/scripts/install_workloadzone.sh | 6 ++---- deploy/scripts/installer.sh | 5 +---- 5 files changed, 6 insertions(+), 21 deletions(-) diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh index d78c4de1d4..6b0b900a79 100755 --- a/deploy/scripts/advanced_state_management.sh +++ b/deploy/scripts/advanced_state_management.sh @@ -196,11 +196,7 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/ system_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -#Plugins -if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] -then - mkdir /opt/terraform/.terraform.d/plugin-cache -fi +mkdir -p /opt/terraform/.terraform.d/plugin-cache export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/helpers/script_helpers.sh b/deploy/scripts/helpers/script_helpers.sh index 3bda197bb9..ceb986f5fa 100755 --- a/deploy/scripts/helpers/script_helpers.sh +++ b/deploy/scripts/helpers/script_helpers.sh @@ -391,10 +391,7 @@ function validate_dependencies { return 2 #No such file or directory fi # Set Terraform Plug in cache - if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] - then - mkdir /opt/terraform/.terraform.d/plugin-cache - fi + mkdir -p /opt/terraform/.terraform.d/plugin-cache export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_library.sh b/deploy/scripts/install_library.sh index f4f74ce0c9..da5e5daca7 100755 --- a/deploy/scripts/install_library.sh +++ b/deploy/scripts/install_library.sh @@ -166,10 +166,7 @@ library_config_information="${automation_config_directory}""${environment}""${re #Plugins Plugins -if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] -then - mkdir /opt/terraform/.terraform.d/plugin-cache -fi +mkdir -p /opt/terraform/.terraform.d/plugin-cache export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 53f3bb6b8a..2a4a3e36a0 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -556,10 +556,8 @@ ok_to_proceed=false new_deployment=false #Plugins -if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] -then - mkdir /opt/terraform/.terraform.d/plugin-cache -fi +mkdir -p /opt/terraform/.terraform.d/plugin-cache + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache root_dirname=$(pwd) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index f93dc461c1..8bf21d6e61 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -158,10 +158,7 @@ fi #Plugins -if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ] -then - mkdir /opt/terraform/.terraform.d/plugin-cache -fi +mkdir -p /opt/terraform/.terraform.d/plugin-cache export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache From 4ea01b9be81a40e44390c30f92fea815f750c3b4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 19:12:37 +0200 Subject: [PATCH 294/607] Add sudo and chown commands to create and set ownership of Terraform plugin cache directory --- deploy/scripts/advanced_state_management.sh | 3 ++- deploy/scripts/helpers/script_helpers.sh | 3 ++- deploy/scripts/install_library.sh | 4 ++-- deploy/scripts/install_workloadzone.sh | 3 ++- deploy/scripts/installer.sh | 4 +++- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh index 6b0b900a79..4a62ac7561 100755 --- a/deploy/scripts/advanced_state_management.sh +++ b/deploy/scripts/advanced_state_management.sh @@ -196,7 +196,8 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/ system_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/helpers/script_helpers.sh b/deploy/scripts/helpers/script_helpers.sh index ceb986f5fa..4d1f035297 100755 --- a/deploy/scripts/helpers/script_helpers.sh +++ b/deploy/scripts/helpers/script_helpers.sh @@ -391,7 +391,8 @@ function validate_dependencies { return 2 #No such file or directory fi # Set Terraform Plug in cache - mkdir -p /opt/terraform/.terraform.d/plugin-cache + sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache + sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_library.sh b/deploy/scripts/install_library.sh index da5e5daca7..9628f678eb 100755 --- a/deploy/scripts/install_library.sh +++ b/deploy/scripts/install_library.sh @@ -165,8 +165,8 @@ generic_config_information="${automation_config_directory}"config library_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -Plugins -mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 2a4a3e36a0..0df8e3455c 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -556,7 +556,8 @@ ok_to_proceed=false new_deployment=false #Plugins -mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 8bf21d6e61..6f94f95ecd 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -158,7 +158,9 @@ fi #Plugins -mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +sudo chown -R $USER:$USER /opt/terraform + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache From 
27948a933abcaf00e4e2cb7d03cf6922e5220fca Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 19:54:47 +0200 Subject: [PATCH 295/607] Add dependency on azurerm_private_dns_zone.table --- .../terraform-units/modules/sap_library/storage_accounts.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 5ce47fcadd..3a52759d3c 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -190,6 +190,7 @@ resource "azurerm_private_endpoint" "storage_tfstate" { resource "azurerm_private_endpoint" "table_tfstate" { provider = azurerm.main count = var.use_private_endpoint && !local.sa_tfstate_exists && var.use_webapp ? 1 : 0 + depends_on = [ azurerm_private_dns_zone.table ] name = format("%s%s-table%s", var.naming.resource_prefixes.storage_private_link_tf, local.prefix, From 5207799dc3c01f01c1acdf3a3fd1cbe94aa54d36 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 19:59:38 +0200 Subject: [PATCH 296/607] Update count condition in azurerm_private_dns_zone resource --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index d9196a16e1..c77fdfa31a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id,""))> 0 ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint && var.use_webapp ? 
1 : 0 depends_on = [ azurerm_resource_group.library ] From c01983dfda0e1afb3bd1143d8434804f06f250af Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 23:11:25 +0200 Subject: [PATCH 297/607] Update Azure CLI commands and remove unnecessary environment variables --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- deploy/pipelines/11-remover-arm-fallback.yaml | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 35f2ce9574..828a8f2372 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -428,7 +428,7 @@ stages: az extension add --name azure-devops --output none az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' - az pipelines variable-group list + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") echo VARIABLE_GROUP_ID ${VARIABLE_GROUP_ID} if [ -z ${VARIABLE_GROUP_ID} ]; then diff --git a/deploy/pipelines/11-remover-arm-fallback.yaml b/deploy/pipelines/11-remover-arm-fallback.yaml index 2d79d62a6e..ebff349e9f 100644 --- a/deploy/pipelines/11-remover-arm-fallback.yaml +++ b/deploy/pipelines/11-remover-arm-fallback.yaml @@ -300,7 +300,7 @@ stages: - task: AzureCLI@2 continueOnError: false inputs: - azureSubscription: ${{ parameters.workload_zone_connection }} + azureSubscription: ${{ parameters.connection_name }} scriptType: bash scriptLocation: inlineScript addSpnToEnvironment: true @@ -328,9 +328,6 @@ stages: export ARM_USE_MSI=false if [ $USE_MSI != "true" ]; then echo "use Service Principal" - export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID - export ARM_TENANT_ID=$CP_ARM_TENANT_ID - export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET if [ -z $ARM_SUBSCRIPTION_ID ]; then echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." 
exit 2 From f45257567dd52efa2b62158fbf5c68fbdc477d24 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 23:13:46 +0200 Subject: [PATCH 298/607] Update Azure subscription name in pipeline --- deploy/pipelines/11-remover-arm-fallback.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/11-remover-arm-fallback.yaml b/deploy/pipelines/11-remover-arm-fallback.yaml index ebff349e9f..e364d988c4 100644 --- a/deploy/pipelines/11-remover-arm-fallback.yaml +++ b/deploy/pipelines/11-remover-arm-fallback.yaml @@ -300,7 +300,7 @@ stages: - task: AzureCLI@2 continueOnError: false inputs: - azureSubscription: ${{ parameters.connection_name }} + azureSubscription: Control_Plane_Service_Connection scriptType: bash scriptLocation: inlineScript addSpnToEnvironment: true From fdf9b1541de091173f00a826b20bb491a793ed3f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 19 Feb 2024 23:19:10 +0200 Subject: [PATCH 299/607] Update Azure login process --- deploy/pipelines/11-remover-arm-fallback.yaml | 42 +++++-------------- 1 file changed, 10 insertions(+), 32 deletions(-) diff --git a/deploy/pipelines/11-remover-arm-fallback.yaml b/deploy/pipelines/11-remover-arm-fallback.yaml index e364d988c4..ddd1dd6da6 100644 --- a/deploy/pipelines/11-remover-arm-fallback.yaml +++ b/deploy/pipelines/11-remover-arm-fallback.yaml @@ -325,40 +325,18 @@ stages: else subscription=$variable_value fi + export ARM_CLIENT_ID=$servicePrincipalId + export ARM_TENANT_ID=$tenantId + export ARM_CLIENT_SECRET=$servicePrincipalKey export ARM_USE_MSI=false - if [ $USE_MSI != "true" ]; then - echo "use Service Principal" - if [ -z $ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi - if [ -z $ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined." - exit 2 - fi - if [ -z $ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined." - exit 2 - fi - if [ -z $ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined." - exit 2 - fi - echo -e "$green--- az login ---$reset" - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - else - echo "use MSI" - export ARM_CLIENT_ID=$servicePrincipalId - export ARM_TENANT_ID=$tenantId - export ARM_CLIENT_SECRET=$servicePrincipalKey + echo -e "$green--- az login ---$reset" + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." 
+ exit $return_code fi - echo "Subscription: $subscription" az account set --subscription $subscription From df00095234d06d3e8ea5125eee4b5e550bff049c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 11:34:57 +0200 Subject: [PATCH 300/607] Update availability set ID in app tier VM configuration --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index fb589ed5a7..645739e235 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -132,7 +132,7 @@ resource "azurerm_linux_virtual_machine" "app" { availability_set_id = var.application_tier.app_use_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? ( var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : ( - azurerm_availability_set.app[count.index % max(local.app_zone_count, 1)].id + azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id )) : ( null ) @@ -263,10 +263,10 @@ resource "azurerm_windows_virtual_machine" "app" { ) //If more than one servers are deployed into a single zone put them in an availability set and not a zone - availability_set_id = local.use_app_avset ? ( + availability_set_id = var.application_tier.app_use_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? ( - var.application_tier.avset_arm_ids[count.index % max(local.app_zone_count, 1)]) : ( - azurerm_availability_set.app[count.index % max(local.app_zone_count, 1)].id + var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : ( + azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id )) : ( null ) From 8f0df19b48db8deb6853377825f431cf1894314c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 11:43:33 +0200 Subject: [PATCH 301/607] Fix availability set ID in vm-app.tf --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 645739e235..0973827209 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -132,7 +132,7 @@ resource "azurerm_linux_virtual_machine" "app" { availability_set_id = var.application_tier.app_use_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? ( var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : ( - azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id + azurerm_availability_set.app[count.index % max(length(azurerm_availability_set.app), 1)].id )) : ( null ) @@ -266,7 +266,7 @@ resource "azurerm_windows_virtual_machine" "app" { availability_set_id = var.application_tier.app_use_avset ? ( length(var.application_tier.avset_arm_ids) > 0 ? 
( var.application_tier.avset_arm_ids[count.index % max(length(var.application_tier.avset_arm_ids), 1)]) : ( - azurerm_availability_set.app[count.index % max(length(var.ppg), 1)].id + azurerm_availability_set.app[count.index % max(length(azurerm_availability_set.app), 1)].id )) : ( null ) From ad6c6b863304af458a0c0ba5ac799474b122814c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 11:48:49 +0200 Subject: [PATCH 302/607] change AVSet distribution for SCS --- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 1aa569af37..a1650c2ad8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -134,7 +134,7 @@ resource "azurerm_linux_virtual_machine" "scs" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_scs_avset ? ( - azurerm_availability_set.scs[count.index % max(local.scs_zone_count, 1)].id) : ( + azurerm_availability_set.scs[count.index % max(length(azurerm_availability_set.scs), 1)].id) : ( null ) @@ -314,7 +314,7 @@ resource "azurerm_windows_virtual_machine" "scs" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_scs_avset ? ( - azurerm_availability_set.scs[count.index % max(local.scs_zone_count, 1)].id) : ( + azurerm_availability_set.scs[count.index % max(length(azurerm_availability_set.scs), 1)].id) : ( null ) From 839457ba65054092131465c7e36f1c0a629cedee Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Tue, 20 Feb 2024 04:44:39 -0800 Subject: [PATCH 303/607] Add AMS monitoring playbook and variables (#548) * Add AMS monitoring playbook and variables * Fix ansible lint * Fix runtime parameters and package installation in Ansible roles * Update Prometheus role: Unzip and enable node exporter * Remove unnecessary tar command in node exporter setup * Update Prometheus role: Remove unused 'remote_src' option * Update unarchive task in main.yml * Add ACSS Registration and AMS Provider Creation to configuration menu and update node exporter URL --------- Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> --- deploy/ansible/configuration_menu.sh | 5 +- .../ansible/playbook_01_os_base_config.yaml | 2 +- .../playbook_06_01_ams_monitoring.yaml | 88 +++++++++ .../0.8-ams-providers/defaults/main.yaml | 9 + .../tasks/0.8.1-set-runtime-parameters.yaml | 14 ++ .../0.8-ams-providers/tasks/main.yaml | 174 ++++++++++++++++++ .../1.16-services/vars/os-services.yaml | 16 ++ .../roles-os/1.20-prometheus/tasks/main.yml | 119 +++++------- deploy/ansible/vars/ansible-input-api.yaml | 6 + .../pipelines/05-DB-and-SAP-installation.yaml | 17 +- deploy/terraform/run/sap_landscape/module.tf | 1 + deploy/terraform/run/sap_landscape/output.tf | 15 ++ .../terraform/run/sap_landscape/providers.tf | 12 ++ .../run/sap_landscape/tfvar_variables.tf | 50 +++++ .../terraform/run/sap_landscape/transform.tf | 66 +++++++ deploy/terraform/run/sap_system/module.tf | 7 + .../run/sap_system/tfvar_variables.tf | 21 +++ .../modules/sap_landscape/ams.tf | 31 ++++ .../modules/sap_landscape/infrastructure.tf | 6 +- 
.../modules/sap_landscape/outputs.tf | 21 +++ .../modules/sap_landscape/providers.tf | 5 + .../modules/sap_landscape/subnets.tf | 27 +++ .../modules/sap_landscape/variables_local.tf | 48 +++++ .../sap_namegenerator/variables_global.tf | 3 + .../sap_system/output_files/inventory.tf | 3 + .../output_files/sap-parameters.yml.tmpl | 10 + .../output_files/variables_global.tf | 5 + 27 files changed, 706 insertions(+), 75 deletions(-) create mode 100644 deploy/ansible/playbook_06_01_ams_monitoring.yaml create mode 100644 deploy/ansible/roles-misc/0.8-ams-providers/defaults/main.yaml create mode 100644 deploy/ansible/roles-misc/0.8-ams-providers/tasks/0.8.1-set-runtime-parameters.yaml create mode 100644 deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml create mode 100644 deploy/terraform/terraform-units/modules/sap_landscape/ams.tf diff --git a/deploy/ansible/configuration_menu.sh b/deploy/ansible/configuration_menu.sh index ab15a71e10..3dfebc3e36 100755 --- a/deploy/ansible/configuration_menu.sh +++ b/deploy/ansible/configuration_menu.sh @@ -32,7 +32,8 @@ cmd_dir="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")" # playbook_05_02_sap_pas_install.yaml \ # playbook_05_03_sap_app_install.yaml \ # playbook_05_04_sap_web_install.yaml \ -# playbook_06_00_acss_registration.yaml +# playbook_06_00_acss_registration.yaml \ +# playbook_06_01_ams_monitoring.yaml # The SAP System parameters file which should exist in the current directory sap_params_file=sap-parameters.yaml @@ -110,6 +111,7 @@ options=( "Application Server installations" "Web Dispatcher installations" "ACSS Registration" + "AMS Provider Creation" "HCMT" # Special menu entries @@ -139,6 +141,7 @@ all_playbooks=( ${cmd_dir}/playbook_05_03_sap_app_install.yaml ${cmd_dir}/playbook_05_04_sap_web_install.yaml ${cmd_dir}/playbook_06_00_acss_registration.yaml + ${cmd_dir}/playbook_06_01_ams_monitoring.yaml ${cmd_dir}/playbook_04_00_02_db_hcmt.yaml ${cmd_dir}/playbook_bom_downloader.yaml ${cmd_dir}/playbook_07_00_00_post_installation.yaml diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 354d173331..2d0c9c25aa 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -169,7 +169,7 @@ name: roles-os/1.20-prometheus when: - prometheus - - ansible_os_family | upper == "SUSE" + - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" tags: - 1.20-prometheus when: diff --git a/deploy/ansible/playbook_06_01_ams_monitoring.yaml b/deploy/ansible/playbook_06_01_ams_monitoring.yaml new file mode 100644 index 0000000000..97cacf79b6 --- /dev/null +++ b/deploy/ansible/playbook_06_01_ams_monitoring.yaml @@ -0,0 +1,88 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for Azure Monitor for SAP Provider Settings | +# | | +# +------------------------------------4--------------------------------------*/ + +# -------------------------------------+---------------------------------------8 +# Role: 6.1 AMS Provider Configuration +# +# -------------------------------------+---------------------------------------8 + +--- + +- name: "AMS Provider Creation Playbook: - Initialization" + hosts: localhost + gather_facts: true + vars_files: vars/ansible-input-api.yaml # API Input template with defaults + tasks: + - name: "AMS Provider Creation Playbook: - Initialization" + block: + - name: "AMS Provider Creation Playbook: - Create Progress folder" + ansible.builtin.file: + 
path: "{{ _workspace_directory }}/.progress" + state: directory + mode: 0755 + + - name: "AMS Provider Creation Playbook: - Remove ams-provider-creation-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/ams-provider-creatio-done" + state: absent + + - name: "AMS Provider Creation Playbook: - Read/Create passwords" + ansible.builtin.include_role: + name: roles-misc/0.1-passwords + public: true + tags: + - 0.1-passwords + when: + - ams_resource_id is defined + - ams_resource_id != "" + +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for Creating OS Provider in AMS | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: AMS Provider Instance Creation + hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_ERS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP : + {{ sap_sid | upper }}_WEB" + become: true + gather_facts: true # Important to collect hostvars information + vars_files: vars/ansible-input-api.yaml # API Input template with defaults + tasks: + - name: AMS Provider Instance Creation + block: + - name: "Run the AMS provider prerequisites" + when: ansible_os_family != "Windows" + ansible.builtin.include_role: + name: "roles-os/1.20-prometheus" + tags: + - 6.1-ams-provider-prerequisites + + - name: "Run the AMS provider creation" + ansible.builtin.include_role: + name: "roles-misc/0.8-ams-providers" + tags: + - 6.2-ams-provider-creation + + - name: "AMS Provider Creation Playbook: - ams-provider-creation-done flag" + delegate_to: localhost + become: false + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/ams-provider-creatio-done" + state: touch + mode: 0755 + when: + - ams_resource_id is defined + - ams_resource_id != "" +... + +# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/defaults/main.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/defaults/main.yaml new file mode 100644 index 0000000000..c5ee2f8e8e --- /dev/null +++ b/deploy/ansible/roles-misc/0.8-ams-providers/defaults/main.yaml @@ -0,0 +1,9 @@ +--- +# TODO: Maybe move these to a group_vars/all/distro file so that they +# can be shared by all playbooks/tasks automatically, and extend with +# standardised versions of all similar patterns used in the playbooks. +distro_name: "{{ ansible_os_family | upper }}-{{ ansible_distribution_major_version }}" +distribution_id: "{{ ansible_os_family | lower ~ ansible_distribution_major_version }}" +distribution_full_id: "{{ ansible_os_family | lower ~ ansible_distribution_version }}" +ams_cli_extension_url: "https://files.pythonhosted.org/packages/ce/f3/91b1a5fdff7a7f0cc8bdfc9a7177f1c1dbab909f857a5ba4cc837650635e/azure_mgmt_workloads-1.0.0-py3-none-any.whl" +... 
diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/0.8.1-set-runtime-parameters.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/0.8.1-set-runtime-parameters.yaml new file mode 100644 index 0000000000..a8d1375963 --- /dev/null +++ b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/0.8.1-set-runtime-parameters.yaml @@ -0,0 +1,14 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | Set Runtime Parameters - e.g Sub ID , Resource group name | +# | | +# +------------------------------------4--------------------------------------*/ +- name: "0.8.1 ams provider creation: - Set Python version {{ distribution_id }}" + ansible.builtin.set_fact: + python_version: "python3" +- name: "0.8.1 ams provider creation: - Set Python version {{ distribution_id }}" + ansible.builtin.set_fact: + python_version: "python2" + when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] +... diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml new file mode 100644 index 0000000000..147748ec3b --- /dev/null +++ b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml @@ -0,0 +1,174 @@ +--- + +- name: "0.8.1 ams provider creation: - Set Python version {{ distribution_id }}" + ansible.builtin.set_fact: + python_version: "python3" + db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + scs_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" + ers_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_ERS') }}" + ha_cluster_port_number: "{{ 9664 if ansible_os_family | upper == 'SUSE' else 44322 }}" + +- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" + delegate_to: localhost + ansible.builtin.shell: >- + az extension add --name workloads --yes || exit 1 + tags: + - skip_ansible_lint + +- name: "0.8.1 ams provider creation: - Get Access Token" + delegate_to: localhost + ansible.builtin.shell: >- + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: ams_access_token + tags: + - skip_ansible_lint + +- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" + delegate_to: localhost + ansible.builtin.command: uuidgen + register: ams_provider_guid + tags: + - skip_ansible_lint + +- name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" + delegate_to: localhost + when: + - ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' + - enable_os_monitoring + block: + - name: "0.8.1 ams provider creation: - Create Prometheus OS AMS provider instance" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-OS?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "providerSettings": { + "providerType": "PrometheusOS", + "prometheusUrl": "http://{{ hostvars[ansible_hostname]['ansible_default_ipv4']['address'] }}:9100/metrics", + "sslPreference": "Disabled", + "sapSid": "{{ sap_sid | upper }}" + } + } + } + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: create_ams_provider_response + failed_when: create_ams_provider_response.json.properties.provisioningState != 'Accepted' + + - name: "0.8.1 ams provider creation: - 
Check the created OS provider instance in AMS" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-OS?api-version=2023-04-01" + method: GET + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: get_ams_response + until: get_ams_response.json.properties.provisioningState == 'Succeeded' + retries: 10 + delay: 60 + no_log: true + +- name: "0.8.2 ams provider creation: - Create PrometheusHACluster HA provider in AMS" + delegate_to: localhost + when: + - ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' + - enable_ha_monitoring + block: + - name: "0.8.2 ams provider creation: - DB Cluster: Create PrometheusHACluster" + block: + - name: "0.8.2 ams provider creation: - Create PrometheusHACluster AMS provider instance" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-HA-DB?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "providerSettings": { + "providerType": "PrometheusHaCluster", + "prometheusUrl": "http://{{ hostvars[ansible_hostname]['ansible_default_ipv4']['address'] }}:{{ ha_cluster_port_number }}/metrics", + "hostname": "{{ ansible_hostname }}", + "sid": "{{ sap_sid | upper }}", + "clusterName": "{{ sap_sid | upper }}-DB", + "sslPreference": "Disabled" + } + } + } + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: create_ams_provider_response + failed_when: create_ams_provider_response.json.properties.provisioningState != 'Accepted' + no_log: true + + - name: "0.8.2 ams provider creation: - Check the created HA provider instance in AMS" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-HA-DB?api-version=2023-04-01" + method: GET + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: get_ams_response + until: get_ams_response.json.properties.provisioningState == 'Succeeded' + retries: 10 + delay: 60 + no_log: true + when: + - ansible_hostname in db_hosts + - database_high_availability + + - name: "0.8.2 ams provider creation: - SCS Cluster: Create PrometheusHACluster" + block: + - name: "0.8.2 ams provider creation: - Create PrometheusHACluster AMS provider instance" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-HA-SCS?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "providerSettings": { + "providerType": "PrometheusHaCluster", + "prometheusUrl": "http://{{ hostvars[ansible_hostname]['ansible_default_ipv4']['address'] }}:{{ ha_cluster_port_number }}/metrics", + "hostname": "{{ ansible_hostname }}", + "sid": "{{ sap_sid | upper }}", + "clusterName": "{{ sap_sid | upper }}-SCS", + "sslPreference": "Disabled" + } + } + } + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: create_ams_provider_response + failed_when: create_ams_provider_response.json.properties.provisioningState 
!= 'Accepted' + no_log: true + + - name: "0.8.2 ams provider creation: - Check the created HA provider instance in AMS" + ansible.builtin.uri: + url: "https://management.azure.com{{ ams_resource_id }}/providerInstances/{{ ansible_hostname | upper }}-HA-SCS?api-version=2023-04-01" + method: GET + headers: + Authorization: "Bearer {{ ams_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ ams_provider_guid.stdout }}" + register: get_ams_response + until: get_ams_response.json.properties.provisioningState == 'Succeeded' + retries: 10 + delay: 60 + no_log: true + when: + - ansible_hostname in scs_hosts or ansible_hostname in ers_hosts + - scs_high_availability + +# More provider creation tasks to be added below this line. +... diff --git a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml index c192491fe4..cde76ab082 100644 --- a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml +++ b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml @@ -26,6 +26,7 @@ services: redhat7: - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } + - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'sapos', service: 'rpcbind', node_tier: 'scs', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } @@ -33,6 +34,10 @@ services: - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'all', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'all', state: 'enabled' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } @@ -40,6 +45,7 @@ services: - { tier: 'os', service: 'auditd', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } + - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'started' } @@ -52,12 +58,17 @@ services: - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'os', service: 'oddjobd', node_tier: 'all', state: 'disabled' } redhat9: - { tier: 'os', service: 'auditd', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'firewalld', node_tier: 
'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } + - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'started' } @@ -69,6 +80,10 @@ services: - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } + - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } sles_sap12: @@ -91,5 +106,6 @@ services: - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } + - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'waagent', node_tier: 'all', state: 'restarted' } # Ensure the service is restarted and swap space is available in case the handler is not called in 1.1/swap. - { tier: 'sapos', service: 'rpcbind', node_tier: 'scs', state: 'restarted' } diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 85ecf2aa50..789bad83d2 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -1,77 +1,56 @@ -# /*----------------------------------------------------------------------------8 -# | | +# /*---------------------------------------------------------------------------8 # | | +# | Task: 1.20 - Package Installation for OS | # | | # +------------------------------------4--------------------------------------*/ ---- - -# -------------------------------------+---------------------------------------8 -# -# Task: 1.20 - Prometheus -# -# -------------------------------------+---------------------------------------8 - - -# -------------------------------------+---------------------------------------8 -# -# -# -# -------------------------------------+---------------------------------------8 - -# ---------------------------------------- -# BEGIN -# ---------------------------------------- - -# ---------------------------------------- -# END -# ---------------------------------------- - -# TODO: Review and rewrite for efficiency and working for all servers - -# Enable prometheus on HANA servers -- name: Set OS version - ansible.builtin.set_fact: - distro: "SLE_{{ ansible_facts['distribution_major_version'] }}_SP{{ ansible_facts['distribution_release'] }}" - -- name: Check if server_monitoring.repo is present - ansible.builtin.stat: - path: /etc/zypp/repos.d/server_monitoring.repo - register: monitor_repo - -- name: Add monitoring repository if it does not exist - community.general.zypper_repository: - repo: "https://download.opensuse.org/repositories/server:/monitoring/{{ distro }}/server:monitoring.repo" - state: present - when: not monitor_repo.stat.exists - register: add_repo_status - ignore_errors: true +# 
/*----------------------------------------------------------------------------8 +# | BEGIN | +# +------------------------------------4---------------------------------------*/ -- name: Ensure Prometheus is installed and configured - when: not (add_repo_status.failed is defined and add_repo_status.failed) +- name: "1.20 Packages: - Download node exporter package to enable monitoring" block: - - name: Install Prometheus with Node and HA Cluster exporters - community.general.zypper_repository: - name: "{{ item }}" - disable_gpg_check: true - loop: - - golang-github-prometheus-node_exporter - - prometheus-ha_cluster_exporter - - - name: Set arguments for Node exporter - ansible.builtin.lineinfile: - path: /etc/sysconfig/prometheus-node_exporter - regexp: 'ARGS=.*' - line: 'ARGS="--collector.systemd"' - - - name: Start the Node exporter - ansible.builtin.service: - name: prometheus-node_exporter - state: started + - name: "1.20 Packages: - Download node exporter package to enable monitoring" + ansible.builtin.get_url: + url: "https://aka.ms/linx-node-exporter" + dest: "/tmp/" + timeout: 60 + mode: 0644 + + - name: "1.20 Packages: - Unzip node exporter" + ansible.builtin.unarchive: + src: "/tmp/node_exporter-1.3.1.linux-amd64.tar.gz" + dest: "/tmp/" + mode: 0644 + + - name: "1.20 Packages: - Enable node exporter" + ansible.builtin.shell: | + nohup /tmp/node_exporter-1.3.1.linux-amd64/node_exporter --web.listen-address=':9100' & + +- name: "1.20.0 Packages: - Download HA cluster exporter package to enable monitoring" + block: + - name: "1.20 Packages: - Install ha cluster exporter package" + community.general.zypper: + name: "prometheus-ha_cluster_exporter" + state: present + - name: "1.20 Packages: - Enable HA cluster exporter." + ansible.builtin.shell: "nohup ha_cluster_exporter &" + when: ansible_os_family | upper == "SUSE" + +- name: "1.20.0 Packages: - Install pcp and pcp-pmda-hacluster package" + block: + - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package" + ansible.builtin.yum: + name: + - "pcp" + - "pcp-pmda-hacluster" + + - name: "1.20 Packages: - Install and enable the HA Cluster PMDA." 
+ ansible.builtin.shell: "./Install" + args: + chdir: "/var/lib/pcp/pmdas/hacluster/" + when: ansible_os_family | upper == "REDHAT" -# TODO: fix this for HA - # - name: Start the HA Cluster exporter (HA Clusters only) - # service: - # name: prometheus-ha_cluster_exporter - # state: started - # when: hana_database.high_availability == True +# /*----------------------------------------------------------------------------8 +# | END | +# +------------------------------------4---------------------------------------*/ diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 8704dc2ebc..a12a0fa2b5 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -232,3 +232,9 @@ sap_swap: - { tier: "observer", swap_size_mb: "2048" } - { tier: 'sqlserver', swap_size_mb: '20480' } # --------------------- End - SAP SWAP settings variables --------------------8 + +# ------------------- Begin - Azure Monitor for SAP (AMS) variables ------------8 +ams_resource_id: "" +enable_os_monitoring: false +enable_ha_monitoring: false +# ------------------- End - Azure Monitor for SAP (AMS) variables --------------8 diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index af69c3654b..401d714d6b 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -548,7 +548,22 @@ stages: acssEnvironment: ${{ parameters.acss_environment }} acssSapProduct: ${{ parameters.acss_sap_product }} USE_MSI: $(USE_MSI) - + - template: templates\run-ansible.yaml + parameters: + displayName: "AMS Provider Creation" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_01_ams_monitoring.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: ${{ parameters.extra_params }} + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) - template: templates\collect-log-files.yaml parameters: logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index 8f19a39678..e71e92a004 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -10,6 +10,7 @@ module "sap_landscape" { azurerm.deployer = azurerm azurerm.dnsmanagement = azurerm.dnsmanagement azurerm.peering = azurerm.peering + azapi.api = azapi.api } additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf index a41775a96e..c81f767bdc 100644 --- a/deploy/terraform/run/sap_landscape/output.tf +++ b/deploy/terraform/run/sap_landscape/output.tf @@ -66,6 +66,11 @@ output "db_nsg_id" { value = module.sap_landscape.db_nsg_id } +output "ams_subnet_id" { + description = "Azure 
resource identifier for the AMS subnet" + value = length(var.ams_subnet_arm_id) > 0 ? var.ams_subnet_arm_id : module.sap_landscape.ams_subnet_id + } + output "route_table_id" { description = "Azure resource identifier for the route table" value = module.sap_landscape.route_table_id @@ -312,3 +317,13 @@ output "iSCSI_servers" { [] ) } + +############################################################################### +# # +# AMS Resource # +# # +############################################################################### +output ams_resource_id { + description = "AMS resource ID" + value = module.sap_landscape.ams_resource_id + } \ No newline at end of file diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 98de52cc7c..549a2abc0f 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -86,6 +86,15 @@ provider "azuread" { tenant_id = local.spn.tenant_id use_msi = var.use_spn ? false : true } + +provider "azapi" { + alias = "api" + subscription_id = local.spn.subscription_id + client_id = local.spn.client_id + client_secret = local.spn.client_secret + tenant_id = local.spn.tenant_id + } + terraform { required_version = ">= 1.0" required_providers { @@ -109,5 +118,8 @@ terraform { source = "hashicorp/azurerm" version = ">=3.3" } + azapi = { + source = "Azure/azapi" + } } } diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 002b548359..79b3599073 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -272,6 +272,36 @@ variable "anf_subnet_nsg_arm_id" { } +#######################################4#######################################8 +# # +# AMS Subnet variables # +# # +#######################################4#######################################8 + +variable "ams_subnet_name" { + description = "If provided, the name of the ams subnet" + default = "" + } + +variable "ams_subnet_arm_id" { + description = "If provided, Azure resource id for the ams subnet" + default = "" + } + +variable "ams_subnet_address_prefix" { + description = "The address prefix for the ams subnet" + default = "" + } + +variable "ams_subnet_nsg_name" { + description = "If provided, the name of the AMS subnet NSG" + default = "" + } + +variable "ams_subnet_nsg_arm_id" { + description = "If provided, Azure resource id for the AMS subnet NSG" + default = "" + } ######################################################################################### # # @@ -729,3 +759,23 @@ variable "export_transport_path" { description = "If provided, export mount path for the transport media" default = true } + +#######################################4#######################################8 +# # +# AMS Instance variables # +# # +#######################################4#######################################8 + +variable "create_ams_instance" { + description = "If true, an AMS instance will be created" + default = false + } + +variable "ams_instance_name" { + description = "If provided, the name of the AMS instance" + default = "" + } +variable "ams_laws_arm_id" { + description = "If provided, Azure resource id for the Log analytics workspace in AMS" + default = "" + } \ No newline at end of file diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 8b5680781b..24d6200b05 100644 --- 
a/deploy/terraform/run/sap_landscape/transform.tf
+++ b/deploy/terraform/run/sap_landscape/transform.tf
@@ -114,6 +114,25 @@ locals {
length(try(var.infrastructure.vnets.sap.subnet_anf.nsg.arm_id, ""))
) > 0
+ subnet_ams_defined = (
+ length(var.ams_subnet_address_prefix) +
+ length(try(var.infrastructure.vnets.sap.subnet_ams.prefix, "")) +
+ length(var.ams_subnet_arm_id) +
+ length(try(var.infrastructure.vnets.sap.subnet_ams.arm_id, ""))
+ ) > 0
+
+ subnet_ams_arm_id_defined = (
+ length(var.ams_subnet_arm_id) +
+ length(try(var.infrastructure.vnets.sap.subnet_ams.arm_id, ""))
+ ) > 0
+
+ subnet_ams_nsg_defined = (
+ length(var.ams_subnet_nsg_name) +
+ length(try(var.infrastructure.vnets.sap.subnet_ams.nsg.name, "")) +
+ length(var.ams_subnet_nsg_arm_id) +
+ length(try(var.infrastructure.vnets.sap.subnet_ams.nsg.arm_id, ""))
+ ) > 0
+
resource_group = {
name = try(var.infrastructure.resource_group.name, var.resourcegroup_name)
arm_id = try(var.infrastructure.resource_group.arm_id, var.resourcegroup_arm_id)
@@ -123,6 +142,12 @@ locals {
length(local.resource_group.arm_id)
) > 0
+ ams_instance = {
+ name = var.ams_instance_name
+ create_ams_instance = var.create_ams_instance
+ ams_laws_arm_id = var.ams_laws_arm_id
+ }
+
temp_infrastructure = {
environment = coalesce(var.environment, try(var.infrastructure.environment, ""))
region = lower(coalesce(var.location, try(var.infrastructure.region, "")))
@@ -363,6 +388,36 @@ locals {
)
)
+ subnet_ams = merge(
+ (
+ {
+ "name" = try(var.infrastructure.vnets.sap.subnet_ams.name, var.ams_subnet_name)
+ }
+ ), (
+ local.subnet_ams_arm_id_defined ? (
+ {
+ "arm_id" = try(var.infrastructure.vnets.sap.subnet_ams.arm_id, var.ams_subnet_arm_id)
+ }
+ ) : (
+ null
+ )), (
+ {
+ "prefix" = try(var.infrastructure.vnets.sap.subnet_ams.prefix, var.ams_subnet_address_prefix)
+ }
+ ), (
+ local.subnet_ams_nsg_defined ? (
+ {
+ "nsg" = {
+ "name" = try(var.infrastructure.vnets.sap.subnet_ams.nsg.name, var.ams_subnet_nsg_name)
+ "arm_id" = try(var.infrastructure.vnets.sap.subnet_ams.nsg.arm_id, var.ams_subnet_nsg_arm_id)
+ }
+ }
+ ) : (
+ null
+ )
+ )
+ )
+
all_subnets = merge(local.sap, (
local.subnet_admin_defined ? (
{
@@ -399,6 +454,13 @@ locals {
) : (
null
)), (
+ local.subnet_ams_defined ? (
+ {
+ "subnet_ams" = local.subnet_ams
+ }
+ ) : (
+ null
+ )), (
local.subnet_iscsi_defined ? (
{
"subnet_iscsi" = local.subnet_iscsi
@@ -446,6 +508,10 @@ locals {
"vnets" = local.temp_vnet
}
), (
+ {
+ "ams_instance" = local.ams_instance
+ }
+ ),(
local.iscsi.iscsi_count > 0 ? (
{
"iscsi" = local.iscsi
diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf
index a75ace3f4a..3f19dcefe0 100644
--- a/deploy/terraform/run/sap_system/module.tf
+++ b/deploy/terraform/run/sap_system/module.tf
@@ -444,4 +444,11 @@ module "output_files" {
iSCSI_server_ips = var.database_cluster_type == "ISCSI" || var.scs_cluster_type == "ISCSI" ? data.terraform_remote_state.landscape.outputs.iSCSI_server_ips : []
iSCSI_server_names = var.database_cluster_type == "ISCSI" || var.scs_cluster_type == "ISCSI" ? data.terraform_remote_state.landscape.outputs.iSCSI_server_names : []
iSCSI_servers = var.database_cluster_type == "ISCSI" || var.scs_cluster_type == "ISCSI" ?
data.terraform_remote_state.landscape.outputs.iSCSI_servers : []
+
+ #########################################################################################
+ # AMS #
+ #########################################################################################
+ ams_resource_id = try(coalesce(var.ams_resource_id, data.terraform_remote_state.landscape.outputs.ams_resource_id), "")
+ enable_ha_monitoring = var.enable_ha_monitoring
+ enable_os_monitoring = var.enable_os_monitoring
}
diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf
index bda0fe1416..dca46ae72a 100644
--- a/deploy/terraform/run/sap_system/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_system/tfvar_variables.tf
@@ -1269,6 +1269,27 @@ variable "subscription" {
default = ""
}
+#########################################################################################
+# #
+# Azure Monitor for SAP variables #
+# #
+#########################################################################################
+
+variable "ams_resource_id" {
+ description = "[optional] If defined, will use the specified Azure Monitor for SAP instance, else will use the AMS instance in the workload zone."
+ default = ""
+ }
+
+variable "enable_ha_monitoring" {
+ description = "If defined, will enable prometheus high availability cluster monitoring"
+ default = false
+ }
+
+variable "enable_os_monitoring" {
+ description = "If defined, will enable prometheus os monitoring"
+ default = false
+ }
+
#########################################################################################
# #
# Configuration values #
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf
new file mode 100644
index 0000000000..c3090b50c5
--- /dev/null
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf
@@ -0,0 +1,31 @@
+// Imports data of existing AMS subnet
+data "azurerm_subnet" "ams" {
+ provider = azurerm.main
+ count = length(local.ams_subnet_arm_id) > 0 ? 1 : 0
+ name = local.ams_subnet_name
+ virtual_network_name = local.SAP_virtualnetwork_name
+ resource_group_name = local.resourcegroup_name
+}
+
+resource "azapi_resource" "ams_instance" {
+ type = "Microsoft.Workloads/monitors@2023-04-01"
+ count = local.create_ams_instance ?
1 : 0 + name = local.ams_instance_name + location = local.region + parent_id = azurerm_resource_group.resource_group[0].id + depends_on = [ + azurerm_virtual_network.vnet_sap, + azurerm_subnet.ams + ] + body = jsonencode({ + properties = { + appLocation: local.region, + routingPreference: "RouteAll", + logAnalyticsWorkspaceArmId: local.ams_laws_arm_id, + managedResourceGroupConfiguration: { + name: "managedrg-ams" + }, + monitorSubnet: azurerm_subnet.ams[0].id, + } + }) +} \ No newline at end of file diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf index 95118e37e5..cc857475b5 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf @@ -64,7 +64,8 @@ resource "azurerm_virtual_network_peering" "peering_management_sap" { azurerm_subnet.app, azurerm_subnet.db, azurerm_subnet.web, - azurerm_subnet.admin + azurerm_subnet.admin, + azurerm_subnet.ams ] @@ -104,7 +105,8 @@ resource "azurerm_virtual_network_peering" "peering_sap_management" { azurerm_subnet.app, azurerm_subnet.db, azurerm_subnet.web, - azurerm_subnet.admin + azurerm_subnet.admin, + azurerm_subnet.ams ] diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index cfa1feeffa..3a10bafd67 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -114,6 +114,16 @@ output "anf_subnet_id" { ) } +output "ams_subnet_id" { + description = "Azure resource identifier for the ams subnet" + value = local.ams_subnet_defined ? ( + local.ams_subnet_existing ? ( + var.infrastructure.vnets.sap.subnet_ams.arm_id) : ( + try(azurerm_subnet.ams[0].id, ""))) : ( + "" + ) + } + output "admin_nsg_id" { description = "Azure resource identifier for the admin subnet network security group" value = local.admin_subnet_defined ? 
( @@ -510,3 +520,14 @@ output "iSCSI_servers" {
[]
)
}
+
+###############################################################################
+# #
+# AMS resource properties #
+# #
+###############################################################################
+
+output "ams_resource_id" {
+ description = "Azure resource identifier for the AMS resource"
+ value = try(azapi_resource.ams_instance[0].id, "")
+ }
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf
index e3516f0c20..ed1db2f2b8 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf
@@ -5,5 +5,10 @@ terraform {
configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering]
version = ">= 3.23"
}
+
+ azapi = {
+ source = "Azure/azapi"
+ configuration_aliases = [azapi.api]
+ }
}
}
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
index e52598f77d..4d18fc1bdd 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
@@ -96,6 +96,22 @@ resource "azurerm_subnet" "anf" {
}
}
+// Creates AMS subnet of SAP VNET
+resource "azurerm_subnet" "ams" {
+ provider = azurerm.main
+ count = local.create_ams_instance && local.ams_subnet_defined && !local.ams_subnet_existing ? 1 : 0
+ name = local.ams_subnet_name
+ resource_group_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].resource_group_name : azurerm_virtual_network.vnet_sap[0].resource_group_name
+ virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name
+ address_prefixes = [local.ams_subnet_prefix]
+
+ delegation {
+ name = "delegation"
+ service_delegation {
+ name = "Microsoft.Web/serverFarms"
+ }
+ }
+}
#Associate the subnets to the route table
@@ -144,6 +160,17 @@ resource "azurerm_subnet_route_table_association" "web" {
route_table_id = azurerm_route_table.rt[0].id
}
+resource "azurerm_subnet_route_table_association" "ams" {
+ provider = azurerm.main
+ count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? 1 : 0
+ depends_on = [
+ azurerm_route_table.rt,
+ azurerm_subnet.ams
+ ]
+ subnet_id = local.ams_subnet_existing ? var.infrastructure.vnets.sap.subnet_ams.arm_id : azurerm_subnet.ams[0].id
+ route_table_id = azurerm_route_table.rt[0].id
+}
+
# Creates network security rule to allow internal traffic for SAP db subnet
resource "azurerm_network_security_rule" "nsr_internal_db" {
provider = azurerm.main
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
index 04ecad8781..47893bf5c0 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf
@@ -50,6 +50,19 @@ locals {
)
)
)
+ // AMS instance
+ create_ams_instance = var.infrastructure.ams_instance.create_ams_instance
+ ams_instance_name = length(var.infrastructure.ams_instance.name) > 0 ?
( + var.infrastructure.ams_instance.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.vnet_rg, + local.prefix, + local.resource_suffixes.vnet_rg, + local.resource_suffixes.ams_instance + ) + ) + ams_laws_arm_id = length(var.infrastructure.ams_instance.ams_laws_arm_id) > 0 ? ( + var.infrastructure.ams_instance.ams_laws_arm_id) : "" // SAP vnet SAP_virtualnetwork_id = try(var.infrastructure.vnets.sap.arm_id, "") @@ -480,6 +493,41 @@ locals { ) ) + ############################################################################################## + # + # AMS subnet - Check if locally provided + # + ############################################################################################## + + + ams_subnet_defined = ( + length(try(var.infrastructure.vnets.sap.subnet_ams.arm_id, "")) + + length(try(var.infrastructure.vnets.sap.subnet_ams.prefix, "")) + ) > 0 + ams_subnet_arm_id = local.ams_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_ams.arm_id, "")) : ( + "" + ) + ams_subnet_existing = length(local.ams_subnet_arm_id) > 0 + ams_subnet_name = local.ams_subnet_existing ? ( + try(split("/", local.ams_subnet_arm_id)[10], "")) : ( + length(try(var.infrastructure.vnets.sap.subnet_ams.name, "")) > 0 ? ( + var.infrastructure.vnets.sap.subnet_ams.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.ams_subnet, + length(local.prefix) > 0 ? ( + local.prefix) : ( + var.infrastructure.environment + ), + var.naming.separator, + local.resource_suffixes.ams_subnet + ) + ) + ) + ams_subnet_prefix = local.ams_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_ams.prefix, "")) : ( + "" + ) # Store the Deployer KV in workload zone KV deployer_keyvault_user_name = try(var.deployer_tfstate.deployer_kv_user_name, "") diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf index 717c9e00dd..3960773b27 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf @@ -371,6 +371,7 @@ variable "resource_prefixes" { "witness" = "" "witness_accesskey" = "" "witness_name" = "" + "ams_subnet" = "" } } @@ -503,6 +504,8 @@ variable "resource_suffixes" { "witness" = "-witness" "witness_accesskey" = "-witness-accesskey" "witness_name" = "-witness-name" + "ams_subnet" = "ams-subnet" + "ams_instance" = "-AMS" } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index aea9308da3..0bb4908092 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -240,6 +240,9 @@ resource "local_file" "sap-parameters_yml" { ) web_instance_number = var.web_instance_number web_sid = var.web_sid + ams_resource_id = var.ams_resource_id + enable_os_monitoring = var.enable_os_monitoring + enable_ha_monitoring = var.enable_ha_monitoring } ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl index c8cbfdfd1e..96a018c47f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl +++ 
b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl @@ -177,4 +177,14 @@ iscsi_servers: %{~ endfor } %{~ endif } +############################################################################# +# # +# AMS # +# # +############################################################################# +# ams_resource_id is the AMS resource ID +ams_resource_id: ${ams_resource_id} +enable_os_monitoring: ${enable_os_monitoring} +enable_ha_monitoring: ${enable_ha_monitoring} + ... diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index adc1f13c80..04efde8d24 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -199,3 +199,8 @@ variable "web_sid" { variable "webdispatcher_server_ips" { description = "List of IP addresses for the Web dispatchers" } variable "webdispatcher_server_secondary_ips" { description = "List of secondary IP addresses for the Web dispatchers" } variable "webdispatcher_server_vm_names" { description = "List of VM names for the Web dispatchers" } + +variable "ams_resource_id" { description = "Resource ID for AMS" } +variable "enable_os_monitoring" { description = "Enable OS monitoring" } +variable "enable_ha_monitoring" { description = "Enable HA monitoring" } + From 6fbadcf6ef625c07efbbb9bc210e175f657fd300 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 14:53:50 +0200 Subject: [PATCH 304/607] Add OracleLinux 8.9 to VM-Images.json --- Webapp/SDAF/ParameterDetails/VM-Images.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json index c75ce3337f..8404400aa0 100644 --- a/Webapp/SDAF/ParameterDetails/VM-Images.json +++ b/Webapp/SDAF/ParameterDetails/VM-Images.json @@ -263,6 +263,18 @@ "type": "marketplace" } }, + { + "name": "OracleLinux 8.9", + "data": { + "os_type": "LINUX", + "source_image_id": "", + "publisher": "Oracle", + "offer": "Oracle-Linux", + "sku": "ol89-lvm-gen2", + "version": "latest", + "type": "marketplace" + } + }, { "name": "Windows Server 2016", "data": { From 912a6c6548be4c3a4a0439c20a06b1ab8681d969 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 14:54:10 +0200 Subject: [PATCH 305/607] Update SPN authentication condition in deployment pipeline --- deploy/pipelines/03-sap-system-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 9f9ec4aa50..017fa88d76 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -245,7 +245,7 @@ stages: fi az logout --output none - if [ $LOGON_USING_SPN == "true" ]; then + if [ $USE_MSI != "true" ]; then echo "Using SPN" export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID From ba23020d8957c3575e2d02b716444e6b35c444ea Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Feb 2024 15:55:27 +0200 Subject: [PATCH 306/607] Add AMS support to Web App --- Webapp/SDAF/Models/CustomValidators.cs | 9 +++++ Webapp/SDAF/Models/LandscapeModel.cs | 14 ++++++++ .../ParameterDetails/LandscapeDetails.json | 33 +++++++++++++++++++ .../ParameterDetails/LandscapeTemplate.txt | 15 +++++++++ Webapp/SDAF/SDAFWebApp.csproj | 10 +++--- 5 files 
changed, 76 insertions(+), 5 deletions(-) diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs index ba0142842f..5023cc33a0 100644 --- a/Webapp/SDAF/Models/CustomValidators.cs +++ b/Webapp/SDAF/Models/CustomValidators.cs @@ -233,6 +233,15 @@ public override bool IsValid(object value) return RegexValidation(value, pattern); } } + public class AMSIdValidator : ValidationAttribute + { + public override bool IsValid(object value) + { + string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Monitor\/Accounts\/[a-zA-Z0-9-_]+$"; + return RegexValidation(value, pattern); + } + } + public class ScaleSetIdValidator : ValidationAttribute { diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 1f1a94621b..669a20a8ad 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -378,5 +378,19 @@ public bool IsValid() public bool? use_spn{ get; set; } = true; + + /*---------------------------------------------------------------------------8 + | | + | AMS information | + | | + +------------------------------------4--------------------------------------*/ + + public bool? create_ams_instance { get; set; } = false; + + public string ams_instance_name { get; set; } + + [AMSIdValidator(ErrorMessage = "Invalid User Assigned id")] + public string ams_laws_arm_id { get; set; } + } } diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index b0999dc414..7f51032691 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -1371,5 +1371,38 @@ "Display": 3 } ] + }, + { + "Section": "Azure Monitor Settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", + "Parameters": [ + { + "Name": "create_ams_instance", + "Required": false, + "Description": "Defines if an AMS Instance should be created.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_instance_name", + "Required": false, + "Description": "Defines the name of the AMS instance", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_laws_arm_id", + "Required": false, + "Description": "Defines the Azure resource id for the Log analytics workspace in AMS", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] } ] diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 9a31c49b0c..76262d0958 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -526,3 +526,18 @@ $$utility_vm_nic_ips$$ # These tags will be applied to all resources $$tags$$ + +############################################################################################ +# # +# AMS Configuration # +# # +############################################################################################ + +# If true, an AMS instance will be created +$$create_ams_instance$$ + +# ams_instance_name If provided, the name of the AMS instance +$$ams_instance_name$$ + +# ams_laws_arm_id if provided, Azure resource id for the Log analytics workspace in AMS +$$ams_laws_arm_id$$ diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 99f8ef615a..2e44a21036 100644 --- 
a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -15,7 +15,7 @@ - + @@ -24,10 +24,10 @@ - - - - + + + + From ef94d649ec0e399b36ab83618a79c7259b2d62f2 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Wed, 21 Feb 2024 00:37:45 -0800 Subject: [PATCH 307/607] Move special services to specific folders. (#554) * Remove redundant services and start monitoring services * Update service start and enable tasks in main.yml * Add become flag to unzip task * Update node exporter package extraction mode * Update ansible role for Prometheus node exporter * Update node exporter mode * Remove redundant 'firewalld' service start --- .../roles-os/1.16-services/vars/os-services.yaml | 16 ---------------- .../roles-os/1.20-prometheus/tasks/main.yml | 14 +++++++++++++- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml index cde76ab082..c192491fe4 100644 --- a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml +++ b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml @@ -26,7 +26,6 @@ services: redhat7: - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } - - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'sapos', service: 'rpcbind', node_tier: 'scs', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } @@ -34,10 +33,6 @@ services: - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'all', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'all', state: 'enabled' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } @@ -45,7 +40,6 @@ services: - { tier: 'os', service: 'auditd', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } - - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'started' } @@ -58,17 +52,12 @@ services: - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'os', service: 'oddjobd', node_tier: 'all', state: 'disabled' } redhat9: - { tier: 'os', 
service: 'auditd', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } - - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'sapos', service: 'tuned', node_tier: 'all', state: 'started' } @@ -80,10 +69,6 @@ services: - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'started' } - { tier: 'ha', service: 'pcsd', node_tier: 'ers', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'started' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmcd', node_tier: 'all', state: 'enabled' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'started' } - - { tier: 'os', service: 'pmproxy', node_tier: 'all', state: 'enabled' } - { tier: 'ha', service: 'pcsd', node_tier: 'db2', state: 'enabled' } sles_sap12: @@ -106,6 +91,5 @@ services: - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'disabled' } - - { tier: 'os', service: 'firewalld', node_tier: 'all', state: 'started' } - { tier: 'os', service: 'waagent', node_tier: 'all', state: 'restarted' } # Ensure the service is restarted and swap space is available in case the handler is not called in 1.1/swap. - { tier: 'sapos', service: 'rpcbind', node_tier: 'scs', state: 'restarted' } diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 789bad83d2..3a850a8828 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -8,6 +8,18 @@ # | BEGIN | # +------------------------------------4---------------------------------------*/ +- name: "1.20 Packages: - Start and enable services required for monitoring" + block: + - name: "1.20 Packages: - Start and enable services required for monitoring" + ansible.builtin.service: + name: "{{ item }}" + state: started + enabled: true + with_items: + - pmcd + - pmproxy + when: ansible_os_family | upper == "REDHAT" + - name: "1.20 Packages: - Download node exporter package to enable monitoring" block: - name: "1.20 Packages: - Download node exporter package to enable monitoring" @@ -21,7 +33,7 @@ ansible.builtin.unarchive: src: "/tmp/node_exporter-1.3.1.linux-amd64.tar.gz" dest: "/tmp/" - mode: 0644 + remote_src: true - name: "1.20 Packages: - Enable node exporter" ansible.builtin.shell: | From 8d7763e9220d027fca46da0d855f67b21c2eafaf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 10:44:04 +0200 Subject: [PATCH 308/607] Add conditionals for enabling monitoring in main.yml --- .../ansible/roles-os/1.20-prometheus/tasks/main.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 3a850a8828..1c914c681e 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -9,6 +9,8 @@ # +------------------------------------4---------------------------------------*/ - name: "1.20 
Packages: - Start and enable services required for monitoring" + when: + - enable_os_monitoring or enable_ha_monitoring block: - name: "1.20 Packages: - Start and enable services required for monitoring" ansible.builtin.service: @@ -21,6 +23,8 @@ when: ansible_os_family | upper == "REDHAT" - name: "1.20 Packages: - Download node exporter package to enable monitoring" + when: + - enable_os_monitoring or enable_ha_monitoring block: - name: "1.20 Packages: - Download node exporter package to enable monitoring" ansible.builtin.get_url: @@ -40,6 +44,9 @@ nohup /tmp/node_exporter-1.3.1.linux-amd64/node_exporter --web.listen-address=':9100' & - name: "1.20.0 Packages: - Download HA cluster exporter package to enable monitoring" + when: + - enable_os_monitoring or enable_ha_monitoring + - ansible_os_family | upper == "SUSE" block: - name: "1.20 Packages: - Install ha cluster exporter package" community.general.zypper: @@ -47,9 +54,11 @@ state: present - name: "1.20 Packages: - Enable HA cluster exporter." ansible.builtin.shell: "nohup ha_cluster_exporter &" - when: ansible_os_family | upper == "SUSE" - name: "1.20.0 Packages: - Install pcp and pcp-pmda-hacluster package" + when: + - enable_os_monitoring or enable_ha_monitoring + - ansible_os_family | upper == "REDHAT" block: - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package" ansible.builtin.yum: @@ -61,7 +70,6 @@ ansible.builtin.shell: "./Install" args: chdir: "/var/lib/pcp/pmdas/hacluster/" - when: ansible_os_family | upper == "REDHAT" # /*----------------------------------------------------------------------------8 # | END | From a2665f101a3935f08c05c4af59b5ab7d4e514b05 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 21 Feb 2024 15:32:42 +0530 Subject: [PATCH 309/607] search in /usr/sap/ directory for hdbuserstore --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 1b994859cb..8ad654a629 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -353,7 +353,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" + paths: "/usr/sap/" file_type: file patterns: 'hdbuserstore' recurse: true From e8eb88cdace9329f58870d7ed79abacf31cffcc7 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 21 Feb 2024 15:40:39 +0530 Subject: [PATCH 310/607] update other path as well --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 8ad654a629..95228cbed2 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -464,7 +464,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" + paths: "/usr/sap/" file_type: file patterns: 'hdbuserstore' recurse: true From 8f9c7ad6c04abb4111018ff87592549fc41504c5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 13:15:03 +0200 Subject: [PATCH 311/607] Only output AMS resource if created --- .../terraform/terraform-units/modules/sap_landscape/outputs.tf | 2 +- 1 file changed, 1 
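
The hdbuserstore lookups in the two commits above recurse from the broad /usr/sap/ root, which is simple but can be slow on hosts with large instance trees; later commits in the series narrow the search back down to the HANA client directories. A minimal Python sketch of the same first-match-wins lookup (the SID and candidate roots below are illustrative):

    import os

    def find_first(filename, roots):
        # Scan each candidate root recursively and return the first hit,
        # mirroring the ansible.builtin.find task with recurse: true.
        # os.walk simply yields nothing for roots that do not exist.
        for root in roots:
            for dirpath, _dirnames, filenames in os.walk(root):
                if filename in filenames:
                    return os.path.join(dirpath, filename)
        return None

    sid = "X00"  # illustrative SID
    print(find_first("hdbuserstore", [
        f"/usr/sap/{sid}/SYS/exe/uc/linuxx86_64/hdbclient",
        f"/usr/sap/{sid}/hdbclient",
    ]))
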
insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index 3a10bafd67..925f509e7a 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -529,5 +529,5 @@ output "iSCSI_servers" { output "ams_resource_id" { description = "Azure resource identifier for the AMS resource" - value = azapi_resource.ams_instance[0].id + value = local.create_ams_instance ? azapi_resource.ams_instance[0].id : "" } From d8f6198a0bb48fc37a771af633128eb717e8f586 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 21 Feb 2024 21:04:58 +0530 Subject: [PATCH 312/607] Update paths for HANA client in Ansible playbook --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 95228cbed2..d18fd2b2f3 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -353,7 +353,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/, /usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true @@ -464,7 +464,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/, /usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true From 8a0c23ff18f636115777ffd33a29f842aaca12fb Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 21 Feb 2024 21:29:48 +0530 Subject: [PATCH 313/607] remove the extra space in the hdbuserstore search path --- deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index d18fd2b2f3..31587c1f35 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -353,7 +353,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/, /usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true @@ -464,7 +464,7 @@ - database_high_availability - platform == 'HANA' ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/, /usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true From bc02d88239abc95388338bbe82bac701bb3d40bf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 18:35:40 +0200 Subject: [PATCH 314/607] Update ams_resource_id in module.tf --- deploy/terraform/run/sap_system/module.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
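
One pattern worth noting before the Terraform changes that follow: the availability-set updates below replace a zone-count modulus with count.index % max(length(<availability set list>), 1), so the index is computed against the list actually being subscripted, and the max(..., 1) guard keeps the modulus non-zero when that list is empty. A small Python sketch of the arithmetic, with made-up identifiers:

    # Round-robin assignment of server indices to availability sets,
    # mirroring `count.index % max(length(...), 1)` in the Terraform below.
    def assigned_avset(index, avset_ids):
        if not avset_ids:
            # Terraform short-circuits this case via its own use_avset /
            # ternary guards; max(..., 1) only keeps the modulus valid.
            return None
        return avset_ids[index % max(len(avset_ids), 1)]

    avsets = ["z1-avset", "z2-avset"]  # made-up identifiers
    print([assigned_avset(i, avsets) for i in range(5)])
    # ['z1-avset', 'z2-avset', 'z1-avset', 'z2-avset', 'z1-avset']
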
a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 3f19dcefe0..1c4a31b47c 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -448,7 +448,7 @@ module "output_files" { ######################################################################################### # AMS # ######################################################################################### - ams_resource_id = coalesce(var.ams_resource_id, data.terraform_remote_state.landscape.outputs.ams_resource_id) + ams_resource_id = coalesce(var.ams_resource_id, try(data.terraform_remote_state.landscape.outputs.ams_resource_id, "")) enable_ha_monitoring = var.enable_ha_monitoring enable_os_monitoring = var.enable_os_monitoring } From 61cf31b5580240b070d78faee6776d8c1a6b5874 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 19:50:40 +0200 Subject: [PATCH 315/607] Update sap-parameters.yml template file path --- .../modules/sap_system/output_files/inventory.tf | 2 +- .../{sap-parameters.yml.tmpl => sap-parameters.tmpl} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename deploy/terraform/terraform-units/modules/sap_system/output_files/{sap-parameters.yml.tmpl => sap-parameters.tmpl} (100%) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 0bb4908092..5c7f596903 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -175,7 +175,7 @@ resource "local_file" "ansible_inventory_new_yml" { # } resource "local_file" "sap-parameters_yml" { - content = templatefile(format("%s/sap-parameters.yml.tmpl", path.module), { + content = templatefile(format("%s/sap-parameters.tmpl", path.module), { app_instance_number = var.app_instance_number bom = length(var.bom_name) > 0 ? var.bom_name : "" database_cluster_type = var.database_cluster_type diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl similarity index 100% rename from deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.yml.tmpl rename to deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl From 32e2f1d5354806e8ef529933ac5e4168b3ea7aa5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 19:58:09 +0200 Subject: [PATCH 316/607] Update availability set IDs in VM resource configuration --- .../modules/sap_system/anydb_node/vm-anydb.tf | 8 ++++---- .../modules/sap_system/app_tier/vm-webdisp.tf | 4 ++-- .../terraform-units/modules/sap_system/hdb_node/vm-hdb.tf | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 5243586592..42ff7663ca 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -135,8 +135,8 @@ resource "azurerm_linux_virtual_machine" "dbserver" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = var.database.use_avset ? ( local.availabilitysets_exist ? 
( - data.azurerm_availability_set.anydb[count.index % max(local.db_zone_count, 1)].id) : ( - azurerm_availability_set.anydb[count.index % max(local.db_zone_count, 1)].id + data.azurerm_availability_set.anydb[count.index % max(length(data.azurerm_availability_set.anydb), 1)].id) : ( + azurerm_availability_set.anydb[count.index % max(length(azurerm_availability_set.anydb), 1)].id ) ) : null @@ -272,8 +272,8 @@ resource "azurerm_windows_virtual_machine" "dbserver" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = var.database.use_avset ? ( local.availabilitysets_exist ? ( - data.azurerm_availability_set.anydb[count.index % max(local.db_zone_count, 1)].id) : ( - azurerm_availability_set.anydb[count.index % max(local.db_zone_count, 1)].id + data.azurerm_availability_set.anydb[count.index % max(length(data.azurerm_availability_set.anydb), 1)].id) : ( + azurerm_availability_set.anydb[count.index % max(length(azurerm_availability_set.anydb), 1)].id ) ) : null diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index b3f054be7c..42d0f5bf91 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -129,7 +129,7 @@ resource "azurerm_linux_virtual_machine" "web" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_web_avset ? ( - azurerm_availability_set.web[count.index % max(local.web_zone_count, 1)].id + azurerm_availability_set.web[count.index % max(length(azurerm_availability_set.web), 1)].id ) : ( null ) @@ -269,7 +269,7 @@ resource "azurerm_windows_virtual_machine" "web" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_web_avset ? ( - azurerm_availability_set.web[count.index % max(local.web_zone_count, 1)].id + azurerm_availability_set.web[count.index % max(length(azurerm_availability_set.web), 1)].id ) : ( null ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index e5e1c1ad69..2ea4bfe13c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -191,8 +191,8 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = local.use_avset && !local.enable_ultradisk ? ( local.availabilitysets_exist ? 
( - data.azurerm_availability_set.hdb[count.index % max(local.db_zone_count, 1)].id) : ( - azurerm_availability_set.hdb[count.index % max(local.db_zone_count, 1)].id + data.azurerm_availability_set.hdb[count.index % max(length(data.azurerm_availability_set.hdb), 1)].id) : ( + azurerm_availability_set.hdb[count.index % max(length(azurerm_availability_set.hdb), 1)].id ) ) : null From 772c53dde1c3adb9efff13e21504d7c34f2861c4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 20:00:10 +0200 Subject: [PATCH 317/607] Fix AMS resource ID in module.tf --- deploy/terraform/run/sap_system/module.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 1c4a31b47c..c95631c6c2 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -448,7 +448,7 @@ module "output_files" { ######################################################################################### # AMS # ######################################################################################### - ams_resource_id = coalesce(var.ams_resource_id, try(data.terraform_remote_state.landscape.outputs.ams_resource_id, "")) + ams_resource_id = try(coalesce(var.ams_resource_id, try(data.terraform_remote_state.landscape.outputs.ams_resource_id, "")),"") enable_ha_monitoring = var.enable_ha_monitoring enable_os_monitoring = var.enable_os_monitoring } From 8394c9b2921cfd481b5d2a260eac52c3c5d8cedb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 20:03:11 +0200 Subject: [PATCH 318/607] Refactor database server VM name output in anydb_node module --- .../terraform-units/modules/sap_system/anydb_node/outputs.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf index eff55991dc..ac6b29af3f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ -63,9 +63,9 @@ output "database_server_vm_ids" { output "database_server_vm_names" { description = "AnyDB Virtual machine names" value = local.enable_deployment ? ( - coalesce(azurerm_linux_virtual_machine.dbserver[*].name, + compact(concat(azurerm_linux_virtual_machine.dbserver[*].name, azurerm_windows_virtual_machine.dbserver[*].name - ) + )) ) : ( [""] ) From fee2c6cda1fc8fbfef7bc734c21e5422f579069c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 20:14:56 +0200 Subject: [PATCH 319/607] Fix disk size calculation in vm-webdisp.tf --- .../terraform-units/modules/sap_system/app_tier/vm-webdisp.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 42d0f5bf91..ef2b690c45 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -318,7 +318,7 @@ resource "azurerm_windows_virtual_machine" "web" { name = storage_type.name, id = disk_count, disk_type = storage_type.disk_type, - size_gb = storage_type.size_gb, + size_gb = storage_type.size_gb < 128 ? 
128 : storage_type.size_gb, caching = storage_type.caching } ] From dc68222ff1c0bb1d81f8f549607420a7b97214e8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 20:47:25 +0200 Subject: [PATCH 320/607] Add condition to create directory only for non-Windows systems --- deploy/ansible/playbook_00_validate_parameters.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index 4ccef8d314..c2618d38f6 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -594,6 +594,8 @@ path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' state: directory mode: '0755' + when: + - ansible_os_family != "Windows" - name: "0.0 Validations - Show Hosts" ansible.builtin.shell: set -o pipefail && cat /etc/hosts | grep -v -e "^#" | grep -v -e '^[[:space:]]*$' From a0dfb9505eabc0ca0a235803558265c0a0c4a960 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 22:24:47 +0200 Subject: [PATCH 321/607] Add Microsoft AD collection to Ansible dependencies --- deploy/scripts/configure_deployer.sh | 1 + .../modules/sap_deployer/templates/configure_deployer.sh.tmpl | 1 + 2 files changed, 2 insertions(+) diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index 46ed059e8e..10a41e3207 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -679,6 +679,7 @@ sudo -H "${ansible_venv_bin}/ansible-galaxy" collection install ansible.windows sudo -H "${ansible_venv_bin}/ansible-galaxy" collection install ansible.posix --force --collections-path "${ansible_collections}" sudo -H "${ansible_venv_bin}/ansible-galaxy" collection install ansible.utils --force --collections-path "${ansible_collections}" sudo -H "${ansible_venv_bin}/ansible-galaxy" collection install community.windows --force --collections-path "${ansible_collections}" +sudo -H "${ansible_venv_bin}/ansible-galaxy" collection install microsoft.ad --force --collections-path "${ansible_collections}" if [[ "${ansible_version}" == "2.11" ]]; then # ansible galaxy upstream has changed. Some collections are only available for install via old-galaxy.ansible.com diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 9787bf9462..d6c0127fe3 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -656,6 +656,7 @@ else sudo -H "$${ansible_venv_bin}/ansible-galaxy" collection install ansible.posix --force --collections-path "$${ansible_collections}" sudo -H "$${ansible_venv_bin}/ansible-galaxy" collection install ansible.utils --force --collections-path "$${ansible_collections}" sudo -H "$${ansible_venv_bin}/ansible-galaxy" collection install community.windows --force --collections-path "$${ansible_collections}" + sudo -H "$${ansible_venv_bin}/ansible-galaxy" collection install microsoft.ad --force --collections-path "$${ansible_collections}" if [[ "$${ansible_version}" == "2.11" ]]; then # ansible galaxy upstream has changed. 
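
The collection setup in this part of configure_deployer.sh is a series of individual sudo ansible-galaxy invocations; a condensed Python sketch of the same sequence, where the interpreter and collection paths stand in for the script's ansible_venv_bin and ansible_collections variables:

    import subprocess

    # Collections installed by configure_deployer.sh; microsoft.ad is the
    # one this commit adds.
    collections = [
        "ansible.windows", "ansible.posix", "ansible.utils",
        "community.windows", "microsoft.ad",
    ]

    ansible_galaxy = "/path/to/venv/bin/ansible-galaxy"  # stands in for ansible_venv_bin
    collections_path = "/path/to/collections"            # stands in for ansible_collections

    for name in collections:
        subprocess.run(
            [ansible_galaxy, "collection", "install", name,
             "--force", "--collections-path", collections_path],
            check=True,  # stop on the first failed install
        )
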
Some collections are only available for install via old-galaxy.ansible.com From 623643e8e2493b1f2761da0cbe7bb6c9145ffed5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 22:49:16 +0200 Subject: [PATCH 322/607] Update domain join role to use the new domain join module --- .../ansible/roles-os/windows/1.11-domain-join/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.11-domain-join/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.11-domain-join/tasks/main.yaml index 99bb9239c8..fe14939b11 100644 --- a/deploy/ansible/roles-os/windows/1.11-domain-join/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.11-domain-join/tasks/main.yaml @@ -19,7 +19,7 @@ # domain_ou_path needs to be defined in the ansible-input-api.yaml file # in the format as an example: 'OU=Windows,OU=Servers,DC=ansible,DC=local' - name: "WIN: Joining the domain {{ domain }}" - ansible.windows.win_domain_membership: + microsoft.ad.membership: dns_domain_name: "{{ domain_name }}" domain_admin_user: "{{ domain_service_account }}@{{ domain_name }}" domain_admin_password: "{{ domain_service_password }}" @@ -33,7 +33,7 @@ - domain_ou_path | trim | length > 1 - name: "WIN: Joining the domain {{ domain }}" - ansible.windows.win_domain_membership: + microsoft.ad.membership: dns_domain_name: "{{ domain_name }}" domain_admin_user: "{{ domain_service_account }}@{{ domain_name }}" domain_admin_password: "{{ domain_service_password }}" From d19bacd8e9363d848089154e992816a7ce6de122 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 22:59:56 +0200 Subject: [PATCH 323/607] Refactor OS configuration playbook for Windows systems --- deploy/ansible/playbook_01_os_base_config.yaml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 2d0c9c25aa..4e87cc3665 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -176,6 +176,8 @@ - ansible_os_family != "Windows" - name: OS Configuration - Windows based systems + when: + - ansible_os_family == "Windows" block: - name: "OS configuration playbook: - Set os fact" ansible.builtin.set_fact: @@ -227,25 +229,20 @@ - 1.11-domain-join - name: "OS configuration playbook: - Checks" + when: + - scs_high_availability block: - name: "OS configuration playbook: - Check if required DNS entries are made" ansible.windows.win_shell: "[System.Net.DNS]::Resolve('{{ sap_sid | lower }}scs{{ scs_instance_number }}cl1')" register: dns_check_results - when: - - scs_high_availability failed_when: dns_check_results.rc > 0 - name: "OS configuration playbook: - Check if required DNS entries are made - show results" ansible.builtin.debug: msg: "DNS query results: {{ dns_check_results.stdout }}" verbosity: 2 - when: dns_check_results is defined - when: - - ansible_os_family == "Windows" - - - when: - - ansible_os_family == "Windows" + when: + - dns_check_results is defined # /*---------------------------------------------------------------------------8 # | | From 0aa04dd330dd5430daf432f8f3edde44f59d916a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 23:18:20 +0200 Subject: [PATCH 324/607] Update group members in add_group_members.yaml --- .../windows/2.5-sap-users/tasks/add_group_members.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
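
The domain-join change below, and the account-name commits that follow it, standardise on user principal names (account@dns.domain) in place of down-level logon names (DOMAIN\account). The difference is purely one of string shape; a trivial Python illustration with made-up values:

    def down_level_name(netbios_domain: str, account: str) -> str:
        # The pre-change form, e.g. CONTOSO\svc-sql
        return f"{netbios_domain.upper()}\\{account}"

    def user_principal_name(account: str, dns_domain: str) -> str:
        # The form these commits standardise on, e.g. svc-sql@contoso.local
        return f"{account}@{dns_domain}"

    print(down_level_name("contoso", "svc-sql"))             # CONTOSO\svc-sql
    print(user_principal_name("svc-sql", "contoso.local"))   # svc-sql@contoso.local
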
a/deploy/ansible/roles-sap-os/windows/2.5-sap-users/tasks/add_group_members.yaml b/deploy/ansible/roles-sap-os/windows/2.5-sap-users/tasks/add_group_members.yaml index b9d604deba..471ffc46aa 100644 --- a/deploy/ansible/roles-sap-os/windows/2.5-sap-users/tasks/add_group_members.yaml +++ b/deploy/ansible/roles-sap-os/windows/2.5-sap-users/tasks/add_group_members.yaml @@ -6,7 +6,7 @@ ansible.windows.win_group_membership: name: "SAP_{{ sap_sid | upper }}_LocalAdmin" members: - - '{{ domain }}\{{ win_sap_admin }}' + - '{{ win_sap_admin }}@{{ domain_name }}' - '{{ sap_sid }}adm@{{ domain_name }}' state: present notify: reboot From bb9b595096e90917bc9d35f5f9bdb935c3abead2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 23:28:19 +0200 Subject: [PATCH 325/607] Update full variable in main.yaml --- .../roles-sap-os/windows/2.3-sap-exports/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/windows/2.3-sap-exports/tasks/main.yaml b/deploy/ansible/roles-sap-os/windows/2.3-sap-exports/tasks/main.yaml index cfb28614d3..3732e5c840 100644 --- a/deploy/ansible/roles-sap-os/windows/2.3-sap-exports/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/windows/2.3-sap-exports/tasks/main.yaml @@ -28,7 +28,7 @@ description: Share Installation Files Accross Servers path: "{{ target_media_location_windows }}" list: true - full: '{{ orchestration_ansible_user }},SAP_{{ sap_sid | upper }}_LocalAdmin,SAP_{{ sap_sid | upper }}_GlobalAdmin@{{ domain }},{{ domain_service_account }}@{{ domain_name }},{{ sql_svc_account }}' + full: '{{ orchestration_ansible_user }},SAP_{{ sap_sid | upper }}_LocalAdmin,SAP_{{ sap_sid | upper }}_GlobalAdmin@{{ domain_name }},{{ domain_service_account }}@{{ domain_name }},{{ sql_svc_account }}' state: present register: win_share_info when: From acf4fd3a10cfe549713bbc5b52bc1c3010c7f621 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 23:35:04 +0200 Subject: [PATCH 326/607] Change to use UPN --- deploy/ansible/playbook_02_os_sap_specific_config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 99b067b0ca..d50f751699 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -290,9 +290,9 @@ - name: 'SAP OS configuration playbook: - Calculating the domain service account names' ansible.builtin.set_fact: - domain_sqlsvc_account: '{{ domain | upper }}\{{ sql_svc_account_name }}' + domain_sqlsvc_account: '{{ sql_svc_account_name }}@{{ domain_name }}' when: - - domain is defined + - domain_name is defined - domain_sqlsvc_account is not defined - name: "SAP OS configuration playbook: - Add local groups and Permissions" From 5c37430908c2404ec4dc645e9c785cf63e370fc8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 21 Feb 2024 23:43:29 +0200 Subject: [PATCH 327/607] Use UPN --- .../ansible/roles-sap-os/windows/2.6-sap-mounts/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/windows/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/windows/2.6-sap-mounts/tasks/main.yaml index e458190a64..3839fa5e5d 100644 --- a/deploy/ansible/roles-sap-os/windows/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/windows/2.6-sap-mounts/tasks/main.yaml @@ -10,7 +10,7 @@ - name: "2.6 SAP Mounts: - Create a Mapped Drive under {{ 
win_mapped_drive }}" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' community.windows.win_mapped_drive: letter: "{{ win_mapped_drive }}" path: "{{ item.path }}" From e42919f8299207633daabf8a3ffbe1c6e4a946ac Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 22 Feb 2024 11:11:57 +0530 Subject: [PATCH 328/607] Update hdbuserstore paths in SAP installation tasks --- deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml | 4 ++-- deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 95db069bc7..c36850b6ae 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -247,7 +247,7 @@ block: - name: "PAS Install: Get hdbuserstore path" ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true @@ -452,7 +452,7 @@ - name: "PAS Install: Get hdbuserstore path" ansible.builtin.find: - paths: "/usr/sap/{{ sap_sid | upper }}" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 6755e3ea99..0b40de3291 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -262,7 +262,7 @@ - name: "APP Install: Get hdbuserstore path" ansible.builtin.find: - paths: "/usr/sap/{{ sap_sid | upper }}" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true @@ -329,7 +329,7 @@ - name: "APP Install: Get hdbuserstore path" ansible.builtin.find: - paths: "/usr/sap/{{ sap_sid | upper }}" + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true From a5e82f08613f4764e999b76d4b7036262a491971 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Sat, 24 Feb 2024 16:59:27 +0530 Subject: [PATCH 329/607] Remove duplicate when condition in deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml --- deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 1c914c681e..53ec666f7c 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -20,7 +20,6 @@ with_items: - pmcd - pmproxy - when: ansible_os_family | upper == "REDHAT" - name: "1.20 Packages: - Download node exporter package to enable monitoring" when: From 32ed6dd712f7ba0dc0654585eaf29be20d87d73d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 25 Feb 2024 13:49:32 +0200 Subject: [PATCH 330/607] Windows Update fixes 
for 2022 --- .../tasks/4.4.0.0-mssql-prerequisites.yaml | 2 ++ .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml index c5e987f3c6..d572374b54 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml @@ -133,6 +133,8 @@ - SecurityUpdates - CriticalUpdates - UpdateRollups + reject_list: + - KB5034439 register: win_updates notify: "WIN-SQL: Restart SQL Server VM" diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 89d86bd18e..9a3d8cd517 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -31,8 +31,8 @@ - name: 'WIN: Calculating the domain\service account names' ansible.builtin.set_fact: - domain_sqlsvc_account: '{{ domain | upper }}\{{ win_sql_svc_account }}' - domain_sqlagent_account: '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' + domain_sqlsvc_account: '{{ win_sql_svc_account }}@{{ domain_name }}\' + domain_sqlagent_account: '{{ win_sqlagent_svc_account }}@{{ domain_name }}\' when: - domain is defined From 07641cfa07c363e26af7a2665f826cf1a11c3481 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 25 Feb 2024 18:53:13 +0200 Subject: [PATCH 331/607] Add the ability to better control which KBs to install/ignore --- .../tasks/4.4.0.0-mssql-prerequisites.yaml | 4 +++- deploy/ansible/vars/ansible-input-api.yaml | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml index d572374b54..e9d0997541 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml @@ -134,7 +134,9 @@ - CriticalUpdates - UpdateRollups reject_list: - - KB5034439 + - {{ win_updates_reject_list }} + accept_list: + - {{ win_updates_accept_list }} register: win_updates notify: "WIN-SQL: Restart SQL Server VM" diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index a12a0fa2b5..94f0de1efe 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -175,6 +175,11 @@ domain_user_password: "{{ winadm_password }}" sid_service_password: "{{ svc_password }}" win_sap_admin: "SAP_{{ sap_sid | upper }}_GlobalAdmin" +# Windows patch related parameters + +win_updates_reject_list: ["KB5034439"] +win_updates_accept_list: [] + # SQL Server specific parameters use_sql_for_SAP: true data_disks: F:\{{ sap_sid | upper }}DATA1\{{ sap_sid | upper }}4DATA1.mdf,G:\{{ sap_sid | upper }}DATA2\{{ sap_sid | upper }}4DATA2.mdf,H:\{{ sap_sid | upper }}DATA3\{{ sap_sid | upper }}4DATA3.mdf,I:\{{ sap_sid | upper }}DATA4\{{ sap_sid | upper }}4DATA4.mdf
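
One templating detail links this commit to the next one: when a variable already holds a list, writing it as a YAML list item (reject_list: followed by - {{ win_updates_reject_list }}) at best wraps that list inside another one, and an unquoted value that starts with {{ additionally trips YAML's flow-mapping syntax. Assigning the variable directly, as the follow-up commit does, passes the list through unchanged. In Python terms, using the defaults from ansible-input-api.yaml:

    win_updates_reject_list = ["KB5034439"]
    win_updates_accept_list = []

    # reject_list: followed by `- "{{ win_updates_reject_list }}"`
    nested = [win_updates_reject_list]
    print(nested)   # [['KB5034439']] - a one-element list holding a list

    # reject_list: "{{ win_updates_reject_list }}"
    flat = win_updates_reject_list
    print(flat)     # ['KB5034439'] - the list itself
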
From 3a58ea36e7402444cb39619a56aa87e2c6db0a6d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 25 Feb 2024 21:07:32 +0200 Subject: [PATCH 332/607] Refactor win_updates configuration in mssql prerequisites --- .../tasks/4.4.0.0-mssql-prerequisites.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml index e9d0997541..dfe4273c33 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.0.0-mssql-prerequisites.yaml @@ -133,10 +133,8 @@ - SecurityUpdates - CriticalUpdates - UpdateRollups - reject_list: - - {{ win_updates_reject_list }} - accept_list: - - {{ win_updates_accept_list }} + reject_list: "{{ win_updates_reject_list }}" + accept_list: "{{ win_updates_accept_list }}" register: win_updates notify: "WIN-SQL: Restart SQL Server VM" From b3a9c6611cbe57a2d34cf2edbd79862872fbb862 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Sun, 25 Feb 2024 12:56:02 -0800 Subject: [PATCH 333/607] Changed condition (#555) --- deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 53ec666f7c..bf1d02e562 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -10,7 +10,8 @@ - name: "1.20 Packages: - Start and enable services required for monitoring" when: - - enable_os_monitoring or enable_ha_monitoring + - enable_ha_monitoring + - ansible_os_family | upper == "REDHAT" block: - name: "1.20 Packages: - Start and enable services required for monitoring" ansible.builtin.service: From 12b6c9125d61109c9cf6f669fb0cbe7cbda8b039 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 22:12:42 +0200 Subject: [PATCH 334/607] Add AMS Subnet to Web App --- Webapp/SDAF/Models/LandscapeModel.cs | 26 +- .../ParameterDetails/LandscapeDetails.json | 1553 +++++++++-------- .../ParameterDetails/LandscapeTemplate.txt | 73 +- 3 files changed, 875 insertions(+), 777 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 669a20a8ad..8e3031a612 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -157,11 +157,27 @@ public bool? 
peer_with_control_plane_vnet { get; set; } = true; -/*---------------------------------------------------------------------------8 -| | -| Miscallaneous information | -| | -+------------------------------------4--------------------------------------*/ + [SubnetArmIdValidator(ErrorMessage = "Invalid AMS subnet arm id")] + public string ams_subnet_arm_id { get; set; } + + //[Required] + [AddressPrefixValidator(ErrorMessage = "AMS subnet address space must be a valid RFC 1918 address")] + public string ams_subnet_address_prefix { get; set; } + + public string ams_subnet_name { get; set; } + + [NsgArmIdValidator(ErrorMessage = "Invalid AMS subnet nsg arm id")] + public string ams_subnet_nsg_arm_id { get; set; } + + public string ams_subnet_nsg_name { get; set; } + + + + /*---------------------------------------------------------------------------8 + | | + | Miscallaneous information | + | | + +------------------------------------4--------------------------------------*/ public string automation_username { get; set; } = "azureadm"; diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 7f51032691..8f0c74ba11 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -598,811 +598,870 @@ "Options": [], "Overrules": "", "Display": 3 - } - ] - }, - { - "Section": "ANF subnet", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#network-parameters", - "Parameters": [ - { - "Name": "anf_subnet_address_prefix", - "Required": false, - "Description": "Defines the subnet address range for the ANF subnet.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 1 - }, - { - "Name": "anf_subnet_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing subnet to use for ANF subnet.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "anf_subnet_address_prefix", - "Display": 3 - }, - { - "Name": "anf_subnet_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "anf_subnet_nsg_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing network security group.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "anf_subnet_nsg_name", - "Display": 3 - }, - { - "Name": "anf_subnet_nsg_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable for the network security group name", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - } - ] - }, - { - "Section": "Azure keyvault support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#key-vault-parameters", - "Parameters": [ - { - "Name": "user_keyvault_id", - "Required": false, - "Description": "Specifies the Azure resource identifier for an existing key vault designed to host secrets for the administrative users.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "spn_keyvault_id", - "Required": false, - "Description": "Specifies the Azure resource identifier for an existing key vault. 
Designed to host the deployment credentials used by the automation.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "enable_purge_control_for_keyvaults", - "Required": false, - "Description": "Disables the purge protection for Azure key vaults", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "additional_users_to_add_to_keyvault_policies", - "Required": false, - "Description": "Additional users (object IDs) to add to the key vault policies.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 2 - }, - - { - "Name": "enable_rbac_authorization_for_keyvault", - "Required": false, - "Description": "Controls the access policy model for the workload zone keyvault", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "soft_delete_retention_days", - "Required": false, - "Description": "The number of days that items should be retained in the soft delete period", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - } - ] - }, - { - "Section": "DNS", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", - "Parameters": [ - { - "Name": "dns_label", - "Required": false, - "Description": "Provides the DNS label to use for the Virtual Network.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "dns_server_list", - "Required": false, - "Description": "Boolean value indicating if a custom dns record should be created for the storage account", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "use_custom_dns_a_registration", - "Required": false, - "Description": "Boolean value indicating if custom dns registration is used", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "register_virtual_network_to_dns", - "Required": false, - "Description": "Defines if the Virtual network and the load balancers are registered with DNS", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "management_dns_subscription_id", - "Required": false, - "Description": "Subscription for the DNS zone, if different from the management subscription", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "management_dns_resourcegroup_name", - "Required": false, - "Description": "Resource group for the DNS zone, if different from the SAP Library resource group", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - } - ] - }, - { - "Section": "NFS support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#nfs-support", - "Parameters": [ - { - "Name": "NFS_provider", - "Required": false, - "Description": "Defines how NFS services are provided to the SAP systems: AFS (Azure Files for NFS), ANF (Azure NetApp Files), NFS (custom solution)", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "AFS", - "Value": "AFS" - }, - { - "Text": "ANF", - "Value": "ANF" - }, - { - "Text": "NFS", - "Value": "NFS" - } - ], - "Overrules": "", - "Display": 1 - }, - { - "Name": "use_AFS_for_shared_storage", - "Required": false, - "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.", - 
"Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "create_transport_storage", - "Required": false, - "Description": "Defines if the workload zone will host storage for the transport data.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 }, { - "Name": "transport_volume_size", - "Required": false, - "Description": "Defines the size of the transport volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - } - ] - }, - { - "Section": "ANF Support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-netapp-files-support", - "Parameters": [ - { - "Name": "ANF_account_name", - "Required": false, - "Description": "The name for the Netapp Account to create.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_account_arm_id", - "Required": false, - "Description": "Azure resource identifier for an existing Netapp Account", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_service_level", - "Required": false, - "Description": "The service level for the NetApp pool", - "Type": "lookup", - "Options": [ + "Section": "Azure Monitor subnet", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone", + "Parameters": [ { - "Text": "Ultra", - "Value": "Ultra" + "Name": "ams_subnet_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing subnet for the ams subnet", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "ams_subnet_address_prefix", + "Display": 3 }, { - "Text": "Premium", - "Value": "Premium" + "Name": "ams_subnet_address_prefix", + "Required": false, + "Description": "Defines the subnet address range for the ams subnet.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 }, { - "Text": "Standard", - "Value": "Standard" - } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_pool_size", - "Required": false, - "Description": "The pool size in TB for the NetApp pool.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_qos_type", - "Required": false, - "Description": "The Quality of Service type of the pool (Auto or Manual).", - "Type": "lookup", - "Options": [ - { - "Text": "Manual", - "Value": "Manual" + "Name": "ams_subnet_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 }, { - "Text": "Auto", - "Value": "Auto" - } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_use_existing_pool", - "Required": false, - "Description": "Use existing storage pool.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_pool_name", - "Required": false, - "Description": "the NetApp capacity pool name (if any)", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_transport_volume_throughput", - "Required": false, - "Description": "The throughput in MB/s for the transport volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_transport_volume_zone", - "Required": false, - "Description": "Azure NetApp transport volume availability zone.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - 
"Text": "1", - "Value": "1" - }, - { - "Text": "2", - "Value": "2" + "Name": "ams_subnet_nsg_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing network security group to use.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "ams_subnet_nsg_name", + "Display": 3 }, { - "Text": "3", - "Value": "3" + "Name": "ams_subnet_nsg_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable for the network security group name", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_install_volume_throughput", - "Required": false, - "Description": "The throughput in MB/s for the install volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "ANF_install_volume_zone", - "Required": false, - "Description": "Azure NetApp install volume availability zone.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "1", - "Value": "1" - }, + "Section": "ANF subnet", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#network-parameters", + "Parameters": [ { - "Text": "2", - "Value": "2" + "Name": "anf_subnet_address_prefix", + "Required": false, + "Description": "Defines the subnet address range for the ANF subnet.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 1 }, - { - "Text": "3", - "Value": "3" - } - ], - "Overrules": "", - "Display": 2 - } - ] - }, - { - "Section": "iSCSI support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#iscsi-parameters", - "Parameters": [ - { - "Name": "iscsi_count", - "Required": false, - "Description": "The number of iSCSI virtual machines.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_size", - "Required": false, - "Description": "Size of iSCSI Virtual Machines to be created.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_useDHCP", - "Required": false, - "Description": "Controls whether to use dynamic IP addresses provided by the Azure subnet", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_image_offering", - "Required": false, - "Description": "Defines the Virtual Machine image for the iSCSI devices.", - "Type": "image_dropdown", - "Options": [], - "Overrules": "iscsi_image", - "Display": 3 - }, - { - "Name": "iscsi_image", - "Required": false, - "Description": "Defines the Virtual Machine image for the iSCSI devices.", - "Type": "image", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_authentication_type", - "Required": false, - "Description": "Defines the Virtual Machine authentication type for the iSCSI devices.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_authentication_username", - "Required": false, - "Description": "Defines the username for the iSCSI devices.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_nic_ips", - "Required": false, - "Description": "Defines the IP Addresses for the iSCSI 
devices.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_vm_zones", - "Required": false, - "Description": "Defines the Availability zones for the iSCSI devices.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" + { + "Name": "anf_subnet_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing subnet to use for ANF subnet.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "anf_subnet_address_prefix", + "Display": 3 }, { - "Text": "1", - "Value": "1" + "Name": "anf_subnet_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 }, { - "Text": "2", - "Value": "2" + "Name": "anf_subnet_nsg_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing network security group.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "anf_subnet_nsg_name", + "Display": 3 }, { - "Text": "3", - "Value": "3" + "Name": "anf_subnet_nsg_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable for the network security group name", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "user_assigned_identity_id", - "Required": false, - "Description": "Azure resource identifier for User assigned identity.", - "Type": "lookup", - "Options": [], - "Overrules": "", - "Display": 3 - } + "Section": "Azure keyvault support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#key-vault-parameters", + "Parameters": [ + { + "Name": "user_keyvault_id", + "Required": false, + "Description": "Specifies the Azure resource identifier for an existing key vault designed to host secrets for the administrative users.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "spn_keyvault_id", + "Required": false, + "Description": "Specifies the Azure resource identifier for an existing key vault. Designed to host the deployment credentials used by the automation.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "enable_purge_control_for_keyvaults", + "Required": false, + "Description": "Disables the purge protection for Azure key vaults", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "additional_users_to_add_to_keyvault_policies", + "Required": false, + "Description": "Additional users (object IDs) to add to the key vault policies.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 2 + }, - ] - }, - { - "Section": "Utility VM", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", - "Parameters": [ - { - "Name": "utility_vm_count", - "Required": false, - "Description": "Defines number of utility virtual machines to deploy. 
The utility virtual machines can be used to host SAPGui and other tools.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "utility_vm_size", - "Required": false, - "Description": "Defines the size for the utility virtual machine. Default size is Standard_D4ds_v4.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_os_disk_size", - "Required": false, - "Description": "Defines the size of the OS disk for the Virtual Machine. Default size is 128", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_os_disk_type", - "Required": false, - "Description": "Defines the type of the OS disk for the Virtual Machine. Default size is Premium_LRS.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_useDHCP", - "Required": false, - "Description": "Defines if Azure subnet provided IP addresses should be used for the utility virtual machines.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_image_offering", - "Required": false, - "Description": "Defines the virtual machine image to use for the utility virtual machines.", - "Type": "image_dropdown", - "Options": [], - "Overrules": "utility_vm_image", - "Display": 2 - }, - { - "Name": "utility_vm_image", - "Required": false, - "Description": "Defines the virtual machine image to use for the utility virtual machines.", - "Type": "image", - "Options": [ { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_nic_ips", - "Required": false, - "Description": "Provides the static IP addresses for the utility virtual machines.", - "Type": "list", - "Options": [ + "Name": "enable_rbac_authorization_for_keyvault", + "Required": false, + "Description": "Controls the access policy model for the workload zone keyvault", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, { - "Text": "", - "Value": "" + "Name": "soft_delete_retention_days", + "Required": false, + "Description": "The number of days that items should be retained in the soft delete period", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 } - ], - "Overrules": "", - "Display": 3 - } - ] - }, - { - "Section": "Storage Accounts", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-files-nfs-support", - "Parameters": [ - { - "Name": "install_volume_size", - "Required": false, - "Description": "The volume size in GB for the transport share", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 + ] }, { - "Name": "install_storage_account_id", - "Required": false, - "Description": "Azure Resource Identifier for the Installation media storage account.", - "Type": "lookup", - "Options": [ + "Section": "DNS", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "dns_label", + "Required": false, + "Description": "Provides the DNS label to use for the Virtual Network.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "dns_server_list", + "Required": false, + "Description": "Boolean value indicating if a custom dns record should be created for the storage account", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + 
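The parameter metadata in this file drives the web app's forms, and each Name field corresponds to a variable in the generated workload-zone tfvars file, as the $$...$$ placeholders in LandscapeTemplate.txt further down suggest. As a minimal sketch with purely illustrative values (not defaults), the utility VM and DNS parameters described here might be set as follows; note that dns_server_list, despite the boolean-flavored description it carries in this file (apparently copied from another parameter), is typed as a list and holds the DNS server IP addresses for the virtual network:

    utility_vm_count        = 1
    utility_vm_size         = "Standard_D4ds_v4"
    utility_vm_os_disk_size = 128
    utility_vm_os_disk_type = "Premium_LRS"
    utility_vm_useDHCP      = true

    dns_label                       = "sap.contoso.net"
    dns_server_list                 = ["10.10.0.10", "10.10.0.11"]
    use_custom_dns_a_registration   = false
    register_virtual_network_to_dns = true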
"Display": 3 + }, + { + "Name": "use_custom_dns_a_registration", + "Required": false, + "Description": "Boolean value indicating if custom dns registration is used", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "register_virtual_network_to_dns", + "Required": false, + "Description": "Defines if the Virtual network and the load balancers are registered with DNS", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "management_dns_subscription_id", + "Required": false, + "Description": "Subscription for the DNS zone, if different from the management subscription", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "management_dns_resourcegroup_name", + "Required": false, + "Description": "Resource group for the DNS zone, if different from the SAP Library resource group", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "install_private_endpoint_id", - "Required": false, - "Description": "Azure Resource Identifier for a private endpoint for the installation storage account.", - "Type": "lookup", - "Options": [ + "Section": "NFS support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#nfs-support", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "NFS_provider", + "Required": false, + "Description": "Defines how NFS services are provided to the SAP systems: AFS (Azure Files for NFS), ANF (Azure NetApp Files), NFS (custom solution)", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "AFS", + "Value": "AFS" + }, + { + "Text": "ANF", + "Value": "ANF" + }, + { + "Text": "NFS", + "Value": "NFS" + } + ], + "Overrules": "", + "Display": 1 + }, + { + "Name": "use_AFS_for_shared_storage", + "Required": false, + "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "create_transport_storage", + "Required": false, + "Description": "Defines if the workload zone will host storage for the transport data.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "transport_volume_size", + "Required": false, + "Description": "Defines the size of the transport volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 } - ], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "storage_account_replication_type", - "Required": false, - "Description": "Defines the replication type for Azure Files for NFS install account.", - "Type": "lookup", - "Options": [ + "Section": "ANF Support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-netapp-files-support", + "Parameters": [ + { + "Name": "ANF_account_name", + "Required": false, + "Description": "The name for the Netapp Account to create.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_account_arm_id", + "Required": false, + "Description": "Azure resource identifier for an existing Netapp Account", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_service_level", + "Required": false, + "Description": "The service level for the NetApp pool", + "Type": "lookup", + "Options": [ + { + "Text": "Ultra", + "Value": "Ultra" + 
}, + { + "Text": "Premium", + "Value": "Premium" + }, + { + "Text": "Standard", + "Value": "Standard" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_pool_size", + "Required": false, + "Description": "The pool size in TB for the NetApp pool.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_qos_type", + "Required": false, + "Description": "The Quality of Service type of the pool (Auto or Manual).", + "Type": "lookup", + "Options": [ + { + "Text": "Manual", + "Value": "Manual" + }, + { + "Text": "Auto", + "Value": "Auto" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_use_existing_pool", + "Required": false, + "Description": "Use existing storage pool.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_pool_name", + "Required": false, + "Description": "the NetApp capacity pool name (if any)", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_transport_volume_throughput", + "Required": false, + "Description": "The throughput in MB/s for the transport volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_transport_volume_zone", + "Required": false, + "Description": "Azure NetApp transport volume availability zone.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 + }, { - "Text": "LRS", - "Value": "LRS" + "Name": "ANF_install_volume_throughput", + "Required": false, + "Description": "The throughput in MB/s for the install volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 }, { - "Text": "ZRS", - "Value": "ZRS" + "Name": "ANF_install_volume_zone", + "Required": false, + "Description": "Azure NetApp install volume availability zone.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 } - ], - "Overrules": "", - "Display": 2 + ] }, { - "Name": "diagnostics_storage_account_arm_id", - "Required": false, - "Description": "Azure resource id for the diagnostics storage account", - "Type": "lookup", - "Options": [ + "Section": "iSCSI support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#iscsi-parameters", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "iscsi_count", + "Required": false, + "Description": "The number of iSCSI virtual machines.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_size", + "Required": false, + "Description": "Size of iSCSI Virtual Machines to be created.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_useDHCP", + "Required": false, + "Description": "Controls whether to use dynamic IP addresses provided by the Azure subnet", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_image_offering", + "Required": false, + "Description": "Defines the Virtual Machine image for the iSCSI devices.", + "Type": "image_dropdown", + "Options": [], + "Overrules": "iscsi_image", + "Display": 3 + }, + { + "Name": 
"iscsi_image", + "Required": false, + "Description": "Defines the Virtual Machine image for the iSCSI devices.", + "Type": "image", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_authentication_type", + "Required": false, + "Description": "Defines the Virtual Machine authentication type for the iSCSI devices.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_authentication_username", + "Required": false, + "Description": "Defines the username for the iSCSI devices.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_nic_ips", + "Required": false, + "Description": "Defines the IP Addresses for the iSCSI devices.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_vm_zones", + "Required": false, + "Description": "Defines the Availability zones for the iSCSI devices.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "user_assigned_identity_id", + "Required": false, + "Description": "Azure resource identifier for User assigned identity.", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 3 + + ] }, { - "Name": "witness_storage_account_arm_id", - "Required": false, - "Description": "Azure resource id for the witness storage account", - "Type": "lookup", - "Options": [ + "Section": "Utility VM", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "utility_vm_count", + "Required": false, + "Description": "Defines number of utility virtual machines to deploy. The utility virtual machines can be used to host SAPGui and other tools.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "utility_vm_size", + "Required": false, + "Description": "Defines the size for the utility virtual machine. Default size is Standard_D4ds_v4.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_os_disk_size", + "Required": false, + "Description": "Defines the size of the OS disk for the Virtual Machine. Default size is 128", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_os_disk_type", + "Required": false, + "Description": "Defines the type of the OS disk for the Virtual Machine. 
Default size is Premium_LRS.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_useDHCP", + "Required": false, + "Description": "Defines if Azure subnet provided IP addresses should be used for the utility virtual machines.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_image_offering", + "Required": false, + "Description": "Defines the virtual machine image to use for the utility virtual machines.", + "Type": "image_dropdown", + "Options": [], + "Overrules": "utility_vm_image", + "Display": 2 + }, + { + "Name": "utility_vm_image", + "Required": false, + "Description": "Defines the virtual machine image to use for the utility virtual machines.", + "Type": "image", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_nic_ips", + "Required": false, + "Description": "Provides the static IP addresses for the utility virtual machines.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "transport_storage_account_id", - "Required": false, - "Description": "Azure Resource Identifier for the Transport media storage account.", - "Type": "lookup", - "Options": [ + "Section": "Storage Accounts", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-files-nfs-support", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "install_volume_size", + "Required": false, + "Description": "The volume size in GB for the transport share", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "install_storage_account_id", + "Required": false, + "Description": "Azure Resource Identifier for the Installation media storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "install_private_endpoint_id", + "Required": false, + "Description": "Azure Resource Identifier for a private endpoint for the installation storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "storage_account_replication_type", + "Required": false, + "Description": "Defines the replication type for Azure Files for NFS install account.", + "Type": "lookup", + "Options": [ + { + "Text": "LRS", + "Value": "LRS" + }, + { + "Text": "ZRS", + "Value": "ZRS" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "diagnostics_storage_account_arm_id", + "Required": false, + "Description": "Azure resource id for the diagnostics storage account", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "witness_storage_account_arm_id", + "Required": false, + "Description": "Azure resource id for the witness storage account", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "transport_storage_account_id", + "Required": false, + "Description": "Azure Resource Identifier for the Transport media storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "transport_private_endpoint_id", + "Required": false, + "Description": "Azure Resource Identifier for a 
private endpoint connection for the transport storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 } - ], - "Overrules": "", - "Display": 3 + ] }, { - "Name": "transport_private_endpoint_id", - "Required": false, - "Description": "Azure Resource Identifier for a private endpoint connection for the transport storage account.", - "Type": "lookup", - "Options": [ + "Section": "Azure Monitor Settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", + "Parameters": [ { - "Text": "", - "Value": "" + "Name": "create_ams_instance", + "Required": false, + "Description": "Defines if an AMS Instance should be created.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_instance_name", + "Required": false, + "Description": "Defines the name of the AMS instance", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_laws_arm_id", + "Required": false, + "Description": "Defines the Azure resource id for the Log analytics workspace in AMS", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 } - ], - "Overrules": "", - "Display": 3 - } - ] - }, - { - "Section": "Azure Monitor Settings", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", - "Parameters": [ - { - "Name": "create_ams_instance", - "Required": false, - "Description": "Defines if an AMS Instance should be created.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ams_instance_name", - "Required": false, - "Description": "Defines the name of the AMS instance", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ams_laws_arm_id", - "Required": false, - "Description": "Defines the Azure resource id for the Log analytics workspace in AMS", - "Type": "lookup", - "Options": [], - "Overrules": "", - "Display": 2 + ] } ] - } -] diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 76262d0958..b21a8d1e22 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -210,6 +210,54 @@ $$anf_subnet_nsg_name$$ # anf_subnet_nsg_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing network security group to use $$anf_subnet_nsg_arm_id$$ + +########################################################################### +# # +# ISCSI Networking # +# # +########################################################################### + +/* iscsi subnet information */ +# If defined these parameters control the subnet name and the subnet prefix +# iscsi_subnet_name is an optional parameter and should only be used if the default naming is not acceptable +$$iscsi_subnet_name$$ + +# iscsi_subnet_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing subnet +$$iscsi_subnet_arm_id$$ + +# iscsi_subnet_address_prefix is a mandatory parameter if the subnets are not defined in the workload or if existing subnets are not used +$$iscsi_subnet_address_prefix$$ + +# iscsi_subnet_nsg_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing nsg +$$iscsi_subnet_nsg_arm_id$$ + +# iscsi_subnet_nsg_name is an optional parameter and should 
only be used if the default naming is not acceptable for the network security group name +$$iscsi_subnet_nsg_name$$ + +########################################################################### +# # +# AMS Networking # +# # +########################################################################### + +/* ams subnet information */ +# If defined these parameters control the subnet name and the subnet prefix +# ams_subnet_name is an optional parameter and should only be used if the default naming is not acceptable +$$ams_subnet_name$$ + +# ams_subnet_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing subnet +$$ams_subnet_arm_id$$ + +# ams_subnet_address_prefix is a mandatory parameter if the subnets are not defined in the workload or if existing subnets are not used +$$ams_subnet_address_prefix$$ + +# ams_subnet_nsg_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing nsg +$$ams_subnet_nsg_arm_id$$ + +# ams_subnet_nsg_name is an optional parameter and should only be used if the default naming is not acceptable for the network security group name +$$ams_subnet_nsg_name$$ + + ######################################################################################### # # # DNS Settings # @@ -412,31 +460,6 @@ $$ANF_install_volume_size$$ # ANF_install_volume_zone is the zone for the transport volume $$ANF_install_volume_zone$$ - -########################################################################### -# # -# ISCSI Networking # -# # -########################################################################### - -/* iscsi subnet information */ -# If defined these parameters control the subnet name and the subnet prefix -# iscsi_subnet_name is an optional parameter and should only be used if the default naming is not acceptable -$$iscsi_subnet_name$$ - -# iscsi_subnet_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing subnet -$$iscsi_subnet_arm_id$$ - -# iscsi_subnet_address_prefix is a mandatory parameter if the subnets are not defined in the workload or if existing subnets are not used -$$iscsi_subnet_address_prefix$$ - -# iscsi_subnet_nsg_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing nsg -$$iscsi_subnet_nsg_arm_id$$ - -# iscsi_subnet_nsg_name is an optional parameter and should only be used if the default naming is not acceptable for the network security group name -$$iscsi_subnet_nsg_name$$ - - ########################################################################### # # # ISCSI Devices # From 251dc297c910e33f9fb930192f629c2361c5c370 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 22:35:50 +0200 Subject: [PATCH 335/607] Add support for ASR created disks and VMS --- .../modules/sap_landscape/vm.tf | 6 ++++- .../modules/sap_system/anydb_node/vm-anydb.tf | 14 +++++++++-- .../sap_system/anydb_node/vm-observer.tf | 5 ++++ .../modules/sap_system/app_tier/vm-app.tf | 21 ++++++++++++++++ .../modules/sap_system/app_tier/vm-scs.tf | 24 +++++++++++++++++++ .../modules/sap_system/app_tier/vm-webdisp.tf | 17 +++++++++++++ .../common_infrastructure/vm-anchor.tf | 11 +++++++++ .../modules/sap_system/hdb_node/vm-hdb.tf | 19 +++++++++++++++ 8 files changed, 114 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 206cb47bb3..d16a528c93 100644 --- 
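The LandscapeTemplate.txt sections above rely on $$parameter$$ tokens, which the web app presumably substitutes with the configured values (or leaves commented out when unset) when it emits the workload-zone tfvars file. A sketch of how the new AMS networking block might render, with illustrative values:

    # ams_subnet_name is an optional parameter and should only be used if the default naming is not acceptable
    ams_subnet_name = "ams-subnet"

    # ams_subnet_address_prefix is a mandatory parameter if the subnets are not defined in the workload or if existing subnets are not used
    ams_subnet_address_prefix = "10.110.0.48/28"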
a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -87,7 +87,11 @@ resource "azurerm_windows_virtual_machine" "utility_vm" { version = var.vm_settings.image.version } - + lifecycle { + ignore_changes = [ + source_image_id + ] + } } # Create the Linux Application VM(s) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 42ff7663ca..4c64b01a74 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -233,7 +233,8 @@ resource "azurerm_linux_virtual_machine" "dbserver" { lifecycle { ignore_changes = [ // Ignore changes to computername - computer_name + computer_name, + source_image_id ] } @@ -359,7 +360,8 @@ resource "azurerm_windows_virtual_machine" "dbserver" { lifecycle { ignore_changes = [ // Ignore changes to computername - computer_name + computer_name, + source_image_id ] } } @@ -407,6 +409,14 @@ resource "azurerm_managed_disk" "disks" { tags = var.tags + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } + } // Manages attaching a Disk to a Virtual Machine diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf index e41287613a..06a71ce394 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf @@ -179,4 +179,9 @@ resource "azurerm_windows_virtual_machine" "observer" { boot_diagnostics { storage_account_uri = var.storage_bootdiag_endpoint } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 0973827209..6e00c45219 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -235,6 +235,12 @@ resource "azurerm_linux_virtual_machine" "app" { identity_ids = [var.application_tier.user_assigned_identity_id] } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } + } # Create the Windows Application VM(s) @@ -355,6 +361,12 @@ resource "azurerm_windows_virtual_machine" "app" { identity_ids = [var.application_tier.user_assigned_identity_id] } } + lifecycle { + ignore_changes = [ + // Ignore changes to computername + source_image_id + ] + } } @@ -383,6 +395,15 @@ resource "azurerm_managed_disk" "app" { ) ) tags = var.tags + + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } + } resource "azurerm_virtual_machine_data_disk_attachment" "app" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index a1650c2ad8..513acdd036 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -239,6 +239,11 @@ resource "azurerm_linux_virtual_machine" "scs" { identity_ids = length(var.application_tier.user_assigned_identity_id) > 0 ? 
[var.application_tier.user_assigned_identity_id] : null } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -418,6 +423,11 @@ resource "azurerm_windows_virtual_machine" "scs" { identity_ids = [var.application_tier.user_assigned_identity_id] } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -449,6 +459,13 @@ resource "azurerm_managed_disk" "scs" { null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } } @@ -572,6 +589,13 @@ resource "azurerm_managed_disk" "cluster" { )) : ( null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } } resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index ef2b690c45..9b3bc4c6b6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -240,6 +240,11 @@ resource "azurerm_linux_virtual_machine" "web" { identity_ids = [var.application_tier.user_assigned_identity_id] } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -369,6 +374,11 @@ resource "azurerm_windows_virtual_machine" "web" { identity_ids = [var.application_tier.user_assigned_identity_id] } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -398,6 +408,13 @@ resource "azurerm_managed_disk" "web" { )) : ( null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } } resource "azurerm_virtual_machine_data_disk_attachment" "web" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf index bee57f8261..14a60fd489 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf @@ -106,6 +106,11 @@ resource "azurerm_linux_virtual_machine" "anchor" { ultra_ssd_enabled = local.enable_anchor_ultra[count.index] } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -166,6 +171,12 @@ resource "azurerm_windows_virtual_machine" "anchor" { ultra_ssd_enabled = local.enable_anchor_ultra[count.index] } + lifecycle { + ignore_changes = [ + source_image_id + ] + } + patch_mode = "Manual" license_type = length(var.license_type) > 0 ? var.license_type : null } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 2ea4bfe13c..5e9230a7d9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -291,6 +291,11 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { identity_ids = length(var.database.user_assigned_identity_id) > 0 ? 
[var.database.user_assigned_identity_id] : null } } + lifecycle { + ignore_changes = [ + source_image_id + ] + } } @@ -359,6 +364,13 @@ resource "azurerm_managed_disk" "data_disk" { null ) tags = var.tags + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } } @@ -432,6 +444,13 @@ resource "azurerm_managed_disk" "cluster" { azurerm_linux_virtual_machine.vm_dbnode[local.data_disk_list[count.index].vm_index].zone) : ( null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id + ] + } } From c9f9bdc01c66f5aaf77403289a801ea79afa488b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 22:42:02 +0200 Subject: [PATCH 336/607] Fix bug in login functionality --- .../ParameterDetails/LandscapeDetails.json | 1636 ++++++++--------- 1 file changed, 818 insertions(+), 818 deletions(-) diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 8f0c74ba11..4c14cd2376 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -293,7 +293,6 @@ "Overrules": "", "Display": 2 } - ] }, { @@ -598,870 +597,871 @@ "Options": [], "Overrules": "", "Display": 3 - }, + } + ] + }, + { + "Section": "Azure Monitor subnet", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone", + "Parameters": [ { - "Section": "Azure Monitor subnet", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone", - "Parameters": [ - { - "Name": "ams_subnet_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing subnet for the ams subnet", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "ams_subnet_address_prefix", - "Display": 3 - }, - { - "Name": "ams_subnet_address_prefix", - "Required": false, - "Description": "Defines the subnet address range for the ams subnet.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ams_subnet_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ams_subnet_nsg_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing network security group to use.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "ams_subnet_nsg_name", - "Display": 3 - }, + "Name": "ams_subnet_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing subnet for the ams subnet", + "Type": "lookup", + "Options": [ { - "Name": "ams_subnet_nsg_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable for the network security group name", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 + "Text": "", + "Value": "" } - ] + ], + "Overrules": "ams_subnet_address_prefix", + "Display": 3 }, { - "Section": "ANF subnet", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#network-parameters", - "Parameters": [ - { - "Name": "anf_subnet_address_prefix", - "Required": false, - "Description": "Defines the subnet address range for the ANF subnet.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 1 - }, - { 
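Patch 335 above applies one recurring pattern: every VM and managed-disk resource gains a lifecycle block whose ignore_changes list covers the attributes that Azure Site Recovery rewrites when it re-creates a machine or disk. A restored disk reports a snapshot-based create_option and a source_resource_id, and a failed-back VM references a recovery image rather than the marketplace image recorded in state; without the block, the next terraform apply would see that drift and destroy and re-create the resource. A minimal, self-contained sketch of the pattern (the resource name, location, and sizes are illustrative, not the framework's actual definitions):

    resource "azurerm_managed_disk" "example" {
      name                 = "example-data-disk"
      location             = "westeurope"
      resource_group_name  = "example-rg"
      storage_account_type = "Premium_LRS"
      create_option        = "Empty"
      disk_size_gb         = 128

      lifecycle {
        # Attributes ASR changes on restore; ignoring them keeps Terraform
        # from replacing a disk that ASR has already re-created.
        ignore_changes = [
          create_option,
          hyper_v_generation,
          source_resource_id
        ]
      }
    }

The same reasoning gives the azurerm_linux_virtual_machine and azurerm_windows_virtual_machine resources in the patch their ignore_changes entries for source_image_id.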
- "Name": "anf_subnet_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing subnet to use for ANF subnet.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "anf_subnet_address_prefix", - "Display": 3 - }, - { - "Name": "anf_subnet_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "anf_subnet_nsg_arm_id", - "Required": false, - "Description": "Specifies Azure resource identifier for the existing network security group.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "anf_subnet_nsg_name", - "Display": 3 - }, + "Name": "ams_subnet_address_prefix", + "Required": false, + "Description": "Defines the subnet address range for the ams subnet.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ams_subnet_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ams_subnet_nsg_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing network security group to use.", + "Type": "lookup", + "Options": [ { - "Name": "anf_subnet_nsg_name", - "Required": false, - "Description": "Should only be used if the default naming is not acceptable for the network security group name", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 + "Text": "", + "Value": "" } - ] - }, - { - "Section": "Azure keyvault support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#key-vault-parameters", - "Parameters": [ - { - "Name": "user_keyvault_id", - "Required": false, - "Description": "Specifies the Azure resource identifier for an existing key vault designed to host secrets for the administrative users.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "spn_keyvault_id", - "Required": false, - "Description": "Specifies the Azure resource identifier for an existing key vault. 
Designed to host the deployment credentials used by the automation.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "enable_purge_control_for_keyvaults", - "Required": false, - "Description": "Disables the purge protection for Azure key vaults", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "additional_users_to_add_to_keyvault_policies", - "Required": false, - "Description": "Additional users (object IDs) to add to the key vault policies.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 2 - }, - - { - "Name": "enable_rbac_authorization_for_keyvault", - "Required": false, - "Description": "Controls the access policy model for the workload zone keyvault", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, + ], + "Overrules": "ams_subnet_nsg_name", + "Display": 3 + }, + { + "Name": "ams_subnet_nsg_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable for the network security group name", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + } + ] + }, + { + "Section": "ANF subnet", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#network-parameters", + "Parameters": [ + { + "Name": "anf_subnet_address_prefix", + "Required": false, + "Description": "Defines the subnet address range for the ANF subnet.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 1 + }, + { + "Name": "anf_subnet_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing subnet to use for ANF subnet.", + "Type": "lookup", + "Options": [ { - "Name": "soft_delete_retention_days", - "Required": false, - "Description": "The number of days that items should be retained in the soft delete period", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 + "Text": "", + "Value": "" } - ] + ], + "Overrules": "anf_subnet_address_prefix", + "Display": 3 }, { - "Section": "DNS", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", - "Parameters": [ - { - "Name": "dns_label", - "Required": false, - "Description": "Provides the DNS label to use for the Virtual Network.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "dns_server_list", - "Required": false, - "Description": "Boolean value indicating if a custom dns record should be created for the storage account", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "use_custom_dns_a_registration", - "Required": false, - "Description": "Boolean value indicating if custom dns registration is used", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "register_virtual_network_to_dns", - "Required": false, - "Description": "Defines if the Virtual network and the load balancers are registered with DNS", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "management_dns_subscription_id", - "Required": false, - "Description": "Subscription for the DNS zone, if different from the management subscription", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, + "Name": "anf_subnet_name", + "Required": false, + "Description": 
"Should only be used if the default naming is not acceptable.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "anf_subnet_nsg_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing network security group.", + "Type": "lookup", + "Options": [ { - "Name": "management_dns_resourcegroup_name", - "Required": false, - "Description": "Resource group for the DNS zone, if different from the SAP Library resource group", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 + "Text": "", + "Value": "" } - ] - }, - { - "Section": "NFS support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#nfs-support", - "Parameters": [ - { - "Name": "NFS_provider", - "Required": false, - "Description": "Defines how NFS services are provided to the SAP systems: AFS (Azure Files for NFS), ANF (Azure NetApp Files), NFS (custom solution)", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "AFS", - "Value": "AFS" - }, - { - "Text": "ANF", - "Value": "ANF" - }, - { - "Text": "NFS", - "Value": "NFS" - } - ], - "Overrules": "", - "Display": 1 - }, - { - "Name": "use_AFS_for_shared_storage", - "Required": false, - "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "create_transport_storage", - "Required": false, - "Description": "Defines if the workload zone will host storage for the transport data.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, + ], + "Overrules": "anf_subnet_nsg_name", + "Display": 3 + }, + { + "Name": "anf_subnet_nsg_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable for the network security group name", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + } + ] + }, + { + "Section": "Azure keyvault support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#key-vault-parameters", + "Parameters": [ + { + "Name": "user_keyvault_id", + "Required": false, + "Description": "Specifies the Azure resource identifier for an existing key vault designed to host secrets for the administrative users.", + "Type": "lookup", + "Options": [ { - "Name": "transport_volume_size", - "Required": false, - "Description": "Defines the size of the transport volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 + "Text": "", + "Value": "" } - ] + ], + "Overrules": "", + "Display": 3 }, { - "Section": "ANF Support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-netapp-files-support", - "Parameters": [ - { - "Name": "ANF_account_name", - "Required": false, - "Description": "The name for the Netapp Account to create.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_account_arm_id", - "Required": false, - "Description": "Azure resource identifier for an existing Netapp Account", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_service_level", - "Required": false, - "Description": "The service level for the NetApp pool", - "Type": "lookup", - "Options": [ - { - "Text": "Ultra", - "Value": "Ultra" - }, - { - "Text": "Premium", - "Value": "Premium" - }, - { - "Text": 
"Standard", - "Value": "Standard" - } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_pool_size", - "Required": false, - "Description": "The pool size in TB for the NetApp pool.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_qos_type", - "Required": false, - "Description": "The Quality of Service type of the pool (Auto or Manual).", - "Type": "lookup", - "Options": [ - { - "Text": "Manual", - "Value": "Manual" - }, - { - "Text": "Auto", - "Value": "Auto" - } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_use_existing_pool", - "Required": false, - "Description": "Use existing storage pool.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_pool_name", - "Required": false, - "Description": "the NetApp capacity pool name (if any)", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_transport_volume_throughput", - "Required": false, - "Description": "The throughput in MB/s for the transport volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "ANF_transport_volume_zone", - "Required": false, - "Description": "Azure NetApp transport volume availability zone.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "1", - "Value": "1" - }, - { - "Text": "2", - "Value": "2" - }, - { - "Text": "3", - "Value": "3" - } - ], - "Overrules": "", - "Display": 2 - }, - { - "Name": "ANF_install_volume_throughput", - "Required": false, - "Description": "The throughput in MB/s for the install volume.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, + "Name": "spn_keyvault_id", + "Required": false, + "Description": "Specifies the Azure resource identifier for an existing key vault. 
Designed to host the deployment credentials used by the automation.", + "Type": "lookup", + "Options": [ { - "Name": "ANF_install_volume_zone", - "Required": false, - "Description": "Azure NetApp install volume availability zone.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "1", - "Value": "1" - }, - { - "Text": "2", - "Value": "2" - }, - { - "Text": "3", - "Value": "3" - } - ], - "Overrules": "", - "Display": 2 + "Text": "", + "Value": "" } - ] - }, - { - "Section": "iSCSI support", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#iscsi-parameters", - "Parameters": [ - { - "Name": "iscsi_count", - "Required": false, - "Description": "The number of iSCSI virtual machines.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_size", - "Required": false, - "Description": "Size of iSCSI Virtual Machines to be created.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_useDHCP", - "Required": false, - "Description": "Controls whether to use dynamic IP addresses provided by the Azure subnet", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_image_offering", - "Required": false, - "Description": "Defines the Virtual Machine image for the iSCSI devices.", - "Type": "image_dropdown", - "Options": [], - "Overrules": "iscsi_image", - "Display": 3 - }, - { - "Name": "iscsi_image", - "Required": false, - "Description": "Defines the Virtual Machine image for the iSCSI devices.", - "Type": "image", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_authentication_type", - "Required": false, - "Description": "Defines the Virtual Machine authentication type for the iSCSI devices.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_authentication_username", - "Required": false, - "Description": "Defines the username for the iSCSI devices.", - "Type": "field", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_nic_ips", - "Required": false, - "Description": "Defines the IP Addresses for the iSCSI devices.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, - { - "Name": "iscsi_vm_zones", - "Required": false, - "Description": "Defines the Availability zones for the iSCSI devices.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - }, - { - "Text": "1", - "Value": "1" - }, - { - "Text": "2", - "Value": "2" - }, - { - "Text": "3", - "Value": "3" - } - ], - "Overrules": "", - "Display": 3 - }, + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "enable_purge_control_for_keyvaults", + "Required": false, + "Description": "Disables the purge protection for Azure key vaults", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "additional_users_to_add_to_keyvault_policies", + "Required": false, + "Description": "Additional users (object IDs) to add to the key vault policies.", + "Type": "list", + "Options": [ { - "Name": "user_assigned_identity_id", - "Required": false, - "Description": "Azure resource identifier for User assigned identity.", - "Type": "lookup", - "Options": [], - 
"Overrules": "", - "Display": 3 + "Text": "", + "Value": "" } + ], + "Overrules": "", + "Display": 2 + }, - ] + { + "Name": "enable_rbac_authorization_for_keyvault", + "Required": false, + "Description": "Controls the access policy model for the workload zone keyvault", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 }, { - "Section": "Utility VM", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", - "Parameters": [ - { - "Name": "utility_vm_count", - "Required": false, - "Description": "Defines number of utility virtual machines to deploy. The utility virtual machines can be used to host SAPGui and other tools.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "utility_vm_size", - "Required": false, - "Description": "Defines the size for the utility virtual machine. Default size is Standard_D4ds_v4.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_os_disk_size", - "Required": false, - "Description": "Defines the size of the OS disk for the Virtual Machine. Default size is 128", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_os_disk_type", - "Required": false, - "Description": "Defines the type of the OS disk for the Virtual Machine. Default size is Premium_LRS.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_useDHCP", - "Required": false, - "Description": "Defines if Azure subnet provided IP addresses should be used for the utility virtual machines.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "utility_vm_image_offering", - "Required": false, - "Description": "Defines the virtual machine image to use for the utility virtual machines.", - "Type": "image_dropdown", - "Options": [], - "Overrules": "utility_vm_image", - "Display": 2 - }, - { - "Name": "utility_vm_image", - "Required": false, - "Description": "Defines the virtual machine image to use for the utility virtual machines.", - "Type": "image", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, + "Name": "soft_delete_retention_days", + "Required": false, + "Description": "The number of days that items should be retained in the soft delete period", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] + }, + { + "Section": "DNS", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", + "Parameters": [ + { + "Name": "dns_label", + "Required": false, + "Description": "Provides the DNS label to use for the Virtual Network.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "dns_server_list", + "Required": false, + "Description": "Boolean value indicating if a custom dns record should be created for the storage account", + "Type": "list", + "Options": [ { - "Name": "utility_vm_nic_ips", - "Required": false, - "Description": "Provides the static IP addresses for the utility virtual machines.", - "Type": "list", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "", + "Value": "" } - ] + ], + "Overrules": "", + "Display": 3 }, { - "Section": "Storage Accounts", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-files-nfs-support", - "Parameters": [ 
- { - "Name": "install_volume_size", - "Required": false, - "Description": "The volume size in GB for the transport share", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "install_storage_account_id", - "Required": false, - "Description": "Azure Resource Identifier for the Installation media storage account.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 - }, + "Name": "use_custom_dns_a_registration", + "Required": false, + "Description": "Boolean value indicating if custom dns registration is used", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "register_virtual_network_to_dns", + "Required": false, + "Description": "Defines if the Virtual network and the load balancers are registered with DNS", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "management_dns_subscription_id", + "Required": false, + "Description": "Subscription for the DNS zone, if different from the management subscription", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "management_dns_resourcegroup_name", + "Required": false, + "Description": "Resource group for the DNS zone, if different from the SAP Library resource group", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + } + ] + }, + { + "Section": "NFS support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#nfs-support", + "Parameters": [ + { + "Name": "NFS_provider", + "Required": false, + "Description": "Defines how NFS services are provided to the SAP systems: AFS (Azure Files for NFS), ANF (Azure NetApp Files), NFS (custom solution)", + "Type": "lookup", + "Options": [ { - "Name": "install_private_endpoint_id", - "Required": false, - "Description": "Azure Resource Identifier for a private endpoint for the installation storage account.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "", + "Value": "" }, { - "Name": "storage_account_replication_type", - "Required": false, - "Description": "Defines the replication type for Azure Files for NFS install account.", - "Type": "lookup", - "Options": [ - { - "Text": "LRS", - "Value": "LRS" - }, - { - "Text": "ZRS", - "Value": "ZRS" - } - ], - "Overrules": "", - "Display": 2 + "Text": "AFS", + "Value": "AFS" }, { - "Name": "diagnostics_storage_account_arm_id", - "Required": false, - "Description": "Azure resource id for the diagnostics storage account", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "ANF", + "Value": "ANF" }, { - "Name": "witness_storage_account_arm_id", - "Required": false, - "Description": "Azure resource id for the witness storage account", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "NFS", + "Value": "NFS" + } + ], + "Overrules": "", + "Display": 1 + }, + { + "Name": "use_AFS_for_shared_storage", + "Required": false, + "Description": "Defines if shared media is shared from Azure Files when using Azure NetApp Files for data.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "create_transport_storage", + "Required": false, + "Description": "Defines if the workload zone will host storage for the transport data.", + "Type": 
"checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "transport_volume_size", + "Required": false, + "Description": "Defines the size of the transport volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] + }, + { + "Section": "ANF Support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-netapp-files-support", + "Parameters": [ + { + "Name": "ANF_account_name", + "Required": false, + "Description": "The name for the Netapp Account to create.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_account_arm_id", + "Required": false, + "Description": "Azure resource identifier for an existing Netapp Account", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_service_level", + "Required": false, + "Description": "The service level for the NetApp pool", + "Type": "lookup", + "Options": [ + { + "Text": "Ultra", + "Value": "Ultra" }, { - "Name": "transport_storage_account_id", - "Required": false, - "Description": "Azure Resource Identifier for the Transport media storage account.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "Premium", + "Value": "Premium" }, { - "Name": "transport_private_endpoint_id", - "Required": false, - "Description": "Azure Resource Identifier for a private endpoint connection for the transport storage account.", - "Type": "lookup", - "Options": [ - { - "Text": "", - "Value": "" - } - ], - "Overrules": "", - "Display": 3 + "Text": "Standard", + "Value": "Standard" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_pool_size", + "Required": false, + "Description": "The pool size in TB for the NetApp pool.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_qos_type", + "Required": false, + "Description": "The Quality of Service type of the pool (Auto or Manual).", + "Type": "lookup", + "Options": [ + { + "Text": "Manual", + "Value": "Manual" + }, + { + "Text": "Auto", + "Value": "Auto" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_use_existing_pool", + "Required": false, + "Description": "Use existing storage pool.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_pool_name", + "Required": false, + "Description": "the NetApp capacity pool name (if any)", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_transport_volume_throughput", + "Required": false, + "Description": "The throughput in MB/s for the transport volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_transport_volume_zone", + "Required": false, + "Description": "Azure NetApp transport volume availability zone.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ANF_install_volume_throughput", + "Required": false, + "Description": "The throughput in MB/s for the install volume.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "ANF_install_volume_zone", + "Required": false, + "Description": "Azure NetApp install volume availability zone.", + "Type": "list", + "Options": [ + { + 
"Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 + } + ] + }, + { + "Section": "iSCSI support", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#iscsi-parameters", + "Parameters": [ + { + "Name": "iscsi_count", + "Required": false, + "Description": "The number of iSCSI virtual machines.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_size", + "Required": false, + "Description": "Size of iSCSI Virtual Machines to be created.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_useDHCP", + "Required": false, + "Description": "Controls whether to use dynamic IP addresses provided by the Azure subnet", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_image_offering", + "Required": false, + "Description": "Defines the Virtual Machine image for the iSCSI devices.", + "Type": "image_dropdown", + "Options": [], + "Overrules": "iscsi_image", + "Display": 3 + }, + { + "Name": "iscsi_image", + "Required": false, + "Description": "Defines the Virtual Machine image for the iSCSI devices.", + "Type": "image", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_authentication_type", + "Required": false, + "Description": "Defines the Virtual Machine authentication type for the iSCSI devices.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_authentication_username", + "Required": false, + "Description": "Defines the username for the iSCSI devices.", + "Type": "field", + "Options": [ + { + "Text": "", + "Value": "" } - ] + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "iscsi_nic_ips", + "Required": false, + "Description": "Defines the IP Addresses for the iSCSI devices.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 }, { - "Section": "Azure Monitor Settings", - "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", - "Parameters": [ + "Name": "iscsi_vm_zones", + "Required": false, + "Description": "Defines the Availability zones for the iSCSI devices.", + "Type": "list", + "Options": [ { - "Name": "create_ams_instance", - "Required": false, - "Description": "Defines if an AMS Instance should be created.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" }, { - "Name": "ams_instance_name", - "Required": false, - "Description": "Defines the name of the AMS instance", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 2 + "Text": "2", + "Value": "2" }, { - "Name": "ams_laws_arm_id", - "Required": false, - "Description": "Defines the Azure resource id for the Log analytics workspace in AMS", - "Type": "lookup", - "Options": [], - "Overrules": "", - "Display": 2 + "Text": "3", + "Value": "3" } - ] + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "user_assigned_identity_id", + "Required": false, + "Description": "Azure resource identifier for User assigned identity.", + "Type": "lookup", + "Options": [], + "Overrules": "", + 
"Display": 3 + } + ] + }, + { + "Section": "Utility VM", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", + "Parameters": [ + { + "Name": "utility_vm_count", + "Required": false, + "Description": "Defines number of utility virtual machines to deploy. The utility virtual machines can be used to host SAPGui and other tools.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "utility_vm_size", + "Required": false, + "Description": "Defines the size for the utility virtual machine. Default size is Standard_D4ds_v4.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_os_disk_size", + "Required": false, + "Description": "Defines the size of the OS disk for the Virtual Machine. Default size is 128", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_os_disk_type", + "Required": false, + "Description": "Defines the type of the OS disk for the Virtual Machine. Default size is Premium_LRS.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_useDHCP", + "Required": false, + "Description": "Defines if Azure subnet provided IP addresses should be used for the utility virtual machines.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_image_offering", + "Required": false, + "Description": "Defines the virtual machine image to use for the utility virtual machines.", + "Type": "image_dropdown", + "Options": [], + "Overrules": "utility_vm_image", + "Display": 2 + }, + { + "Name": "utility_vm_image", + "Required": false, + "Description": "Defines the virtual machine image to use for the utility virtual machines.", + "Type": "image", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "utility_vm_nic_ips", + "Required": false, + "Description": "Provides the static IP addresses for the utility virtual machines.", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + } + ] + }, + { + "Section": "Storage Accounts", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#azure-files-nfs-support", + "Parameters": [ + { + "Name": "install_volume_size", + "Required": false, + "Description": "The volume size in GB for the transport share", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "install_storage_account_id", + "Required": false, + "Description": "Azure Resource Identifier for the Installation media storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "install_private_endpoint_id", + "Required": false, + "Description": "Azure Resource Identifier for a private endpoint for the installation storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "storage_account_replication_type", + "Required": false, + "Description": "Defines the replication type for Azure Files for NFS install account.", + "Type": "lookup", + "Options": [ + { + "Text": "LRS", + "Value": "LRS" + }, + { + "Text": "ZRS", + "Value": "ZRS" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "diagnostics_storage_account_arm_id", + "Required": false, + 
"Description": "Azure resource id for the diagnostics storage account", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "witness_storage_account_arm_id", + "Required": false, + "Description": "Azure resource id for the witness storage account", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "transport_storage_account_id", + "Required": false, + "Description": "Azure Resource Identifier for the Transport media storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "transport_private_endpoint_id", + "Required": false, + "Description": "Azure Resource Identifier for a private endpoint connection for the transport storage account.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "", + "Display": 3 + } + ] + }, + { + "Section": "Azure Monitor Settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#utility-vm-parameters", + "Parameters": [ + { + "Name": "create_ams_instance", + "Required": false, + "Description": "Defines if an AMS Instance should be created.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_instance_name", + "Required": false, + "Description": "Defines the name of the AMS instance", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "ams_laws_arm_id", + "Required": false, + "Description": "Defines the Azure resource id for the Log analytics workspace in AMS", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 } ] + } +] From 974c4fc136b265d5d4e248dc43719cee3e89faf5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 22:55:20 +0200 Subject: [PATCH 337/607] Update ams_instance count and monitorSubnet in ams.tf --- .../modules/sap_landscape/ams.tf | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index c3090b50c5..3a34c9e5f1 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -9,23 +9,23 @@ data "azurerm_subnet" "ams" { resource "azapi_resource" "ams_instance" { type = "Microsoft.Workloads/monitors@2023-04-01" - count = local.create_ams_instance ? 1 : 0 + count = local.create_ams_instance && local.ams_subnet_defined ? 1 : 0 name = local.ams_instance_name location = local.region parent_id = azurerm_resource_group.resource_group[0].id - depends_on = [ + depends_on = [ azurerm_virtual_network.vnet_sap, azurerm_subnet.ams ] body = jsonencode({ properties = { - appLocation: local.region, - routingPreference: "RouteAll", - logAnalyticsWorkspaceArmId: local.ams_laws_arm_id, - managedResourceGroupConfiguration: { - name: "managedrg-ams" - }, - monitorSubnet: azurerm_subnet.ams[0].id, - } + appLocation: local.region, + routingPreference: "RouteAll", + logAnalyticsWorkspaceArmId: local.ams_laws_arm_id, + managedResourceGroupConfiguration: { + name: "managedrg-ams" + }, + monitorSubnet: length(local.ams_subnet_arm_id) > 0 ? 
local.ams_subnet_arm_id : azurerm_subnet.ams[0].id, + } }) -} \ No newline at end of file +} From 749522cf711ddc2d569058a7e551cef7cbc005b5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 23:32:19 +0200 Subject: [PATCH 338/607] Add AMS variables to System definition --- Webapp/SDAF/Models/CustomValidators.cs | 2 +- Webapp/SDAF/Models/SystemModel.cs | 16 +++++++++ .../ParameterDetails/LandscapeDetails.json | 10 +++--- .../SDAF/ParameterDetails/SystemDetails.json | 33 +++++++++++++++++++ .../SDAF/ParameterDetails/SystemTemplate.txt | 19 +++++++++++ 5 files changed, 74 insertions(+), 6 deletions(-) diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs index 5023cc33a0..3b6da854f1 100644 --- a/Webapp/SDAF/Models/CustomValidators.cs +++ b/Webapp/SDAF/Models/CustomValidators.cs @@ -237,7 +237,7 @@ public class AMSIdValidator : ValidationAttribute { public override bool IsValid(object value) { - string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Monitor\/Accounts\/[a-zA-Z0-9-_]+$"; + string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Workloads\/monitors\/[a-zA-Z0-9-_]+$"; return RegexValidation(value, pattern); } } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index c84edc0d49..afb5bb5a7b 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -612,6 +612,22 @@ public bool IsValid() public int? stand_by_node_count { get; set; } = 0; + + /*---------------------------------------------------------------------------8 + | | + | AMS Parameters | + | | + +------------------------------------4--------------------------------------*/ + + public bool? enable_ha_monitoring { get; set; } = false; + + public bool? 
enable_os_monitoring { get; set; } = false;
+
+        [AMSIdValidator(ErrorMessage = "Invalid AMS Resource id")]
+        public string ams_resource_id { get; set; }
+
+
+
     }

     public class Tag
diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
index 4c14cd2376..25bc191154 100644
--- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
+++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json
@@ -616,7 +616,7 @@
         }
       ],
       "Overrules": "ams_subnet_address_prefix",
-      "Display": 3
+      "Display": 2
     },
     {
       "Name": "ams_subnet_address_prefix",
@@ -625,7 +625,7 @@
       "Type": "field",
       "Options": [],
       "Overrules": "",
-      "Display": 3
+      "Display": 2
     },
     {
       "Name": "ams_subnet_name",
@@ -634,7 +634,7 @@
       "Type": "field",
       "Options": [],
       "Overrules": "",
-      "Display": 3
+      "Display": 2
     },
     {
       "Name": "ams_subnet_nsg_arm_id",
@@ -648,7 +648,7 @@
         }
       ],
       "Overrules": "ams_subnet_nsg_name",
-      "Display": 3
+      "Display": 2
     },
     {
       "Name": "ams_subnet_nsg_name",
@@ -657,7 +657,7 @@
       "Type": "field",
       "Options": [],
       "Overrules": "",
-      "Display": 3
+      "Display": 2
     }
   ]
 },
diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json
index aac9fa9ac4..f4bf2cd462 100644
--- a/Webapp/SDAF/ParameterDetails/SystemDetails.json
+++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json
@@ -2222,6 +2222,39 @@
       }
     ]
   },
+  {
+    "Section": "Azure Monitor support",
+    "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-system",
+    "Parameters": [
+      {
+        "Name": "ams_resource_id",
+        "Required": false,
+        "Description": "Defines the Azure resource id of the Azure Monitor for SAP (AMS) instance to use",
+        "Type": "lookup",
+        "Options": [],
+        "Overrules": "",
+        "Display": 2
+      },
+      {
+        "Name": "enable_ha_monitoring",
+        "Required": false,
+        "Description": "Enables Prometheus high availability cluster monitoring.",
+        "Type": "checkbox",
+        "Options": [],
+        "Overrules": "",
+        "Display": 2
+      },
+      {
+        "Name": "enable_os_monitoring",
+        "Required": false,
+        "Description": "Enables Prometheus operating system level monitoring.",
+        "Type": "checkbox",
+        "Options": [],
+        "Overrules": "",
+        "Display": 2
+      }
+    ]
+  },
   {
     "Section": "Other Parameters",
     "Link": "https://learn.microsoft.com/en-us/azure/virtual-machines/workloads/sap/automation-configure-system?branch=main#other-parameters",
diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
index 7e8e8a111d..8fba95ae3a 100644
--- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
+++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt
@@ -860,3 +860,22 @@ $$database_HANA_use_ANF_scaleout_scenario$$

 # Defines the standby node count in a scaleout scenario
 $$stand_by_node_count$$
+
+
+#########################################################################################
+#                                                                                       #
+#                                     AMS variables                                     #
+#                                                                                       #
+#########################################################################################
+
+# If defined, will enable Prometheus high availability cluster monitoring
+$$enable_ha_monitoring$$
+
+# If defined, will enable Prometheus operating system level monitoring
+$$enable_os_monitoring$$
+
+# If defined, will use the specified Azure Monitor for SAP instance, else will use the AMS instance in the workload zone.
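+# (expected format: /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.Workloads/monitors/<ams-instance-name>, matching the AMSIdValidator pattern above; placeholders are illustrative)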
+$$ams_resource_id$$ + + + From 32c362ee12c91dae3303b73867043946c8e20648 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 26 Feb 2024 23:54:43 +0200 Subject: [PATCH 339/607] remove duplicate lifecycle sections --- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 7 ++----- .../terraform-units/modules/sap_system/hdb_node/vm-hdb.tf | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 513acdd036..6167e7c666 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -564,10 +564,6 @@ resource "azurerm_managed_disk" "cluster" { ) ) ) ? 1 : 0 - lifecycle { - ignore_changes = [tags] - } - name = format("%s%s%s%s", var.naming.resource_prefixes.scs_cluster_disk, local.prefix, @@ -593,7 +589,8 @@ resource "azurerm_managed_disk" "cluster" { ignore_changes = [ create_option, hyper_v_generation, - source_resource_id + source_resource_id, + tags ] } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 5e9230a7d9..dd41be7240 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -421,10 +421,6 @@ resource "azurerm_managed_disk" "cluster" { ) ) ) ? 1 : 0 - lifecycle { - ignore_changes = [tags] - } - name = format("%s%s%s%s", var.naming.resource_prefixes.database_cluster_disk, local.prefix, @@ -448,7 +444,8 @@ resource "azurerm_managed_disk" "cluster" { ignore_changes = [ create_option, hyper_v_generation, - source_resource_id + source_resource_id, + tags ] } From 742420e6ebe4b8f72c3a84d0a469057b1d5e85a0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Feb 2024 00:37:32 +0200 Subject: [PATCH 340/607] Add AMS Provider configuration option --- .../pipelines/05-DB-and-SAP-installation.yaml | 39 +++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 401d714d6b..7a6690fc1d 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -104,6 +104,12 @@ parameters: - Other # 20220929 MKD - ACSS Registration + - name: ams_provider + displayName: Configure AMS Provider + type: boolean + default: false + + - name: sap_automation_repo_path displayName: The local path on the agent where the sap_automation repo can be found type: string @@ -548,22 +554,23 @@ stages: acssEnvironment: ${{ parameters.acss_environment }} acssSapProduct: ${{ parameters.acss_sap_product }} USE_MSI: $(USE_MSI) - - template: templates\run-ansible.yaml - parameters: - displayName: "AMS Provider Creation" - ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_01_ams_monitoring.yaml - secretName: "$(Preparation.SSH_KEY_NAME)" - passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" - userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" - vaultName: $(Preparation.VAULT_NAME) - parametersFolder: $(Preparation.FOLDER) - sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" - sidHosts: 
$(Preparation.HOSTS) - extraParams: ${{ parameters.extra_params }} - azureClientId: $(ARM_CLIENT_ID) - azureClientSecret: $(ARM_CLIENT_SECRET) - azureTenantId: $(ARM_TENANT_ID) - azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + - ${{ if eq(parameters.ams_provider, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "AMS Provider Creation" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_01_ams_monitoring.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: ${{ parameters.extra_params }} + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) - template: templates\collect-log-files.yaml parameters: logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs From 47cafe5803f629e7b30f08bda10c0162d2217d69 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Feb 2024 10:51:34 +0200 Subject: [PATCH 341/607] Add AMS Provider configuration option --- deploy/pipelines/05-DB-and-SAP-installation.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 7a6690fc1d..2e6ca25a1e 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -82,6 +82,13 @@ parameters: type: boolean default: false + - name: ams_provider + displayName: Configure AMS Provider + type: boolean + default: false + + + # 20220929 MKD - ACSS Registration - name: acss_registration displayName: Register System in ACSS @@ -104,12 +111,6 @@ parameters: - Other # 20220929 MKD - ACSS Registration - - name: ams_provider - displayName: Configure AMS Provider - type: boolean - default: false - - - name: sap_automation_repo_path displayName: The local path on the agent where the sap_automation repo can be found type: string From 4f79271de14e64edededd8607293ae0ec2e5ca85 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Feb 2024 11:06:42 +0200 Subject: [PATCH 342/607] Install packages before enable services --- .../roles-os/1.20-prometheus/tasks/main.yml | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index bf1d02e562..2bcd2a8ef1 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -8,6 +8,22 @@ # | BEGIN | # +------------------------------------4---------------------------------------*/ +- name: "1.20.0 Packages: - Install pcp and pcp-pmda-hacluster package" + when: + - enable_os_monitoring or enable_ha_monitoring + - ansible_os_family | upper == "REDHAT" + block: + - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package" + ansible.builtin.yum: + name: + - "pcp" + - "pcp-pmda-hacluster" + + - name: "1.20 Packages: - Install and enable the HA Cluster PMDA." 
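+      # The Install script shipped under /var/lib/pcp/pmdas/hacluster/ registers the HA Cluster metrics domain with the local PCP daemon (pmcd).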
+ ansible.builtin.shell: "./Install" + args: + chdir: "/var/lib/pcp/pmdas/hacluster/" + - name: "1.20 Packages: - Start and enable services required for monitoring" when: - enable_ha_monitoring @@ -55,22 +71,6 @@ - name: "1.20 Packages: - Enable HA cluster exporter." ansible.builtin.shell: "nohup ha_cluster_exporter &" -- name: "1.20.0 Packages: - Install pcp and pcp-pmda-hacluster package" - when: - - enable_os_monitoring or enable_ha_monitoring - - ansible_os_family | upper == "REDHAT" - block: - - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package" - ansible.builtin.yum: - name: - - "pcp" - - "pcp-pmda-hacluster" - - - name: "1.20 Packages: - Install and enable the HA Cluster PMDA." - ansible.builtin.shell: "./Install" - args: - chdir: "/var/lib/pcp/pmdas/hacluster/" - # /*----------------------------------------------------------------------------8 # | END | # +------------------------------------4---------------------------------------*/ From 9ff0bb648eadef6be2c1ed695a7eb23713c62fd0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Feb 2024 17:38:14 +0200 Subject: [PATCH 343/607] Login using the User Assigned Identity --- .../modules/sap_deployer/templates/configure_deployer.sh.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index d6c0127fe3..3647fda70f 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -721,7 +721,7 @@ else echo export "PATH=$${ansible_bin}:$${tf_bin}:$${PATH}":"$${DOTNET_ROOT}":"$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts" | sudo tee -a /etc/profile.d/deploy_server.sh # Ensure that the user's account is logged in to Azure with specified creds - echo 'az login --identity --allow-no-subscriptions --output none' | sudo tee -a /etc/profile.d/deploy_server.sh + (echo 'az login --identity --allow-no-subscriptions --output none --username "$${client_id}" ' | sudo tee -a /etc/profile.d/deploy_server.sh) > /dev/null 2>&1 echo 'echo $${USER} account ready for use with Azure SAP Automated Deployment' | sudo tee -a /etc/profile.d/deploy_server.sh sudo runuser -l "$${local_user}" -c '/usr/bin/az login --identity --allow-no-subscriptions --output none' From e6fc72308be5ab7321fcae94c7938572fea24e9f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Feb 2024 18:39:25 +0200 Subject: [PATCH 344/607] Update VM-Images.json and tfvar_variables.tf --- Webapp/SDAF/ParameterDetails/VM-Images.json | 12 ++++++++++++ .../terraform/run/sap_landscape/tfvar_variables.tf | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json index 8404400aa0..16ee745a3c 100644 --- a/Webapp/SDAF/ParameterDetails/VM-Images.json +++ b/Webapp/SDAF/ParameterDetails/VM-Images.json @@ -310,5 +310,17 @@ "version": "latest", "type": "marketplace" } + }, + { + "name": "Windows Server 2022 (Secure)", + "data": { + "os_type": "WINDOWS", + "source_image_id": "", + "publisher": "MicrosoftWindowsServer", + "offer": "windowsserver", + "sku": "2022-datacenter-g2", + "version": "latest", + "type": "marketplace" + } } ] diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf 
b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
index 79b3599073..4f23f1bd8f 100644
--- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
@@ -721,8 +721,8 @@ variable "utility_vm_image" {
   "os_type"          = "WINDOWS"
   "source_image_id"  = ""
   "publisher"        = "MicrosoftWindowsServer"
-  "offer"            = "windowsserver"
-  "sku"              = "2019-datacenter"
+  "offer"            = "WindowsServer"
+  "sku"              = "2022-Datacenter"
   "version"          = "latest"
   }
 }
@@ -778,4 +778,4 @@ variable "ams_instance_name" {
 variable "ams_laws_arm_id" {
   description = "If provided, Azure resource id for the Log analytics workspace in AMS"
   default     = ""
-  }
\ No newline at end of file
+  }

From 8353b7202e8f3bd1fc0503885b3f77f70c1dca39 Mon Sep 17 00:00:00 2001
From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
Date: Tue, 27 Feb 2024 13:03:46 -0800
Subject: [PATCH 345/607] Fix for log analytics workspace id being set as null
 in AMS resource (#556)

* Remove logAnalyticsWorkspaceArmId from ams_instance resource
* Add conditional creation of AMS instance based on log analytics workspace
* Refactor AMS instance resource in sap_landscape module
* Update output value for ams_resource_id
* Refactor AMS instance configuration in sap_landscape module
* Refactor AMS resource ID output in sap_landscape module
* Refactor AMS instance count in ams.tf
---
 deploy/terraform/terraform-units/modules/sap_landscape/ams.tf | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf
index 3a34c9e5f1..742600b334 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf
@@ -7,6 +7,7 @@ data "azurerm_subnet" "ams" {
   resource_group_name = local.resourcegroup_name
 }

+# Create the AMS instance; the Log Analytics workspace id is passed when defined, otherwise null
 resource "azapi_resource" "ams_instance" {
   type      = "Microsoft.Workloads/monitors@2023-04-01"
   count     = local.create_ams_instance && local.ams_subnet_defined ? 1 : 0
@@ -21,7 +22,7 @@ resource "azapi_resource" "ams_instance" {
   properties = {
     appLocation: local.region,
     routingPreference: "RouteAll",
-    logAnalyticsWorkspaceArmId: local.ams_laws_arm_id,
+    logAnalyticsWorkspaceArmId: length(local.ams_laws_arm_id) > 0 ? local.ams_laws_arm_id : null,
     managedResourceGroupConfiguration: {
       name: "managedrg-ams"
     },

From 3bed0b066078bc6845f912b6fdf6baa2690cd0fc Mon Sep 17 00:00:00 2001
From: Harm Jan Stam
Date: Fri, 1 Mar 2024 09:32:48 +0100
Subject: [PATCH 346/607] Re-add storage_type.name_offset for append_disk of
 hdb_node (#557)

Was removed in version 3.9.3 but should be present like with anydb_node
and app_tier. Append disk isn't working at the moment.
https://github.com/Azure/sap-automation/pull/499/files#diff-f3f0cacc6c65c102bd1d82eb6bf30c1b485d6e272375d92855dcb7664e4bf35bL220 --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index 2cbe719fc0..4f8b3b649f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -231,7 +231,7 @@ locals { for idx, disk_count in range(storage_type.count) : { suffix = format("-%s%02d", storage_type.name, - disk_count + var.options.resource_offset + storage_type.name_offset + disk_count + var.options.resource_offset ) storage_account_type = storage_type.disk_type, disk_size_gb = storage_type.size_gb, From 4feafc8a6135b7689e8d0b59d174d28e8a6b682a Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Fri, 1 Mar 2024 00:53:31 -0800 Subject: [PATCH 347/607] Remove code to stop the secondary node in Oracle DB before restart (#553) * Refactor Oracle Data Guard setup on Secondary node * Remove commented out code for stopping and creating lsnrctl_stopped_sec.txt file on Secondary node --- .../tasks/ora-dg-setup-secondary.yaml | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index dfea0ca840..4a924f6007 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -40,31 +40,6 @@ mode: "{{ '0777' | int - (custom_umask | default('022') | int) }}" when: current_host == ora_secondary -# Restart the Listener on Secondary node. 
- -- name: "Oracle Data Guard - Setup Secondary: stop lsnrctl on Secondary" - become: true - become_user: "oracle" - ansible.builtin.shell: lsnrctl stop - register: lsnrctl_stop_secondary_results - failed_when: lsnrctl_stop_secondary_results.rc > 0 - args: - creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt - chdir: /etc/sap_deployment_automation/dgscripts - executable: /bin/csh - when: current_host == ora_secondary - -- name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" - become: true - become_user: "oracle" - ansible.builtin.file: - path: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt - state: touch - mode: '0755' - when: - - current_host == ora_secondary - - lsnrctl_stop_secondary_results.rc == 0 - - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary" become: true become_user: "oracle" From ac8b4ff9556dd77eb28bf0384c55399707b3282e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 3 Mar 2024 20:01:37 +0200 Subject: [PATCH 348/607] Update ora-dg-setup-secondary.yaml Don't fail if service is already running --- .../roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index 4a924f6007..2a9a41eaf9 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -45,7 +45,7 @@ become_user: "oracle" ansible.builtin.shell: lsnrctl start register: lsnrctl_start_secondary_results - failed_when: lsnrctl_start_secondary_results.rc > 0 + failed_when: lsnrctl_start_secondary_results.rc > 1 args: creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt chdir: /etc/sap_deployment_automation/dgscripts From 2f81765606a5f1e9905f639cb977ff7e1fd1647b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 3 Mar 2024 22:02:57 +0200 Subject: [PATCH 349/607] Fix virtual machine data disk attachment and role assignment for Linux database servers --- .../modules/sap_system/anydb_node/vm-anydb.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 4c64b01a74..7ef86c0ffd 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -585,7 +585,7 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { ) : ( (upper(var.database.os.os_type) == "WINDOWS" # If Windows ) ? ( - null + azurerm_windows_virtual_machine.dbserver[count.index].id ) : ( null # If Other ) @@ -600,7 +600,7 @@ resource "azurerm_role_assignment" "role_assignment_msi" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database.high_availability + var.database.high_availability && upper(var.database.os.os_type) == "LINUX" ) ? ( var.database_server_count ) : ( @@ -616,7 +616,7 @@ resource "azurerm_role_assignment" "role_assignment_msi_ha" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database.high_availability + var.database.high_availability && upper(var.database.os.os_type) == "LINUX" ) ? 
( var.database_server_count ) : ( From 3a44ef5237c3b0762cbaf41bb10977b196450a38 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 3 Mar 2024 22:16:23 +0200 Subject: [PATCH 350/607] Add Windows Server 2019-GS and Windows Server 2022-GS to VM-Images.json --- Webapp/SDAF/ParameterDetails/VM-Images.json | 26 ++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json index 16ee745a3c..ed9e6ff8f8 100644 --- a/Webapp/SDAF/ParameterDetails/VM-Images.json +++ b/Webapp/SDAF/ParameterDetails/VM-Images.json @@ -299,6 +299,18 @@ "type": "marketplace" } }, + { + "name": "Windows Server 2019-GS", + "data": { + "os_type": "WINDOWS", + "source_image_id": "", + "publisher": "MicrosoftWindowsServer", + "offer": "windowsserver", + "sku": "2019-datacenter-g2", + "version": "latest", + "type": "marketplace" + } + }, { "name": "Windows Server 2022", "data": { @@ -312,7 +324,19 @@ } }, { - "name": "Windows Server 2022 (Secure)", + "name": "Windows Server 2022-GS", + "data": { + "os_type": "WINDOWS", + "source_image_id": "", + "publisher": "MicrosoftWindowsServer", + "offer": "windowsserver", + "sku": "2022-datacenter-g2", + "version": "latest", + "type": "marketplace" + } + }, + { + "name": "Windows Server 2022-GS", "data": { "os_type": "WINDOWS", "source_image_id": "", From 4a15f31a0b07e1111bb1231cfb42289765a49ebb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 3 Mar 2024 23:25:35 +0200 Subject: [PATCH 351/607] Add Windows Update --- .../windows/1.4-packages/tasks/main.yaml | 72 +++++++++++++------ 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 0a6110f02d..4e660d340c 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -19,47 +19,75 @@ # ---------------------------------------- # BEGIN -- name: "Run Flag Directory is Existing" +- name: "1.4-Packages: Run Flag Directory is Existing" ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}' state: directory -- name: "Check if Nuget is already installed" +- name: "1.4-Packages: Check if Nuget is already installed" ansible.windows.win_stat: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\nuget.txt' register: nuget_installed -- name: "Install Nuget on {{ ansible_hostname }}" +- name: "1.4-Packages: Windows Update" + ansible.builtin.debug: + msg: "Starting Windows Update. Please wait" + + +# set a check that the customer can select if it can be done or not. 
[make it an option] +- name: "1.4-Packages: Ensure Installing the latest windows patches" + ansible.windows.win_updates: + category_names: + - SecurityUpdates + - CriticalUpdates + - UpdateRollups + reject_list: "{{ win_updates_reject_list }}" + accept_list: "{{ win_updates_accept_list }}" + register: win_updates + +- name: "1.4-Packages: Reboot if required" + ansible.windows.win_reboot: + reboot_timeout: 600 + post_reboot_delay: 120 + test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"' + when: win_updates.reboot_required + +- name: "1.4-Packages: Force all notified handlers to run now" + ansible.builtin.meta: flush_handlers + + +- name: "1.4-Packages: Install Nuget on {{ ansible_hostname }}" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 + Register-PSRepository -Default -InstallationPolicy Trusted Install-PackageProvider -Name NuGet -Force when: - not nuget_installed.stat.exists -- name: "Nuget || Flag File" +- name: "1.4-Packages: Nuget || Flag File" ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\nuget.txt' state: touch -- name: "Check if PowerShellGet is already installed" +- name: "1.4-Packages: Check if PowerShellGet is already installed" ansible.windows.win_stat: - path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' - register: powershell_get_installed + path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' + register: powershell_get_installed -- name: "Install PowerShellGet on {{ ansible_hostname }}" +- name: "1.4-Packages: Install PowerShellGet on {{ ansible_hostname }}" ansible.windows.win_shell: Install-Module PowerShellGet -AllowClobber -Force when: - not powershell_get_installed.stat.exists - tier == 'os' -- name: "PowerShellGet || Flag File" +- name: "1.4-Packages: PowerShellGet || Flag File" ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' state: touch # Install the DSC modules -- name: "Ensure DSC modules are installed" +- name: "1.4-Packages: Ensure DSC modules are installed" community.windows.win_psmodule: name: "{{ item }}" state: present @@ -75,21 +103,21 @@ when: - tier == 'os' -- name: "Disable UAC for admin accounts" +- name: "1.4-Packages: Disable UAC for admin accounts" ansible.windows.win_dsc: resource_name: UserAccountControl IsSingleInstance: "Yes" NotificationLevel: "NeverNotify" notify: reboot -- name: "Component Install" +- name: "1.4-Packages: Component Install" when: - node_tier in ['scs','ers','app','pas'] - tier == 'app_tier' block: # 2x) Install vcredist package then reboot. 
- - name: "Install Visual C++ 2013 Redistributable package" + - name: "1.4-Packages: Install Visual C++ 2013 Redistributable package" ansible.windows.win_package: path: '{{ download_directory_windows }}\SWPM\NTPATCH\VCREDIST2013\vcredist_x64.exe' product_id: '{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}' @@ -102,7 +130,7 @@ ansible_become_password: "{{ domain_user_password }}" register: vcredist2013_output - - name: "Install Visual C++ 2017 Redistributable package" + - name: "1.4-Packages: Install Visual C++ 2017 Redistributable package" ansible.windows.win_package: path: '{{ download_directory_windows }}\SWPM\NTPATCH\VCREDIST2017\VC_redist.x64.exe' product_id: '{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}' @@ -115,26 +143,26 @@ ansible_become_password: "{{ domain_user_password }}" register: vcredist2017_output - - name: "Reboot after package installation" + - name: "1.4-Packages: Reboot after package installation" ansible.windows.win_reboot: reboot_timeout: 600 post_reboot_delay: 120 when: vcredist2013_output.reboot_required or vcredist2017_output.reboot_required - - name: 'Create temporary extract directory {{ item.archive }}' + - name: '1.4-Packages: Create temporary extract directory {{ item.archive }}' ansible.windows.win_tempfile: - state: directory - suffix: extract - register: tempdir + state: directory + suffix: extract + register: tempdir # 3x) download SQL server ODBC 17 on windows. - - name: "Download SQL Server ODBC Driver 17" + - name: "1.4-Packages: Download SQL Server ODBC Driver 17" ansible.windows.win_get_url: url: https://download.microsoft.com/download/6/f/f/6ffefc73-39ab-4cc0-bb7c-4093d64c2669/en-US/17.10.1.1/x64/msodbcsql.msi dest: "{{ tempdir.path }}" validate_certs: true - - name: "Install SQL Server ODBC Driver 17" + - name: "1.4-Packages: Install SQL Server ODBC Driver 17" ansible.windows.win_shell: | MsiExec.exe /i {{ tempdir.path }}\msodbcsql.msi IACCEPTMSODBCSQLLICENSETERMS=YES /qn vars: @@ -143,5 +171,5 @@ ansible_become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible_become_password: "{{ domain_user_password }}" -- name: "Force all notified handlers to run now" +- name: "1.4-Packages: Force all notified handlers to run now" ansible.builtin.meta: flush_handlers From df86a545c168c30d23accf2fd426098328068cc6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 3 Mar 2024 23:41:02 +0200 Subject: [PATCH 352/607] Remove unnecessary task and update Nuget installation command --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 4e660d340c..b7fc382a03 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -52,10 +52,6 @@ test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"' when: win_updates.reboot_required -- name: "1.4-Packages: Force all notified handlers to run now" - ansible.builtin.meta: flush_handlers - - - name: "1.4-Packages: Install Nuget on {{ ansible_hostname }}" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 From 7a23016e15105f5dcf6b9cc7444235d7479b7aea Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 00:00:39 +0200 Subject: [PATCH 353/607] Add check for nuget installation and enable reboot after windows patch installation --- 
.../roles-os/windows/1.4-packages/tasks/main.yaml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index b7fc382a03..383a62cdea 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -36,6 +36,8 @@ # set a check that the customer can select if it can be done or not. [make it an option] - name: "1.4-Packages: Ensure Installing the latest windows patches" + when: + - not nuget_installed.stat.exists ansible.windows.win_updates: category_names: - SecurityUpdates @@ -43,15 +45,9 @@ - UpdateRollups reject_list: "{{ win_updates_reject_list }}" accept_list: "{{ win_updates_accept_list }}" + reboot: true register: win_updates -- name: "1.4-Packages: Reboot if required" - ansible.windows.win_reboot: - reboot_timeout: 600 - post_reboot_delay: 120 - test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"' - when: win_updates.reboot_required - - name: "1.4-Packages: Install Nuget on {{ ansible_hostname }}" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 From 9532c99023b175c89a6f7588dd46cf514d50f384 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 00:09:03 +0200 Subject: [PATCH 354/607] Add NuGet and PowerShellGet installation tasks --- .../ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 383a62cdea..e4c7553191 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -53,10 +53,12 @@ [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 Register-PSRepository -Default -InstallationPolicy Trusted Install-PackageProvider -Name NuGet -Force + register: nuget_installed when: - not nuget_installed.stat.exists - name: "1.4-Packages: Nuget || Flag File" + when: nuget_installed.rc == 0 ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\nuget.txt' state: touch @@ -64,15 +66,16 @@ - name: "1.4-Packages: Check if PowerShellGet is already installed" ansible.windows.win_stat: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' - register: powershell_get_installed + register: powershell_is_installed - name: "1.4-Packages: Install PowerShellGet on {{ ansible_hostname }}" ansible.windows.win_shell: Install-Module PowerShellGet -AllowClobber -Force when: - - not powershell_get_installed.stat.exists + - not powershell_is_installed.stat.exists - tier == 'os' - name: "1.4-Packages: PowerShellGet || Flag File" + when: powershell_is_installed.rc == 0 ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' state: touch From 292c3128788eaf31cb19841da52699c797ef1f6d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 00:24:36 +0200 Subject: [PATCH 355/607] Fix Nuget and PowerShellGet installation tasks --- .../roles-os/windows/1.4-packages/tasks/main.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git 
a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index e4c7553191..42ce63d5ec 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -58,7 +58,9 @@ - not nuget_installed.stat.exists - name: "1.4-Packages: Nuget || Flag File" - when: nuget_installed.rc == 0 + when: + - nuget_installed is defined + - nuget_installed.rc == 0 ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\nuget.txt' state: touch @@ -70,12 +72,15 @@ - name: "1.4-Packages: Install PowerShellGet on {{ ansible_hostname }}" ansible.windows.win_shell: Install-Module PowerShellGet -AllowClobber -Force + register: powershell_installed when: - not powershell_is_installed.stat.exists - tier == 'os' - name: "1.4-Packages: PowerShellGet || Flag File" - when: powershell_is_installed.rc == 0 + when: + - powershell_installed is defined + - powershell_installed.rc == 0 ansible.windows.win_file: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\PowerShellGet.txt' state: touch From 9420a6deb4d89d7c0f1bfb9c605aaa0d8d243edc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 00:29:35 +0200 Subject: [PATCH 356/607] Fix Nuget installation check in Windows role --- .../ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 42ce63d5ec..2589cd37dc 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -27,7 +27,7 @@ - name: "1.4-Packages: Check if Nuget is already installed" ansible.windows.win_stat: path: '{{ sap_deployment_automation }}\{{ sap_sid | upper }}\nuget.txt' - register: nuget_installed + register: nuget_is_installed - name: "1.4-Packages: Windows Update" ansible.builtin.debug: @@ -37,7 +37,7 @@ # set a check that the customer can select if it can be done or not. 
[make it an option] - name: "1.4-Packages: Ensure Installing the latest windows patches" when: - - not nuget_installed.stat.exists + - not nuget_is_installed.stat.exists ansible.windows.win_updates: category_names: - SecurityUpdates @@ -55,7 +55,7 @@ Install-PackageProvider -Name NuGet -Force register: nuget_installed when: - - not nuget_installed.stat.exists + - not nuget_is_installed.stat.exists - name: "1.4-Packages: Nuget || Flag File" when: From 56fd9fbb150a3cc49777a99e771a99d803fe422f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 00:36:45 +0200 Subject: [PATCH 357/607] Add condition to check if Nuget is installed --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 2589cd37dc..37d4df6538 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -59,6 +59,7 @@ - name: "1.4-Packages: Nuget || Flag File" when: + - not nuget_is_installed.stat.exists - nuget_installed is defined - nuget_installed.rc == 0 ansible.windows.win_file: From a466a09b0400a5791a8620cdf1c076d42e4fd3ef Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 01:10:53 +0200 Subject: [PATCH 358/607] Add condition to check if PowerShell is installed before running a task --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 37d4df6538..c17308691e 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -80,6 +80,7 @@ - name: "1.4-Packages: PowerShellGet || Flag File" when: + - not powershell_is_installed.stat.exists - powershell_installed is defined - powershell_installed.rc == 0 ansible.windows.win_file: From bc1f0a99e46a05813ecb697b2e48ef2fbb71535b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 01:16:35 +0200 Subject: [PATCH 359/607] Add Nuget installation and repository registration tasks --- .../roles-os/windows/1.4-packages/tasks/main.yaml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index c17308691e..119c0e683e 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -48,14 +48,18 @@ reboot: true register: win_updates -- name: "1.4-Packages: Install Nuget on {{ ansible_hostname }}" +- name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 Register-PSRepository -Default -InstallationPolicy Trusted Install-PackageProvider -Name NuGet -Force + register: repository_registered + +- name: "1.4-Packages: Install Nuget" + ansible.windows.win_shell: | + [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 + Install-PackageProvider -Name NuGet -Force register: nuget_installed - when: - - not nuget_is_installed.stat.exists - name: "1.4-Packages: Nuget || Flag File" when: From 
3d3668a1d63df8a43676c711d71312f8fc11de68 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 11:00:15 +0200 Subject: [PATCH 360/607] Update NuGet installation in Windows role --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 119c0e683e..bc91347cf1 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -52,7 +52,6 @@ ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 Register-PSRepository -Default -InstallationPolicy Trusted - Install-PackageProvider -Name NuGet -Force register: repository_registered - name: "1.4-Packages: Install Nuget" @@ -76,7 +75,9 @@ register: powershell_is_installed - name: "1.4-Packages: Install PowerShellGet on {{ ansible_hostname }}" - ansible.windows.win_shell: Install-Module PowerShellGet -AllowClobber -Force + ansible.windows.win_shell: | + [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 + Install-Module PowerShellGet -AllowClobber -Force register: powershell_installed when: - not powershell_is_installed.stat.exists From f832c83e96bb81fcdc25fbe1ac84fb34e4c93b63 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 11:14:16 +0200 Subject: [PATCH 361/607] Add restart of Listener on Secondary node in Oracle Data Guard setup --- .../tasks/ora-dg-setup-secondary.yaml | 40 +++++++++++++++++-- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index 2a9a41eaf9..89caa15f70 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -40,17 +40,47 @@ mode: "{{ '0777' | int - (custom_umask | default('022') | int) }}" when: current_host == ora_secondary +# Restart the Listener on Secondary node. 
+ +- name: "Oracle Data Guard - Setup Secondary: stop lsnrctl on Secondary" + become: true + become_user: "oracle" + ansible.builtin.shell: lsnrctl stop + register: lsnrctl_stop_secondary_results + failed_when: lsnrctl_stop_secondary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - platform == 'ORACLE-ASM' + - current_host == ora_secondary + +- name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" + become: true + become_user: "oracle" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt + state: touch + mode: '0755' + when: + - platform == 'ORACLE-ASM' + - current_host == ora_secondary + - lsnrctl_stop_secondary_results.rc == 0 + - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary" become: true become_user: "oracle" ansible.builtin.shell: lsnrctl start register: lsnrctl_start_secondary_results - failed_when: lsnrctl_start_secondary_results.rc > 1 + failed_when: lsnrctl_start_secondary_results.rc > 0 args: creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: current_host == ora_secondary + when: + - platform == 'ORACLE-ASM' + - current_host == ora_secondary - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (Debug)" ansible.builtin.debug: @@ -62,7 +92,10 @@ dest: /etc/sap_deployment_automation/lsnrctl_start_primary.log content: "{{ lsnrctl_start_secondary_results.stdout }}" mode: '0777' - when: lsnrctl_start_secondary_results.stdout is defined + when: + - platform == 'ORACLE-ASM' + - current_host == ora_secondary + - lsnrctl_start_secondary_results.stdout is defined - name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_started_sec.txt" become: true @@ -72,6 +105,7 @@ state: touch mode: '0755' when: + - platform == 'ORACLE-ASM' - current_host == ora_secondary - lsnrctl_start_secondary_results.rc == 0 From 8af0608c376ae56aca55a18ef145633c423e9f98 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 12:41:50 +0200 Subject: [PATCH 362/607] Fix PowerShell installation in Windows role --- .../roles-os/windows/1.4-packages/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index bc91347cf1..6d35855a4b 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -2,7 +2,7 @@ # | | # | OS Base Disk Configuration | # | | -# +------------------------------------4--------------------------------------*/ +# +------------------------------------4--------------------------------------*/powershell_is_installed --- # -------------------------------------+---------------------------------------8 # @@ -50,13 +50,13 @@ - name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | - [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 - Register-PSRepository -Default -InstallationPolicy Trusted + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + Register-PSRepository -Default register: repository_registered - name: "1.4-Packages: Install Nuget" ansible.windows.win_shell: | - 
[Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 Install-PackageProvider -Name NuGet -Force register: nuget_installed @@ -76,7 +76,7 @@ - name: "1.4-Packages: Install PowerShellGet on {{ ansible_hostname }}" ansible.windows.win_shell: | - [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 Install-Module PowerShellGet -AllowClobber -Force register: powershell_installed when: From 64712a7d4b3b3ed612950dd71bab239745cb5ebd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 12:46:26 +0200 Subject: [PATCH 363/607] Update Register-PSRepository command to use -Force option --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index 6d35855a4b..a4d09c78c4 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -51,7 +51,7 @@ - name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - Register-PSRepository -Default + Register-PSRepository -Default -Force register: repository_registered - name: "1.4-Packages: Install Nuget" From 07b3c50149162352213bf1f235af626fa8122958 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 12:56:52 +0200 Subject: [PATCH 364/607] Update Register-PSRepository command to use ErrorAction SilentlyContinue --- deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index a4d09c78c4..be760f501b 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -51,7 +51,7 @@ - name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - Register-PSRepository -Default -Force + Register-PSRepository -Default -ErrorAction SilentlyContinue register: repository_registered - name: "1.4-Packages: Install Nuget" From 46e3d03598bf28bdb684a03a9d4864a888714e3f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 13:00:46 +0200 Subject: [PATCH 365/607] Add Nuget installation and repository registration tasks --- .../roles-os/windows/1.4-packages/tasks/main.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index be760f501b..ba5a8aa2fd 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -48,17 +48,18 @@ reboot: true register: win_updates +- name: "1.4-Packages: Install Nuget" + ansible.windows.win_shell: | + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + Install-PackageProvider -Name NuGet -Force + register: 
nuget_installed + - name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 Register-PSRepository -Default -ErrorAction SilentlyContinue register: repository_registered -- name: "1.4-Packages: Install Nuget" - ansible.windows.win_shell: | - [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - Install-PackageProvider -Name NuGet -Force - register: nuget_installed - name: "1.4-Packages: Nuget || Flag File" when: From 043457e31014bb75de81ede6d25669c504f4e894 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 15:22:26 +0200 Subject: [PATCH 366/607] Update become_user to use domain_name instead of domain --- .../tasks/1.17.1-wincluster-createcluster.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml index 2710adf897..4ddb154e33 100644 --- a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml +++ b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml @@ -41,7 +41,7 @@ - name: "WinCluster-Create: Create windows cluster on the first node" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_dsc: resource_name: Cluster StaticIPAddress: "{{ cluster_ip_address }}" @@ -60,7 +60,7 @@ - name: "WinCluster-Create: Tasks for secondary nodes" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' block: - name: "WinCluster-Create: Wait for cluster to be created" ansible.windows.win_dsc: @@ -101,7 +101,7 @@ - name: "WinCluster-Create: Create quorum resource" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_dsc: resource_name: ClusterQuorum Type: "NodeAndCloudMajority" @@ -116,7 +116,7 @@ - name: "WinCluster-Create: Add shared disk resource to cluster (only execute on scs node)" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' when: - scs_high_availability - "'scs' in supported_tiers" From 1ec558e030ec435fc97c63838baf1468df086d0e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 17:25:17 +0200 Subject: [PATCH 367/607] Fix variable group validation in pipeline files --- deploy/pipelines/02-sap-workload-zone.yaml | 56 ++++++++++--------- .../pipelines/03-sap-system-deployment.yaml | 24 ++++---- 2 files changed, 42 insertions(+), 38 deletions(-) diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 4c601b60ca..50f267851d 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -147,40 +147,42 @@ stages: echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." exit 2 fi + if [ $USE_MSI != "true" ]; then - if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." 
- exit 2 - fi + if [ -z $WL_ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi - if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." - exit 2 - fi + if [ -z $WL_ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 + fi - if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." - exit 2 - fi + if [ -z $WL_ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi - if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." - exit 2 - fi + if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." + exit 2 + fi - if [ -z $CP_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 - fi + if [ -z $CP_ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 + fi - if [ -z $CP_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." - exit 2 - fi + if [ -z $CP_ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." + exit 2 + fi - if [ -z $CP_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + if [ -z $CP_ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 + fi fi echo -e "$green--- Convert config file to UX format ---$reset" diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 017fa88d76..58e2880d8d 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -93,21 +93,23 @@ stages: echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." exit 2 fi + if [ $USE_MSI != "true" ]; then + if [ -z $WL_ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi - if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi + if [ -z $WL_ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi - if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 + if [ -z $WL_ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." 
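+            # ##vso[task.logissue type=error] surfaces the message as an error
+            # annotation on the pipeline run; the subsequent non-zero exit is
+            # what actually fails the validation step.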
+ exit 2 + fi fi - if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 - fi echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" cd $CONFIG_REPO_PATH git checkout -q $(Build.SourceBranchName) From 36eb909edfd5360313c57c14c1c8bb1b06f66ee8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 4 Mar 2024 21:09:49 +0200 Subject: [PATCH 368/607] Update WinCluster-PostConfig tasks to use domain_name instead of domain --- .../tasks/1.17.2-wincluster-postconfig.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.2-wincluster-postconfig.yaml b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.2-wincluster-postconfig.yaml index 9060990265..3eda7114b2 100644 --- a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.2-wincluster-postconfig.yaml +++ b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.2-wincluster-postconfig.yaml @@ -46,7 +46,7 @@ - name: "WinCluster-PostConfig: Update Cluster Properties" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_dsc: resource_name: ClusterProperty Name: "{{ cluster_name }}" @@ -60,7 +60,7 @@ - name: "WinCluster-PostConfig: Update RouteHistoryLength" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_shell: | (Get-Cluster).RouteHistoryLength = 30 when: @@ -69,7 +69,7 @@ - name: "WinCluster-PostConfig: Copy probe port setup script" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_template: src: "Set-AzLBHealthProbePortForASCS.j2" dest: "{{ sap_deployment_automation }}\\Set-AzLBHealthProbePortForASCS.ps1" @@ -80,7 +80,7 @@ - name: "WinCluster-PostConfig: Execute probe port setup script" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' ansible.windows.win_shell: "{{ sap_deployment_automation }}\\Set-AzLBHealthProbePortForASCS.ps1" when: - ansible_hostname == primary_node @@ -90,7 +90,7 @@ - name: "WinCluster-PostConfig: ENSA1 - Set param to keep connections between SAP WP and the enq server from closing" become: true become_method: ansible.builtin.runas - become_user: '{{ sap_sid }}adm@{{ domain }}' + become_user: '{{ sap_sid }}adm@{{ domain_name }}' community.windows.win_lineinfile: backup: true path: "{{ shared_disk_drive_letter }}:\\usr\\sap\\{{ sap_sid | upper }}\\SYS\\profile\\{{ sap_sid | upper }}_ASCS{{ scs_instance_number }}_{{ scs_cluster_hostname }}" From 35f29a089195cfa83478ea49eb8a54ca6ebd1660 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 11:13:06 +0200 Subject: [PATCH 369/607] Add credssp transport and ignore server certificate validation for WinRM --- .../tasks/1.17.1-wincluster-createcluster.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml index 4ddb154e33..aba972b595 100644 --- 
a/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml +++ b/deploy/ansible/roles-os/windows/1.17-generic-wincluster/tasks/1.17.1-wincluster-createcluster.yaml @@ -48,6 +48,9 @@ Name: "{{ cluster_name }}" PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" PsDscRunAsCredential_password: "{{ domain_service_password }}" + vars: + ansible_winrm_transport: credssp + ansible_winrm_server_cert_validation: ignore when: - ansible_hostname == primary_node @@ -97,6 +100,9 @@ PsDscRunAsCredential_password: "{{ domain_service_password }}" when: - ansible_hostname == secondary_node + vars: + ansible_winrm_transport: credssp + ansible_winrm_server_cert_validation: ignore - name: "WinCluster-Create: Create quorum resource" become: true From 08a884f5da8271178c0135464abafe825040c0e3 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 11:47:43 +0200 Subject: [PATCH 370/607] Refactor DNS entry checks in OS configuration playbook --- deploy/ansible/playbook_01_os_base_config.yaml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 4e87cc3665..73060303d2 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -233,10 +233,25 @@ - scs_high_availability block: - name: "OS configuration playbook: - Check if required DNS entries are made" - ansible.windows.win_shell: "[System.Net.DNS]::Resolve('{{ sap_sid | lower }}scs{{ scs_instance_number }}cl1')" + ansible.windows.win_shell: "[System.Net.DNS]::Resolve('{{ sap_sid | lower }}scs{{ scs_instance_number }}cl1').AddressList.IPAddressToString" register: dns_check_results failed_when: dns_check_results.rc > 0 + - name: "OS configuration playbook: - Check if required DNS entries are made" + ansible.builtin.set_fact: + dns_in_AD: "{{ dns_check_results.stdout }}" + when: + - dns_check_results is defined + register: dns_check_results + failed_when: dns_check_results.rc > 0 + + - name: "OS configuration playbook: - Check if required DNS entries match" + ansible.builtin.assert: + that: "'{{ dns_in_AD }}' == '{{ scs_clst_lb_ip | split('/') | first }}'" + fail_msg: "The DNS entry for the SCS cluster is not correct in Active Directory" + when: + - dns_in_AD is defined + - name: "OS configuration playbook: - Check if required DNS entries are made - show results" ansible.builtin.debug: msg: "DNS query results: {{ dns_check_results.stdout }}" From 64e896c09ac60d76aa8c1e88be4f3b2eb5e07d1a Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 5 Mar 2024 15:41:04 +0530 Subject: [PATCH 371/607] Terrform code related to kdump disk creation and probe_threshold configuration on the load balancer --- deploy/configs/hana_sizes_v2.json | 13 ++-- deploy/terraform/run/sap_system/module.tf | 6 +- .../run/sap_system/tfvar_variables.tf | 28 +++++++- deploy/terraform/run/sap_system/transform.tf | 23 ++++--- .../sap_system/anydb_node/infrastructure.tf | 2 +- .../modules/sap_system/anydb_node/outputs.tf | 14 +++- .../modules/sap_system/anydb_node/vm-anydb.tf | 63 +++++++++++++++++ .../modules/sap_system/app_tier/outputs.tf | 12 ++++ .../modules/sap_system/app_tier/vm-scs.tf | 68 +++++++++++++++++++ .../modules/sap_system/app_tier/vm-webdisp.tf | 1 + .../modules/sap_system/hdb_node/outputs.tf | 13 ++++ .../modules/sap_system/hdb_node/vm-hdb.tf | 60 ++++++++++++++++ .../sap_system/output_files/inventory.tf | 1 + 
.../output_files/sap-parameters.tmpl | 2 + .../output_files/variables_global.tf | 1 + 15 files changed, 287 insertions(+), 20 deletions(-) diff --git a/deploy/configs/hana_sizes_v2.json b/deploy/configs/hana_sizes_v2.json index 2d0abeb333..c6ec71371b 100644 --- a/deploy/configs/hana_sizes_v2.json +++ b/deploy/configs/hana_sizes_v2.json @@ -8,7 +8,6 @@ { "name": "os", "fullname": "", - "fullname": "", "count": 1, "disk_type": "Premium_LRS", "size_gb": 128, @@ -101,7 +100,7 @@ "size_gb": 512, "caching": "None", "write_accelerator": false, - "lun_start": 9 + "lun_start": 9 }, { "name": "shared", @@ -111,7 +110,7 @@ "size_gb": 512, "caching": "None", "write_accelerator": false, - "lun_start": 13 + "lun_start": 13 }, { "name": "sap", @@ -172,17 +171,17 @@ "size_gb": 128, "caching": "None", "write_accelerator": false, - "lun_start": 10 + "lun_start": 10 }, { "name": "log", "fullname": "", - "count": 3, + "count": 3, "disk_type": "PremiumV2_LRS", "size_gb": 64, "caching": "None", "write_accelerator": false, - "lun_start": 20 + "lun_start": 20 }, { "name": "shared", @@ -202,7 +201,7 @@ "size_gb": 512, "caching": "None", "write_accelerator": false, - "lun_start": 2 + "lun_start": 2 } ] }, diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index c95631c6c2..d9263dfaa9 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -314,6 +314,7 @@ module "output_files" { module.hdb_node.database_shared_disks) : ( module.anydb_node.database_shared_disks ) + is_use_fence_kdump = var.use_fence_kdump infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs naming = length(var.name_override_file) > 0 ? ( @@ -352,7 +353,10 @@ module "output_files" { database_subnet_netmask = module.common_infrastructure.db_subnet_netmask disks = distinct(compact(concat(module.hdb_node.database_disks, module.anydb_node.database_disks, - module.app_tier.apptier_disks + module.app_tier.apptier_disks, + module.hdb_node.database_kdump_disks, + module.anydb_node.database_kdump_disks, + module.app_tier.scs_kdump_disks ))) loadbalancers = module.hdb_node.loadbalancers diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index dca46ae72a..b73b207b3a 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -341,10 +341,34 @@ variable "fencing_role_name" { } variable "use_simple_mount" { - description = "If specified use Simple mount" - default = true + description = "Determine if simple mount needs to be added for SCS and DB clusters" + default = false + } + +variable "use_fence_kdump" { + description = "Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters" + default = false + } + +variable "use_fence_kdump_size_gb_db" { + description = "Default size of the kdump disk which will be attached to the VMs which are part DB cluster" + default = 128 } +variable "use_fence_kdump_size_gb_scs" { + description = "Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster" + default = 64 + } + +variable "use_fence_kdump_lun_db" { + description = "Default lun number of the kdump disk which will be attached to the VMs which are part of DB cluster" + default = 8 + } + +variable "use_fence_kdump_lun_scs" { + description = "Default lun number of the kdump disk which will be attached to the VMs which are part of 
SCS cluster" + default = 4 + } ######################################################################################### # # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index b573cafb94..ac0225c6b8 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -591,21 +591,28 @@ locals { length(local.web_nic_ips) > 0 ? { web_nic_ips = local.web_nic_ips } : null), ( length(var.webdispatcher_server_nic_secondary_ips) > 0 ? { web_nic_secondary_ips = var.webdispatcher_server_nic_secondary_ips } : null), ( length(local.web_admin_nic_ips) > 0 ? { web_admin_nic_ips = local.web_admin_nic_ips } : null), ( - length(local.webdispatcher_loadbalancer_ips) > 0 ? { webdispatcher_loadbalancer_ips = local.webdispatcher_loadbalancer_ips } : null), ( + length(local.webdispatcher_loadbalancer_ips) > 0 ? { webdispatcher_loadbalancer_ips = local.webdispatcher_loadbalancer_ips } : null), ( length(local.app_tags) > 0 ? { app_tags = local.app_tags } : { app_tags = local.app_tags }), ( length(local.scs_tags) > 0 ? { scs_tags = local.scs_tags } : { scs_tags = local.scs_tags }), ( - length(local.web_tags) > 0 ? { web_tags = local.web_tags } : { web_tags = local.web_tags } + length(local.web_tags) > 0 ? { web_tags = local.web_tags } : { web_tags = local.web_tags }), ( + var.use_fence_kdump && var.scs_high_availability ? { fence_kdump_disk_size = var.use_fence_kdump_size_gb_scs } : { fence_kdump_disk_size = 0 } ), ( + var.use_fence_kdump && var.scs_high_availability ? { fence_kdump_lun_number = var.use_fence_kdump_lun_scs } : { fence_kdump_lun_number = -1 } ) ) database = merge( local.databases_temp, - (local.db_os_specified ? { os = local.db_os } : null), - (local.db_authentication_defined ? { authentication = local.db_authentication } : null), - (local.db_avset_arm_ids_defined ? { avset_arm_ids = local.avset_arm_ids } : null), - (length(local.frontend_ips) > 0 ? { loadbalancer = { frontend_ips = local.frontend_ips } } : { loadbalancer = { frontend_ips = [] } }), - (length(local.db_tags) > 0 ? { tags = local.db_tags } : null), - (local.db_sid_specified ? { instance = local.instance } : null) + (local.db_os_specified ? { os = local.db_os } : null), + (local.db_authentication_defined ? { authentication = local.db_authentication } : null), + (local.db_avset_arm_ids_defined ? { avset_arm_ids = local.avset_arm_ids } : null), + (length(local.frontend_ips) > 0 ? { loadbalancer = { frontend_ips = local.frontend_ips } } : { loadbalancer = { frontend_ips = [] } }), + (length(local.db_tags) > 0 ? { tags = local.db_tags } : null), + (local.db_sid_specified ? { instance = local.instance } : null), ( + ( var.use_fence_kdump && + var.database_high_availability ) ? { fence_kdump_disk_size = var.use_fence_kdump_size_gb_db } : { fence_kdump_disk_size = 0 } ), ( + ( var.use_fence_kdump && + var.database_high_availability ) ? 
{ fence_kdump_lun_number = var.use_fence_kdump_lun_db } : { fence_kdump_lun_number = -1 } + ) ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf index f4ee09e61d..091336fdcf 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf @@ -61,7 +61,7 @@ resource "azurerm_lb_probe" "anydb" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = 2 - + probe_threshold = 2 } #######################################4#######################################8 diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf index ac6b29af3f..55525a650f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ -157,7 +157,7 @@ output "database_shared_disks" { description = "List of Azure shared disks" value = distinct( flatten( - [for vm in var.naming.virtualmachine_names.HANA_COMPUTERNAME : + [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.cluster : format("{ host: '%s', lun: %d, type: 'ASD' }", vm, disk.lun) ] @@ -165,3 +165,15 @@ output "database_shared_disks" { ) ) } +output "database_kdump_disks" { + description = "List of Azure kdump disks" + value = distinct( + flatten( + [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : + [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : + format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + ] + ] + ) + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 7ef86c0ffd..e3265e23c8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -626,3 +626,66 @@ resource "azurerm_role_assignment" "role_assignment_msi_ha" { role_definition_name = var.fencing_role_name principal_id = azurerm_linux_virtual_machine.dbserver[(count.index +1) % var.database_server_count].identity[0].principal_id } + + +######################################################################################### +# # +# Azure Data Disk for Kdump # +# # +#######################################+################################################# +resource "azurerm_managed_disk" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.database.high_availability && + ( + upper(var.database.os.os_type) == "LINUX" && + ( var.database.fence_kdump_disk_size > 0 ) + ) + ) ? 
var.database_server_count : 0 + lifecycle { + ignore_changes = [tags] + } + + name = format("%s%s%s%s%s", + try( var.naming.resource_prefixes.fence_kdump_disk, ""), + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.ANYDB_VMNAME[count.index], + try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) + ) + location = var.resource_group[0].location + resource_group_name = var.resource_group[0].name + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = try(var.database.fence_kdump_disk_size,128) + disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) + tags = var.tags + + zone = local.zonal_deployment && !var.database.use_avset ? ( + azurerm_linux_virtual_machine.dbserver[count.index].zone + ) : ( + null + ) + +} + +resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.database.high_availability && + ( + upper(var.database.os.os_type) == "LINUX" && + ( var.database.fence_kdump_disk_size > 0 ) + ) + ) ? var.database_server_count : 0 + + managed_disk_id = azurerm_managed_disk.kdump[count.index].id + virtual_machine_id = (upper(var.database.os.os_type) == "LINUX" # If Linux + ) ? ( + azurerm_linux_virtual_machine.dbserver[count.index].id + ) : null + caching = "None" + lun = var.database.fence_kdump_lun_number +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf index d6fdc7543c..92423c0a34 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf @@ -307,3 +307,15 @@ output "scs_asd" { ) ) } +output "scs_kdump_disks" { + description = "List of Azure shared disks" + value = distinct( + flatten( + [for vm in var.naming.virtualmachine_names.SCS_COMPUTERNAME : + [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : + format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + ] + ] + ) + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 6167e7c666..13f1647252 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -564,6 +564,10 @@ resource "azurerm_managed_disk" "cluster" { ) ) ) ? 1 : 0 + lifecycle { + ignore_changes = [tags] + } + name = format("%s%s%s%s", var.naming.resource_prefixes.scs_cluster_disk, local.prefix, @@ -624,3 +628,67 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { caching = "None" lun = var.scs_cluster_disk_lun } + +######################################################################################### +# # +# Azure Data Disk for Kdump # +# # +#######################################+################################################# +resource "azurerm_managed_disk" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.application_tier.scs_high_availability && + ( + upper(var.application_tier.scs_os.os_type) == "LINUX" && + ( var.application_tier.fence_kdump_disk_size > 0 ) + ) + ) ? 
local.scs_server_count : 0 + lifecycle { + ignore_changes = [tags] + } + + name = format("%s%s%s%s%s", + try( var.naming.resource_prefixes.fence_kdump_disk, ""), + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.SCS_VMNAME[count.index], + try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) + ) + location = var.resource_group[0].location + resource_group_name = var.resource_group[0].name + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = try(var.application_tier.fence_kdump_disk_size,64) + disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) + tags = var.tags + + zone = local.scs_zonal_deployment ? ( + upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( + azurerm_linux_virtual_machine.scs[count.index].zone) : + null + ) : ( + null + ) + +} + +resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.application_tier.scs_high_availability && + ( + upper(var.application_tier.scs_os.os_type) == "LINUX" && + ( var.application_tier.fence_kdump_disk_size > 0 ) + ) + ) ? local.scs_server_count : 0 + + managed_disk_id = azurerm_managed_disk.kdump[count.index].id + virtual_machine_id = (upper(var.application_tier.scs_os.os_type) == "LINUX" # If Linux + ) ? ( + azurerm_linux_virtual_machine.scs[count.index].id + ) : null + caching = "None" + lun = var.application_tier.fence_kdump_lun_number +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 9b3bc4c6b6..cfe80110c3 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -564,6 +564,7 @@ resource "azurerm_lb_probe" "web" { protocol = "Tcp" interval_in_seconds = 5 number_of_probes = 2 + probe_threshold = 2 } # Create the Web dispatcher Load Balancer Rules diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index 8cefe1ad41..e71ef9ffc8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -204,3 +204,16 @@ output "database_shared_disks" { ) ) } + +output "database_kdump_disks" { + description = "List of Azure disks for kdump" + value = distinct( + flatten( + [for vm in var.naming.virtualmachine_names.HANA_COMPUTERNAME : + [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : + format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + ] + ] + ) + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index dd41be7240..06cd940f7d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -480,3 +480,63 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { caching = "None" lun = var.database_cluster_disk_lun } + +######################################################################################### +# # +# Azure Data Disk for Kdump # +# # +#######################################+################################################# +resource "azurerm_managed_disk" "kdump" { + 
provider = azurerm.main + count = ( + local.enable_deployment && + var.database.high_availability && + ( + upper(var.database.os.os_type) == "LINUX" && + ( var.database.fence_kdump_disk_size > 0 ) + ) + ) ? var.database_server_count : 0 + lifecycle { + ignore_changes = [tags] + } + + name = format("%s%s%s%s%s", + try( var.naming.resource_prefixes.fence_kdump_disk, ""), + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.HANA_VMNAME[count.index], + try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) + ) + location = var.resource_group[0].location + resource_group_name = var.resource_group[0].name + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = try(var.database.fence_kdump_disk_size,128) + disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) + tags = var.tags + zone = local.zonal_deployment && !local.use_avset ? ( + azurerm_linux_virtual_machine.vm_dbnode[count.index].zone) : ( + null + ) + +} + +resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.database.high_availability && + ( + upper(var.database.os.os_type) == "LINUX" && + ( var.database.fence_kdump_disk_size > 0 ) + ) + ) ? var.database_server_count : 0 + + managed_disk_id = azurerm_managed_disk.kdump[count.index].id + virtual_machine_id = (upper(var.database.os.os_type) == "LINUX" # If Linux + ) ? ( + azurerm_linux_virtual_machine.vm_dbnode[count.index].id + ) : null + caching = "None" + lun = var.database.fence_kdump_lun_number +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 5c7f596903..811a8b3093 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -206,6 +206,7 @@ resource "local_file" "sap-parameters_yml" { "" ) is_use_simple_mount = var.use_simple_mount + is_use_fence_kdump = var.is_use_fence_kdump iscsi_server_list = concat(local.iscsi_scs_servers, local.iscsi_db_servers) kv_name = local.kv_name, NFS_provider = var.NFS_provider diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 96a018c47f..f07cbd4386 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -96,6 +96,8 @@ database_cluster_ip: ${database_cluster_ip} # use_simple_mount defines if simple mount is to be used use_simple_mount: ${is_use_simple_mount} +# use_fence_kdump defines if optional kdump stonith device needs to be added for RHEL clusters. 
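+# ${is_use_fence_kdump} is substituted by Terraform when this template is
+# rendered into the sap-parameters file; with the default settings the
+# rendered line would read, for example:
+#   use_fence_kdump: false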
+use_fence_kdump: ${is_use_fence_kdump} ############################################################################# # # # NFS # diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 04efde8d24..d3c91e94e9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -52,6 +52,7 @@ variable "database_server_ips" { description = "List of IP addr variable "database_server_secondary_ips" { description = "List of secondary IP addresses for the database servers" } variable "database_shared_disks" { description = "Database Azure Shared Disk" } variable "database_server_vm_names" { description = "List of VM names for the database servers" } +variable "is_use_fence_kdump" { description = "Use fence kdump for optional stonith configuration on RHEL" } variable "db_sid" { description = "Database SID" } variable "database_subnet_netmask" { description = "netmask for the database subnet" } variable "disks" { description = "List of disks" } From ceb819e6a21c19cb618f65b75e13f319537a74fb Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 5 Mar 2024 15:47:01 +0530 Subject: [PATCH 372/607] Refactor kdump and cluster managed disk lifecycle configuration --- .../modules/sap_system/anydb_node/vm-anydb.tf | 24 ++++++++++++------- .../modules/sap_system/app_tier/outputs.tf | 6 ++--- .../modules/sap_system/app_tier/vm-scs.tf | 14 ++++++----- .../modules/sap_system/hdb_node/vm-hdb.tf | 11 ++++++--- 4 files changed, 35 insertions(+), 20 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index e3265e23c8..e5e01c7c07 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -535,9 +535,6 @@ resource "azurerm_managed_disk" "cluster" { ) ) ) ? 1 : 0 - lifecycle { - ignore_changes = [tags] - } name = format("%s%s%s%s", var.naming.resource_prefixes.database_cluster_disk, @@ -561,7 +558,14 @@ resource "azurerm_managed_disk" "cluster" { )) : ( null ) - + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id, + tags + ] + } } resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { @@ -643,9 +647,6 @@ resource "azurerm_managed_disk" "kdump" { ( var.database.fence_kdump_disk_size > 0 ) ) ) ? 
var.database_server_count : 0 - lifecycle { - ignore_changes = [tags] - } name = format("%s%s%s%s%s", try( var.naming.resource_prefixes.fence_kdump_disk, ""), @@ -667,7 +668,14 @@ resource "azurerm_managed_disk" "kdump" { ) : ( null ) - + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id, + tags + ] + } } resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf index 92423c0a34..e9a6f311b4 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf @@ -301,19 +301,19 @@ output "scs_asd" { flatten( [for vm in var.naming.virtualmachine_names.SCS_COMPUTERNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.cluster : - format("{ host: '%s', lun: %d, type: 'ASD' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'ASD' }", vm, disk.lun) ] ] ) ) } output "scs_kdump_disks" { - description = "List of Azure shared disks" + description = "List of kdump disks" value = distinct( flatten( [for vm in var.naming.virtualmachine_names.SCS_COMPUTERNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : - format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun) ] ] ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 13f1647252..18be6572b3 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -564,9 +564,6 @@ resource "azurerm_managed_disk" "cluster" { ) ) ) ? 1 : 0 - lifecycle { - ignore_changes = [tags] - } name = format("%s%s%s%s", var.naming.resource_prefixes.scs_cluster_disk, @@ -644,9 +641,6 @@ resource "azurerm_managed_disk" "kdump" { ( var.application_tier.fence_kdump_disk_size > 0 ) ) ) ? local.scs_server_count : 0 - lifecycle { - ignore_changes = [tags] - } name = format("%s%s%s%s%s", try( var.naming.resource_prefixes.fence_kdump_disk, ""), @@ -670,6 +664,14 @@ resource "azurerm_managed_disk" "kdump" { ) : ( null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id, + tags + ] + } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 06cd940f7d..a8fb333a63 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -496,9 +496,6 @@ resource "azurerm_managed_disk" "kdump" { ( var.database.fence_kdump_disk_size > 0 ) ) ) ? 
var.database_server_count : 0 - lifecycle { - ignore_changes = [tags] - } name = format("%s%s%s%s%s", try( var.naming.resource_prefixes.fence_kdump_disk, ""), @@ -518,6 +515,14 @@ resource "azurerm_managed_disk" "kdump" { azurerm_linux_virtual_machine.vm_dbnode[count.index].zone) : ( null ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id, + tags + ] + } } From 062b4c15250eef69410b152859198390047267a5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 12:57:47 +0200 Subject: [PATCH 373/607] Refactor DNS check in OS configuration playbook --- deploy/ansible/playbook_01_os_base_config.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 73060303d2..5c0718bc99 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -239,11 +239,9 @@ - name: "OS configuration playbook: - Check if required DNS entries are made" ansible.builtin.set_fact: - dns_in_AD: "{{ dns_check_results.stdout }}" + dns_in_AD: "{{ dns_check_results.stdout_lines[0] }}" when: - dns_check_results is defined - register: dns_check_results - failed_when: dns_check_results.rc > 0 - name: "OS configuration playbook: - Check if required DNS entries match" ansible.builtin.assert: From fcd379681826003552f587385349b1ed3931a44a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 13:14:59 +0200 Subject: [PATCH 374/607] Add KDUMP support to the Web App --- Webapp/SDAF/Models/SystemModel.cs | 14 ++++++ .../SDAF/ParameterDetails/SystemDetails.json | 46 +++++++++++++++++++ .../SDAF/ParameterDetails/SystemTemplate.txt | 14 ++++++ 3 files changed, 74 insertions(+) diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index afb5bb5a7b..a896d07dc7 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -626,7 +626,21 @@ public bool IsValid() [AMSIdValidator(ErrorMessage = "Invalid AMS Resource id")] public string ams_resource_id { get; set; } + /*---------------------------------------------------------------------------8 + | | + | KDump Parameters | + | | + +------------------------------------4--------------------------------------*/ + + public bool? use_fence_kdump { get; set; } = false; + + public int? use_fence_kdump_size_gb_db { get; set; } = 128; + + public int? use_fence_kdump_lun_db { get; set; } = 8; + + public int? use_fence_kdump_size_gb_scs { get; set; } = 64; + public int? 
use_fence_kdump_lun_scs { get; set; } = 4; } diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index f4bf2cd462..e1d55b1ba1 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -1389,7 +1389,53 @@ "Options": [], "Overrules": "", "Display": 3 + }, + { + "Name": "use_fence_kdump", + "Required": false, + "Description": "Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "use_fence_kdump_size_gb_db", + "Required": false, + "Description": "Default size of the kdump disk which will be attached to the VMs which are part DB cluster.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "use_fence_kdump_lun_db", + "Required": false, + "Description": "Default LUN number of the kdump disk which will be attached to the VMs which are part of DB cluster.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "use_fence_kdump_size_gb_scs", + "Required": false, + "Description": "Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "use_fence_kdump_lun_scs", + "Required": false, + "Description": "Default LUN number of the kdump disk which will be attached to the VMs which are part of SCS cluster.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 } + ] }, { diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 8fba95ae3a..0de4000fe4 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -130,6 +130,20 @@ $$fencing_role_name$$ # use_simple_mount specifies if Simple mounts are used (Applicable for SLES 15 SP# or newer) $$use_simple_mount$$ +# Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters +$$use_fence_kdump$$ + +# Default size of the kdump disk which will be attached to the VMs which are part DB cluster +$$use_fence_kdump_size_gb_db$$ + +# Default LUN number of the kdump disk which will be attached to the VMs which are part of DB cluster +$$use_fence_kdump_lun_db$$ + +# Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster +$$use_fence_kdump_size_gb_scs$$ + +# Default LUN number of the kdump disk which will be attached to the VMs which are part of SCS cluster +$$use_fence_kdump_lun_scs$$ ######################################################################################### # # From 81c0133ab0749381e3c3b16a23d755779ee881ca Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 13:33:12 +0200 Subject: [PATCH 375/607] Update dotnet packages --- Webapp/SDAF/SDAFWebApp.csproj | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 2e44a21036..7961079e0c 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -17,8 +17,8 @@ - - + + @@ -26,8 +26,8 @@ - - + + From 362789b9057f388d4cb4ab699b338646ccca414c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 13:41:01 +0200 Subject: [PATCH 376/607] Refactor DNS entry check in OS configuration playbook --- 
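Notes: expressions under `that:` are, like `when:`, evaluated as raw Jinja2,
so nesting '{{ ... }}' moustaches is redundant and turns the check into a
string comparison on pre-rendered values. A minimal sketch of the resulting
pattern (variable values illustrative):

    - name: "Assert that the resolved address matches the load balancer IP"
      ansible.builtin.assert:
        that: dns_in_AD == (scs_clst_lb_ip | split('/') | first)
        fail_msg: "The DNS entry for the SCS cluster is not correct in Active Directory"

The `split('/') | first` keeps only the address part when the configured
value carries a /mask suffix.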
deploy/ansible/playbook_01_os_base_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 5c0718bc99..caeaec1405 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -245,7 +245,7 @@ - name: "OS configuration playbook: - Check if required DNS entries match" ansible.builtin.assert: - that: "'{{ dns_in_AD }}' == '{{ scs_clst_lb_ip | split('/') | first }}'" + that: "dns_in_AD == (scs_clst_lb_ip | split('/') | first)" fail_msg: "The DNS entry for the SCS cluster is not correct in Active Directory" when: - dns_in_AD is defined From 297273c6f48e14642ff75074f704ecc0075188cf Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 5 Mar 2024 20:19:20 +0530 Subject: [PATCH 377/607] Ansible pieces for kdump enablement, adding sbd during cluster setup, simplification of NFS mount options when using AFS and other changes --- .../tasks/1.17.0-set_runtime_facts.yml | 3 +- .../tasks/1.17.1-pre_checks.yml | 69 +++++++++- .../tasks/1.17.1.2-sbd.yaml | 20 +++ .../tasks/1.17.2-provision.yml | 3 + .../tasks/1.17.2.0-cluster-RedHat.yml | 128 ++++++++++++++++-- .../tasks/1.17.2.0-cluster-Suse.yml | 87 ++++++++++-- .../1.17-generic-pacemaker/vars/main.yml | 3 + .../2.10-sap-notes/tasks/2.10.2.yaml | 5 +- .../2.10-sap-notes/tasks/main.yaml | 28 +--- .../2.4-hosts-file/tasks/main.yaml | 20 ++- .../tasks/2.6.0-afs-mounts.yaml | 28 ++-- .../tasks/5.5.4.1-cluster-Suse.yml | 2 - .../tasks/5.6.1-set_runtime_facts.yml | 25 ++++ .../tasks/5.6.6-validate.yml | 2 +- deploy/ansible/vars/ansible-input-api.yaml | 4 + deploy/ansible/vars/disks_config.yml | 14 +- 16 files changed, 370 insertions(+), 71 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml index d08271213f..ea40eb3c90 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml @@ -104,9 +104,10 @@ # - "database_cluster_type: {{ database_cluster_type }}" # verbosity: 2 + # scs_high_availability = true is already assumed when: (database_cluster_type == "ISCSI") or - (scs_cluster_type == "ISCSI") # scs_high_availability = true is already assumed + (scs_cluster_type == "ISCSI") # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml index 0503dcbd2a..02fa59a3ca 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml @@ -12,6 +12,9 @@ # | | # +------------------------------------4--------------------------------------*/ - name: "Cluster Type: Fencing" + when: + - (database_cluster_type == "AFA") or + (scs_cluster_type == "AFA") # scs_high_availability = true is already assumed block: - name: "BEGIN: Fencing specific..." @@ -39,9 +42,7 @@ msg: # Best method for formatting output with Azure Devops Logs - "END : Fencing specific..." 
- when: - - (database_cluster_type == "AFA") or - (scs_cluster_type == "AFA") # scs_high_availability = true is already assumed + # /*---------------------------------------------------------------------------8 # | | # | Fencing Specific - END | @@ -124,6 +125,68 @@ msg: - "CLUSTER VALIDATION : {{ cluster_existence_check }}" +# /*---------------------------------------------------------------------------8 +# | | +# | Fencing Specific - Kdump RHEL only | +# | | +# +------------------------------------4--------------------------------------*/ +- name: "1.17 Generic Pacemaker - RHEL - Configure optional fence_kdump stonith device" + when: + - ansible_os_family | upper == "REDHAT" + - use_fence_kdump + block: + - name: "1.17 Generic Pacemaker - kdump service is enabled" + ansible.builtin.service: + name: "kdump" + enabled: true + state: started + + # Todo: Use ansible.builtin.service_facts to get the status of the kdump service + - name: "1.17 Generic Pacemaker - RHEL - Check if kdump is enabled" + ansible.builtin.command: "systemctl is-enabled kdump" + register: kdump_enabled_check_result + changed_when: false + failed_when: false + tags: + - skip_ansible_lint + + - name: "1.17 Generic Pacemaker - RHEL - Save kdump facts" + ansible.builtin.set_fact: + kdump_enabled_check: "{{ kdump_enabled_check_result.rc | int }}" + kdump_enabled: "{{ kdump_enabled_check_result.stdout | trim }}" + when: + - kdump_enabled_check_result.rc == 0 + + # kdump_enabled_check_result.stdout == "enabled" + - name: "1.17 Generic Pacemaker - RHEL - show if kdump is enabled" + ansible.builtin.debug: + msg: + - "Kdump is enabled: {{ kdump_enabled | default('false') }}" + verbosity: 2 + + # Install the fence_kdump fence agent. This is required for fencing to work with kdump. + - name: "1.17 Generic Pacemaker - RHEL - Install fence-agents-kdump" + ansible.builtin.package: + name: "fence-agents-kdump" + state: present + when: + - kdump_enabled_check == 0 + - kdump_enabled == "enabled" + + # Allow the required ports for fence_kdump through the firewall. + - name: "1.17 Generic Pacemaker - RHEL - Allow ports for fence_kdump through the firewall" + ansible.builtin.firewalld: + port: "7410/udp" + permanent: true + state: enabled + offline: true + +# /*---------------------------------------------------------------------------8 +# | | +# | Fencing Specific - Kdump RHEL only - END | +# | | +# +------------------------------------4--------------------------------------*/ + ... # /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index 8cf28f88d2..db160faa86 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -232,6 +232,16 @@ {%- endfor -%} "'} + - name: "set fact sbd_device as list of entries in sbdMap" + ansible.builtin.set_fact: + sbd_device: "{{ sbdMap | map(attribute='diskById') | join(';') | list }}" + when: sbdDumpCommand_results.rc == 1 + + - name: "Show sbd_device..." 
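+  # sbd_device is intended to be a single ';'-separated string of by-id
+  # device paths, e.g. (illustrative):
+  #   /dev/disk/by-id/scsi-360014056...;/dev/disk/by-id/scsi-360014057...
+  # Note: piping the joined string through '| list' splits it back into a
+  # list of single characters; join(';') on its own yields the usable value.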
+ ansible.builtin.debug: + var: sbd_device + verbosity: 2 + when: sbdDumpCommand_results.rc == 1 # /*------------------------------------ # | @@ -277,6 +287,16 @@ (scs_cluster_type == "ASD") or (scs_cluster_type == "ISCSI") # scs_high_availability = true is already assumed +# you need to restart cluster after enabling sbd +# - name: "systemctl restart pacemaker" +# ansible.builtin.systemd: +# name: pacemaker +# state: restarted +# when: +# - (database_cluster_type == "ASD") or +# (database_cluster_type == "ISCSI") or +# (scs_cluster_type == "ASD") or +# (scs_cluster_type == "ISCSI") # scs_high_availability = true is already assumed ... # /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index f571407ee2..7b9272d75d 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -25,6 +25,9 @@ backup: true regexp: '^CLOUD_NETCONFIG_MANAGE=' line: CLOUD_NETCONFIG_MANAGE='no' + when: + - ansible_facts.packages['cloud-netconfig-azure'] + - (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3 tags: - cloudnetmanage diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index f89f6f1257..874b1953a6 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -214,6 +214,120 @@ # | | # +------------------------------------4--------------------------------------*/ +# /*---------------------------------------------------------------------------8 +# | | +# | kdump stonith - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "1.17 Generic Pacemaker - Install fence-agents-kdump package" + when: + - kdump_enabled == "enabled" + ansible.builtin.yum: + name: fence-agents-kdump + state: present + register: fence_agents_kdump_package + changed_when: fence_agents_kdump_package.changed + +- name: "1.17 Generic Pacemaker - configure the special fencing device fence_kdump" + when: + - inventory_hostname == primary_instance_name + - kdump_enabled == "enabled" + block: + + # we can assume that the stonith:fence_azure_rm is already configured + # if we need to check we can use either pcs stonith show or pcs stonith status + # create fence_kdump fencing device in the cluster + - name: "1.17 Generic Pacemaker - Create a fence_kdump fencing device in the cluster" + ansible.builtin.command: > + pcs stonith create rsc_st_kdump fence_kdump + pcmk_reboot_action="off" + pcmk_host_list="{{ primary_instance_name }} {{ secondary_instance_name }}" + timeout=30 + + - name: "1.17 Generic Pacemaker - Update Monitor interval" + ansible.builtin.command: pcs resource update rsc_st_kdump op monitor interval=3600 + + # for each node in the play, set the fence_kdump fencing device as the first fencing device to be used + - name: "1.17 Generic Pacemaker - Set the fence_kdump fencing device as the first for {{ primary_instance_name }}" + ansible.builtin.command: pcs stonith level add 1 {{ primary_instance_name }} rsc_st_kdump + + - name: "1.17 Generic Pacemaker - Set the fence_kdump fencing device as the first for 
{{ secondary_instance_name }}"
+      ansible.builtin.command: pcs stonith level add 1 {{ secondary_instance_name }} rsc_st_kdump
+
+    - name: "1.17 Generic Pacemaker - Set the fence_azure_rm fencing device as the second for {{ primary_instance_name }}"
+      ansible.builtin.command: pcs stonith level add 2 {{ primary_instance_name }} rsc_st_azure
+
+    - name: "1.17 Generic Pacemaker - Set the fence_azure_rm fencing device as the second for {{ secondary_instance_name }}"
+      ansible.builtin.command: pcs stonith level add 2 {{ secondary_instance_name }} rsc_st_azure
+
+- name: "1.17 Generic Pacemaker - Ensure that the kdump service is enabled"
+  when:
+    - kdump_enabled == "enabled"
+  block:
+# Ensure that the initramfs image file contains the fence_kdump and hosts files
+    - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files"
+      ansible.builtin.command: lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts"
+      register: initramfs_image_file
+      changed_when: false
+      failed_when: false
+
+    - name: "1.17 Generic Pacemaker - Add hosts and kdump files to the initramfs image file"
+      ansible.builtin.command: dracut -f -v --add hosts --install "cat" /boot/initramfs-$(uname -r)kdump.img $(uname -r) --force
+      when: initramfs_image_file.rc != 0
+      failed_when: false
+
+    # Ensure that the initramfs image file contains the fence_kdump and hosts files
+    - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files"
+      ansible.builtin.command: lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts"
+      register: initramfs_image_check
+      changed_when: false
+      failed_when: initramfs_image_check.rc != 0
+
+    # print debug on the validation of initramfs
+    - name: "1.17 Generic Pacemaker - debug initramfs output"
+      ansible.builtin.debug:
+        msg: "initramfs check: {{ initramfs_image_check.stdout }}"
+      when: initramfs_image_check.rc == 0
+
+    # Perform the fence_kdump_nodes configuration in /etc/kdump.conf
+    - name: "1.17 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf"
+      ansible.builtin.replace:
+        path: /etc/kdump.conf
+        regexp: '^#fence_kdump_nodes'
+        replace: 'fence_kdump_nodes "{{ secondary_instance_name }}"'
+        backup: true
+      register: kdump_conf_file
+      when:
+        - kdump_enabled == "enabled"
+        - inventory_hostname == primary_instance_name
+
+    # Perform the fence_kdump_nodes configuration in /etc/kdump.conf
+    - name: "1.17 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf"
+      ansible.builtin.replace:
+        path: /etc/kdump.conf
+        regexp: '^#fence_kdump_nodes'
+        replace: 'fence_kdump_nodes "{{ primary_instance_name }}"'
+        backup: true
+      register: kdump_conf_file
+      when:
+        - kdump_enabled == "enabled"
+        - inventory_hostname == secondary_instance_name
+
+    # restart kdump service
+    - name: "1.17 Generic Pacemaker - Restart kdump service"
+      ansible.builtin.service:
+        name: kdump
+        state: restarted
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                           kdump stonith - END                              |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
 # /*---------------------------------------------------------------------------8
 # |                                                                            |
 # |                     Azure scheduled events - BEGIN                         |
@@ -225,18 +339,17 @@
     is_rhel_84_or_newer: "{{ ansible_distribution_version is version('8.4', '>=') }}"
   when: ansible_distribution_major_version in
["8", "9"] +- name: "1.17 Generic Pacemaker - Ensure Azure scheduled events is configured" + when: + - inventory_hostname == primary_instance_name + - is_rhel_84_or_newer + block: # After configuring the Pacemaker resources for azure-events agent, # when you place the cluster in or out of maintenance mode, you may get warning messages like: # WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname' # WARNING: cib-bootstrap-options: unknown attribute 'azure-events_globalPullState' # WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname' # These warning messages can be ignored. -- name: "1.17 Generic Pacemaker - Ensure Azure scheduled events is configured" - when: - - cluster_use_scheduled_events_agent - - inventory_hostname == primary_instance_name - - is_rhel_84_or_newer - block: - name: "1.17 Generic Pacemaker - Ensure maintenance mode is set" ansible.builtin.command: pcs property set maintenance-mode=true @@ -287,8 +400,7 @@ # /*---------------------------------------------------------------------------8 # | | -# | Azure scheduled events - END | +# | Azure scheduled events - END | # | | # +------------------------------------4--------------------------------------*/ - # End of Generic Pacemaker setup diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 717d59c48c..2e077a2115 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -22,13 +22,16 @@ state: present regexp: "^#?\\s*CLOUD_NETCONFIG_MANAGE=" line: "CLOUD_NETCONFIG_MANAGE='no'" - when: ansible_facts.packages['cloud-netconfig-azure'] and (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3 + when: + - ansible_facts.packages['cloud-netconfig-azure'] + - (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3 - name: "1.17 Generic Pacemaker - Ensure Primary node initiates the Cluster" + when: ansible_hostname == primary_instance_name block: -# - name: "1.17 Generic Pacemaker - Ensure csync2 is configured" -# ansible.builtin.command: crm cluster init -y csync2 --interface eth0 + # - name: "1.17 Generic Pacemaker - Ensure csync2 is configured" + # ansible.builtin.command: crm cluster init -y csync2 --interface eth0 - name: "1.17 Generic Pacemaker - Ensure corosync is configured" ansible.builtin.command: "crm cluster init -y -u corosync --interface eth0" @@ -37,16 +40,41 @@ # ha-cluster-init is not supported in SLES 15 SP4 anymore, crm syntax required # ansible.builtin.command: "ha-cluster-init -y --name 'hdb_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey" ansible.builtin.command: "crm cluster init -y --name 'hdb_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey" - when: node_tier == 'hana' + when: + - node_tier == 'hana' + - database_cluster_type == "AFA" + + - name: "1.17 Generic Pacemaker - Ensure cluster (hdb_{{ db_sid | upper }}) is configured - SBD" + ansible.builtin.command: "crm cluster init -y --name 'hdb_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey --sbd-device={{ sbd_device }}" + when: + - node_tier == 'hana' + - not database_cluster_type == "AFA" + - name: "1.17 Generic Pacemaker - Ensure cluster (scs_{{ sap_sid | upper }}) is configured" ansible.builtin.command: "crm cluster init -y --name 'scs_{{ sap_sid | upper }}' --interface eth0 --no-overwrite-sshkey" - when: 
node_tier == 'scs'
+      when:
+        - node_tier == 'scs'
+        - scs_cluster_type == "AFA"
+
+    - name: "1.17 Generic Pacemaker - Ensure cluster (scs_{{ sap_sid | upper }}) is configured - SBD"
+      ansible.builtin.command: "crm cluster init -y --name 'scs_{{ sap_sid | upper }}' --interface eth0 --no-overwrite-sshkey --sbd-device={{ sbd_device }}"
+      when:
+        - node_tier == 'scs'
+        - not scs_cluster_type == "AFA"

     - name: "1.17 Generic Pacemaker - Ensure cluster (db2_{{ db_sid | upper }}) is configured"
       ansible.builtin.command: "crm cluster init -y --name 'db2_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey"
-      when: node_tier == 'db2'
-  when: ansible_hostname == primary_instance_name
+      when:
+        - node_tier == 'db2'
+        - database_cluster_type == "AFA"
+
+    - name: "1.17 Generic Pacemaker - Ensure cluster (db2_{{ db_sid | upper }}) is configured - SBD"
+      ansible.builtin.command: "crm cluster init -y --name 'db2_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey --sbd-device={{ sbd_device }}"
+      when:
+        - node_tier == 'db2'
+        - not database_cluster_type == "AFA"
+
 - name: "1.17 Generic Pacemaker - Ensure Secondary node joins the Cluster"
   block:
@@ -76,8 +104,8 @@
       when: ansible_hostname == primary_instance_name

     - name: "1.17 Generic Pacemaker - Pause"
-      ansible.builtin.pause:
-        seconds: 15
+      ansible.builtin.wait_for:
+        timeout: 30
       when: inventory_hostname == primary_instance_name

     - name: "1.17 Generic Pacemaker - Ensure the corosync service is restarted on secondary node"
@@ -87,8 +115,8 @@
       when: ansible_hostname == secondary_instance_name

     - name: "1.17 Generic Pacemaker - Pause"
-      ansible.builtin.pause:
-        seconds: 15
+      ansible.builtin.wait_for:
+        timeout: 30
       when: ansible_hostname == secondary_instance_name

@@ -156,7 +184,7 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/
 # scs_high_availability = true is already assumed
-- name: "1.17 Generic Pacemaker - Ensure the STONITH Azure fence agent is created when SBD not used"
+- name: "1.17 Generic Pacemaker - Ensure the STONITH Azure fence agent is created when SBD is used"
   when:
     - (database_cluster_type == "ASD") or
       (database_cluster_type == "ISCSI") or
       (scs_cluster_type == "ASD") or
       (scs_cluster_type == "ISCSI")
@@ -193,6 +221,38 @@
     ansible.builtin.debug:
       msg: "SBD device configuration ends"

+- name: "1.17 Generic Pacemaker - configure SBD service and cluster restart"
+  when:
+    - (database_cluster_type == "ASD") or
+      (database_cluster_type == "ISCSI") or
+      (scs_cluster_type == "ASD") or
+      (scs_cluster_type == "ISCSI")
+  block:
+
+    - name: "systemctl enable sbd"
+      ansible.builtin.systemd:
+        name: sbd
+        enabled: true
+        daemon_reload: true
+
+    # restart cluster on both nodes to ensure sbd is enabled
+    - name: "1.17 Generic Pacemaker - stop cluster on both nodes to ensure sbd is enabled."
+      ansible.builtin.command: crm cluster stop
+
+    - name: "1.17 Generic Pacemaker - Check if cluster is stopped"
+      ansible.builtin.wait_for:
+        path: /var/lib/pacemaker/cib/cib.xml
+        state: absent
+        timeout: 60
+
+    - name: "1.17 Generic Pacemaker - start cluster on both nodes to ensure sbd is enabled."
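+      # Note: a full stop/start cycle (rather than a restart) is used so that the freshly enabled sbd service is picked up by pacemaker on both nodes.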
+ ansible.builtin.command: crm cluster start + + always: + - name: "1.17 Generic Pacemaker - SBD service configuration" + ansible.builtin.debug: + msg: "SBD service configuration ends" + # /*---------------------------------------------------------------------------8 # | | # | SBD - END | @@ -206,6 +266,7 @@ # +------------------------------------4--------------------------------------*/ - name: "1.17 Generic Pacemaker - Ensure Azure scheduled events is configured" + when: inventory_hostname == primary_instance_name block: # After configuring the Pacemaker resources for azure-events agent, # when you place the cluster in or out of maintenance mode, you may get warning messages like: @@ -227,7 +288,7 @@ - name: "1.17 Generic Pacemaker - Ensure maintenance mode is disabled" ansible.builtin.command: crm configure property maintenance-mode=false - when: inventory_hostname == primary_instance_name + # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml index 70b64ad22b..1e57c00bc2 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml @@ -68,6 +68,9 @@ sbdMap: [] # SLES 15 SP4 and newer: resource-agents-4.10.0+git40.0f4de473-150400.3.19.1 # todo: Figure out a way to get the release information from the package manager package_versions: + redhat8.2: + - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"} + - {name: "resource-agents", version: "4.1.1", compare_operator: ">=", version_type: "loose"} redhat8.4: - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"} - {name: "resource-agents", version: "4.1.1", compare_operator: ">=", version_type: "loose"} diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.2.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.2.yaml index e10bd8ea14..d3f625a8b3 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.2.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.2.yaml @@ -35,9 +35,10 @@ - soft - hard -- name: "2.10.2 sap-notes: - Disable SELinux" +- name: "2.10.2 sap-notes: - Set SELinux to permissive " ansible.posix.selinux: - state: disabled + state: permissive + policy: targeted notify: "2.10-sap-notes: Reboot after the selinux is configured" tags: ansible_skip_lint diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index 70937be5df..f4a5e6ae6b 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml @@ -62,29 +62,11 @@ when: - selinux_disabled.changed -- name: "2.10.1 sap-notes: - Disable SELinux and Reboot" - when: - - node_tier in ['scs', 'ers', 'pas', 'app', 'web'] - - distribution_id in ['oraclelinux8'] - block: - - name: "2.10.1 sap-notes: - Disable SELinux" - ansible.posix.selinux: - state: disabled - register: selinux_permissive - - - name: "2.10.1 sap-notes: Reboot app VMs after selinux is configured" - ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 60 - ignore_unreachable: true - when: - - selinux_permissive.changed - - name: "2.10.1 sap-notes: Check VM Agent Status" when: - selinux_disabled.changed - node_tier in ['scs', 'ers', 'pas', 'app', 'web'] - - distribution_id in ['redhat7', 'redhat8', 
'redhat9', 'oraclelinux8']
+    - distribution_id in ['redhat7', 'redhat8', 'redhat9']
   block:
     - name: "2.10.1 sap-notes: Clear the failed state of hosts"
       ansible.builtin.meta: clear_host_errors
@@ -101,19 +83,19 @@
         timeout: 300
       register: wait_for_connection_results

-- name: 2.10.1 - SAP Note 2777782
+- name: "2.10.1 - SAP Note 2777782 tasks on RHEL OS"
   ansible.builtin.include_tasks: 2.10.1.yaml
   when:
     - node_tier == 'hana'
     - distribution_id in ['redhat8', 'redhat9']

-- name: 2.10.2 - SAP Note 2777782
+- name: "2.10.2 - SAP Note 2777782 tasks on OEL OS"
   ansible.builtin.include_tasks: 2.10.2.yaml
   when:
     - node_tier in ['oracle', 'oracle-asm']
     - distribution_id == 'oraclelinux8'

-- name: 2.10.1275776 - SAP Note 1275776
+- name: "2.10.1275776 - SAP Note 1275776 tasks on RHEL"
   ansible.builtin.include_tasks: 2.10.1275776.yaml
   when:
     - node_tier == 'sybase'
@@ -122,7 +104,7 @@
 - name: "2.10 - Force all notified handlers to run now"
   ansible.builtin.meta: flush_handlers

-- name: 2.10.3119751 - SAP Note 3119751
+- name: "2.10.3119751 - SAP Note 3119751 tasks on RHEL"
   ansible.builtin.include_tasks: 2.10.3119751.yaml
   when:
     - platform == 'HANA'
diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml
index 841f6f9aea..272e65728b 100644
--- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml
@@ -109,12 +109,24 @@

 - name: "2.4 Hosts: Process pas_hostname variable and update host file when it is defined"
   when:
-    - custom_scs_virtual_hostname | default(false, true)
+    - (custom_pas_virtual_hostname | default('') | length > 1)
   block:
+
     - name: "2.4 Hosts: - Set virtual_host fact from the fetched PAS server list"
       ansible.builtin.set_fact:
-        pas_virtualhost_from_inventory: "{{ hostvars[query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') | first]['virtual_host'] }}"
-      when: query('inventory_hostnames', sap_sid | upper ~ '_PAS') | length > 0
+        pas_virtualhost_from_inventory: >-
+          {%- set ns = namespace(virthost='') -%}
+          {%- if ( query('inventory_hostnames', sap_sid | upper ~ '_PAS') | length > 0 ) -%}
+          {%-   set ns.virthost = hostvars[query('inventory_hostnames', sap_sid | upper ~ '_PAS') | first]['virtual_host'] -%}
+          {%- else -%}
+          {%-   for host in query('inventory_hostnames', 'all') -%}
+          {%-     if ns.virthost == '' and 'pas' in (hostvars[host]['supported_tiers'] | default([])) -%}
+          {%-       set ns.virthost = hostvars[host]['virtual_host'] -%}
+          {%-     endif -%}
+          {%-   endfor -%}
+          {%- endif -%}
+          {{- ns.virthost -}}

     - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined"
       ansible.builtin.set_fact:
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
index 04188de1cf..74e85a9a51 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
@@ -28,6 +28,10 @@
     - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}"
     - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"

+- name: "AFS Mount: Set the NFS mount options"
+  ansible.builtin.set_fact:
+    afs_mnt_options: 'noresvport,vers=4,minorversion=1,sec=sys'
+
 - name: "AFS Mount: Create list of all_sap_mounts to support"
   ansible.builtin.debug:
     msg:
@@ -68,7 +72,7 @@
         src: "{{ sap_mnt }}"
         path: "/saptmp"
         fstype: "nfs4"
-        opts: "vers=4.1,sec=sys"
+ opts: "{{ afs_mnt_options }}" state: mounted rescue: @@ -85,7 +89,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "vers=4.1,sec=sys" + opts: "{{ afs_mnt_options }}" state: mounted - name: "AFS Mount: Create SAP Directories (AFS)" @@ -137,7 +141,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "vers=4.1,sec=sys" + opts: "{{ afs_mnt_options }}" state: absent when: - sap_mnt is defined @@ -154,7 +158,7 @@ 'temppath': 'sapinstall', 'folder': '{{ bom_base_name }}', 'mount': '{{ usr_sap_install_mountpoint }}', - 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'opts': 'rw,hard,rsize=1048576,wsize=1048576,noresvport,actimeo=60,vers=4,minorversion=1,sec=sys', 'path': '/usr/sap/install', 'permissions': '0777', 'set_chattr_on_dir': false, @@ -250,7 +254,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -260,7 +264,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: unmounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -277,7 +281,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -294,7 +298,7 @@ src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" path: "/sapmnt/{{ item.sid }}" fstype: 'nfs4' - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: mounted loop: "{{ MULTI_SIDS }}" when: @@ -309,7 +313,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: mounted rescue: - name: "AFS Mount: Pause for 15 seconds" @@ -320,7 +324,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: 'vers=4,minorversion=1,sec=sys' + opts: "{{ afs_mnt_options }}" state: mounted when: @@ -358,7 +362,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' + opts: "{{ afs_mnt_options }}" state: mounted loop: @@ -378,7 +382,7 @@ 'type': 'trans', 'temppath': 'saptrans', 'mount': '{{ sap_trans }}', - 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', + 'opts': '{{ afs_mnt_options }}', 'path': '/usr/sap/trans', 'permissions': '0775', 'set_chattr_on_dir': false, diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index b08b603ff6..193b038a93 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -15,7 +15,6 @@ (database_cluster_type == "ISCSI") ansible.builtin.command: > crm configure property \$id="cib-bootstrap-options" - no-quorum-policy="ignore" stonith-enabled="true" stonith-action="reboot" stonith-timeout="144s" @@ -24,7 +23,6 @@ when: database_cluster_type not in ["ISCSI", "ASD"] ansible.builtin.command: > crm configure 
property \$id="cib-bootstrap-options" - no-quorum-policy="ignore" stonith-enabled="true" stonith-action="reboot" stonith-timeout="900s" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index 45a2cf1e81..1a8c3d60f7 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -108,6 +108,31 @@ when: - scs_high_availability +- name: "5.6 SCSERS - HA install calculate NFS mount options" + ansible.builtin.set_fact: + clus_nfs_options: >- + {%- set _nfsopts = 'noresvport,vers=4.1,sec=sys' -%} + {%- if (NFS_provider == "ANF") -%} + {%- if NFS_version == "NFSv3" -%} + {%- set _nfsopts = '' -%} + {%- elif NFS_version == "NFSv4.1" -%} + {%- set _nfsopts = 'sec=sys,nfsvers=4.1' -%} + {%- endif -%} + {%- elif (NFS_provider == "AFS") -%} + {%- if NFS_version == "NFSv4.1" -%} + {%- set _nfsopts = 'noresvport,sec=sys,vers=4.1' -%} + {%- endif -%} + {%- endif -%} + {{- _nfsopts -}} + when: + - scs_high_availability + +- name: "5.6 SCSERS - HA install show NFS mount options" + ansible.builtin.debug: + msg: "NFS mount options: {{ clus_nfs_options }}" + when: + - scs_high_availability + - name: "5.6 SCSERS - ASCS/ERS check if installed" become: true block: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml index 500803fcca..de866ce028 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml @@ -161,7 +161,7 @@ - name: "5.6 SCS/ERS Validation: Wait 60 secs for the StartService {{ sap_sid | upper }} to finish" ansible.builtin.wait_for: - timeout: 0 + timeout: 60 # {{ sapcontrol_path }} -nr {{ scs_instance_number }} -function GetProcessList | grep MessageServer | awk '{split($0,result,", "); print result[1],result[3] }' - name: "5.6 SCS/ERS Validation: Determine if SCS is running on {{ ansible_hostname }}" diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 94f0de1efe..a9f98425b2 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -99,6 +99,8 @@ db2sidadm_uid: 3004 db2sapsid_uid: 3005 # Uid of the database connect user db2hadr_port1: 51012 db2hadr_port2: 51013 +# Name of the database connect user for ABAP. Default value is 'sap'. 
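+# An empty value here presumably falls back to that default; set it only when a custom connect user is required.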
+db2_abap_connect_user: "" tmp_directory: "/var/tmp" url_internet: "https://azure.status.microsoft/en-us/status" # URL to use for internet access checks" @@ -221,6 +223,8 @@ cluster_use_scheduled_events_agent: true custom_cluster_fs_mon_timeout: "" custom_cluster_sap_mon_timeout: "" +use_fence_kdump: false + # ------------------- Begin - SAP SWAP settings variables --------------------8 sap_swap: - { tier: "scs", swap_size_mb: "4096" } diff --git a/deploy/ansible/vars/disks_config.yml b/deploy/ansible/vars/disks_config.yml index 2d9045ada3..83e584ea8c 100644 --- a/deploy/ansible/vars/disks_config.yml +++ b/deploy/ansible/vars/disks_config.yml @@ -34,7 +34,7 @@ disk_type_to_name_map: offline_log_dir: '{{ node_tier | lower }}_offline_logdir' saptmp: '{{ node_tier | lower }}_saptmp' # ------------------- End - disktypes required for DB2 ---------------------8 - + kdump: '{{ node_tier | lower }}_kdump' # ------------------- Begin - disktypes required for ASE -------------------8 sapdata_1: '{{ node_tier | lower }}_sapdata_1' saplog_1: '{{ node_tier | lower }}_saplog_1' @@ -75,7 +75,6 @@ disk_type_to_name_map: # mkfs command when formatting the file system. # logical_volumes: - # --------------------- Begin - disks required for usrsap -------------------8 # ---------------------- Begin - disks required for WD ---------------------8 - tier: 'sapos' node_tier: 'web' @@ -85,6 +84,17 @@ logical_volumes: fstype: 'xfs' # ----------------------- End - disks required for WD ----------------------8 + # ---------------------- Begin - disks required for kdump--------------------8 + - tier: 'sapos' + node_tier: 'all' + supported_tiers: ['scs','hana','db2','ers'] + vg: 'vg_kdump' + lv: 'lv_kdump' + size: '100%FREE' + fstype: 'xfs' + # ----------------------- End - disks required for kdump --------------------8 + + # --------------------- Begin - disks required for usrsap -------------------8 - tier: 'sapos' node_tier: 'all' vg: 'vg_sap' From d25b34694cf4b11df22b84dd72bd52279647c86c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 17:02:49 +0200 Subject: [PATCH 378/607] Add a pre-flight check script (#559) Co-authored-by: Kimmo Forss --- deploy/configs/sdaf_distros.json | 44 +++ deploy/configs/sdaf_skus.json | 50 +++ deploy/configs/sdaf_urls.json | 59 +++ deploy/scripts/Test-SDAFReadiness.ps1 | 529 ++++++++++++++++++++++++++ 4 files changed, 682 insertions(+) create mode 100644 deploy/configs/sdaf_distros.json create mode 100644 deploy/configs/sdaf_skus.json create mode 100644 deploy/configs/sdaf_urls.json create mode 100644 deploy/scripts/Test-SDAFReadiness.ps1 diff --git a/deploy/configs/sdaf_distros.json b/deploy/configs/sdaf_distros.json new file mode 100644 index 0000000000..15a428a977 --- /dev/null +++ b/deploy/configs/sdaf_distros.json @@ -0,0 +1,44 @@ +{ + "SUSE": { + "DistroIds": [ + "SUSE:sles-sap-15-sp5:gen2:latest", + "SUSE:sles-sap-15-sp4:gen2:latest", + "SUSE:sles-sap-15-sp3:gen2:latest", + "SUSE:sles-sap-15-sp2:gen2:latest", + "SUSE:sles-sap-12-sp5:gen2:latest" + ] + }, + "REDHAT": { + "DistroIds": [ + "RedHat:RHEL-SAP-HA:92sapha-gen2:latest", + "RedHat:RHEL-SAP-HA:90sapha-gen2:latest", + "RedHat:RHEL-SAP-HA:88sapha-gen2:latest", + "RedHat:RHEL-SAP-HA:86sapha-gen2:latest", + "RedHat:RHEL-SAP-HA:84sapha-gen2:latest", + "RedHat:RHEL-SAP-APPS:92sapapps-gen2:latest", + "RedHat:RHEL-SAP-APPS:90sapapps-gen2:latest", + "RedHat:RHEL-SAP-APPS:88sapapps-gen2:latest", + "RedHat:RHEL-SAP-APPS:86sapapps-gen2:latest", + "RedHat:RHEL-SAP-APPS:84sapapps-gen2:latest" + ] + }, + "WINDOWS": { + 
"DistroIds": [ + "MicrosoftWindowsServer:WindowsServer:2022-datacenter-g2", + "MicrosoftWindowsServer:WindowsServer:2022-datacenter", + "MicrosoftWindowsServer:WindowsServer:2019-datacenter-g2", + "MicrosoftWindowsServer:WindowsServer:2019-datacenter", + "MicrosoftWindowsServer:WindowsServer:2016-datacenter" + ] + }, + "ORACLE": { + "DistroIds": [ + "Oracle:Oracle-Linux:ol92-lvm-gen2:latest", + "Oracle:Oracle-Linux:ol90-lvm-gen2:latest", + "Oracle:Oracle-Linux:ol89-lvm-gen2:latest", + "Oracle:Oracle-Linux:ol88-lvm-gen2:latest", + "Oracle:Oracle-Linux:ol86-lvm-gen2:latest", + "Oracle:Oracle-Linux:ol84-lvm-gen2:latest" + ] + } +} diff --git a/deploy/configs/sdaf_skus.json b/deploy/configs/sdaf_skus.json new file mode 100644 index 0000000000..275e704d13 --- /dev/null +++ b/deploy/configs/sdaf_skus.json @@ -0,0 +1,50 @@ +{ + "vm_sku": [ + "Standard_E16-4ds_v4", + "Standard_E16-4ds_v5", + "Standard_E16-8ds_v4", + "Standard_E16-8ds_v5", + "Standard_E16ds_v4", + "Standard_E16ds_v5", + "Standard_E20ds_v4", + "Standard_E20ds_v5", + "Standard_E32ds_v4", + "Standard_E32ds_v5", + "Standard_E48ds_v4", + "Standard_E48ds_v5", + "Standard_E4ds_v4", + "Standard_E4ds_v5", + "Standard_E64ds_v4", + "Standard_E64ds_v5", + "Standard_E8ds_v4", + "Standard_E8ds_v5", + "Standard_D16ds_v4", + "Standard_D16ds_v5", + "Standard_D32ds_v4", + "Standard_D32ds_v5", + "Standard_D48ds_v4", + "Standard_D48ds_v5", + "Standard_D4ds_v4", + "Standard_D4ds_v5", + "Standard_D64ds_v4", + "Standard_D64ds_v5", + "Standard_D8ds_v4", + "Standard_D8ds_v5", + "Standard_M128", + "Standard_M128m", + "Standard_M128ms", + "Standard_M32ls", + "Standard_M32ms", + "Standard_M32ms_v2", + "Standard_M32ts", + "Standard_M64", + "Standard_M64dms_v2", + "Standard_M64ds_v2", + "Standard_M64ls", + "Standard_M64m", + "Standard_M64ms", + "Standard_M64ms_v2", + "Standard_M64s", + "Standard_M64s_v2" + ] +} diff --git a/deploy/configs/sdaf_urls.json b/deploy/configs/sdaf_urls.json new file mode 100644 index 0000000000..6c6ec3d582 --- /dev/null +++ b/deploy/configs/sdaf_urls.json @@ -0,0 +1,59 @@ +{ + "deployer": { + "urls": [ + "https://github.com/Azure/sap-automation", + "https://github.com/Azure/sap-automation-samples", + "https://github.com/Azure/sap-automation-bootstrap", + "https://packages.microsoft.com/keys/microsoft.asc", + "https://releases.hashicorp.com/", + "https://registry.terraform.io", + "https://checkpoint-api.hashicorp.com", + "https://bootstrap.pypa.io", + "https://pypi.org", + "https://pythonhosted.org", + "https://galaxy.ansible.com" + ], + "IPs": [] + }, + "windows": { + "urls": [ + "https://visualstudio.microsoft.com/downloads/", + "https://www.powershellgallery.com/" + ], + "IPs": [ + "onegetcdn.azureedge.net", + "az818661.vo.msecnd.net", + "devopsgallerystorage.blob.core.windows.net" + ] + }, + "sap": { + "urls": [ + "https://softwaredownloads.sap.com/file/0020000000098642022", + "https://yum.oracle.com", + "https://public-yum.oracle.com" + ], + "IPs": [ + "13.91.47.76", + "40.85.190.91", + "52.187.75.218", + "52.174.163.213", + "52.237.203.198", + "52.136.197.163", + "20.225.226.182", + "52.142.4.99", + "20.248.180.252", + "20.24.186.80", + "13.72.186.193", + "13.72.14.155", + "52.244.249.194", + "52.187.53.250", + "104.45.31.195", + "52.149.120.86", + "51.145.209.119", + "52.157.241.14", + "52.230.96.47", + "52.237.80.2", + "52.139.216.51" + ] + } +} diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1 new file mode 100644 index 0000000000..d85af0015c --- /dev/null +++ 
b/deploy/scripts/Test-SDAFReadiness.ps1
@@ -0,0 +1,529 @@
+function Show-Menu($data) {
+  Write-Host "================ $Title ================"
+  $i = 1
+  foreach ($d in $data) {
+    Write-Host "($i): Select '$i' for $($d)"
+    $i++
+  }
+
+  Write-Host "q: Select 'q' for Exit"
+
+}
+
+
+$rnd = $(Get-Random -Minimum 1 -Maximum 1000).ToString()
+
+$LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md"
+
+Add-Content -Path $LogFileName "# SDAF Assessment #"
+Add-Content -Path $LogFileName ""
+$OutputString = "Time of assessment: " + $(Get-Date -Format "yyyy-MM-dd HH:mm:ss")
+Add-Content -Path $LogFileName $OutputString
+$authenticationMethod = 'Service Principal (recommended)'
+$Title = "Select the authentication method to use"
+$data = @('Service Principal (recommended)', 'User Account')
+Show-Menu($data)
+$selection = Read-Host $Title
+$authenticationMethod = $data[$selection - 1]
+
+Add-Content -Path $LogFileName ""
+$OutputString = "Authentication model: " + $authenticationMethod
+Add-Content -Path $LogFileName $OutputString
+
+
+if ($authenticationMethod -eq "User Account") {
+  az logout
+  az login --output none
+  $VM_password = Read-Host "Please enter the Virtual Machine Password" -AsSecureString
+}
+else {
+  $ARM_CLIENT_ID = $Env:ARM_CLIENT_ID
+  $ARM_CLIENT_SECRET = $Env:ARM_CLIENT_SECRET
+  $ARM_TENANT_ID = $Env:ARM_TENANT_ID
+
+  if ($null -eq $ARM_CLIENT_ID -or $ARM_CLIENT_ID -eq "") {
+    $ARM_CLIENT_ID = Read-Host "Please enter the Service Principal's Application ID"
+  }
+
+  if ($null -eq $ARM_CLIENT_SECRET -or $ARM_CLIENT_SECRET -eq "") {
+    $ARM_CLIENT_SECRET = Read-Host "Please enter the Service Principal's App ID Password" -AsSecureString
+  }
+
+  $VM_password = $ARM_CLIENT_SECRET
+
+  if ($null -eq $ARM_TENANT_ID -or $ARM_TENANT_ID -eq "") {
+    $ARM_TENANT_ID = Read-Host "Please enter the Tenant ID"
+  }
+
+  az logout
+  az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none
+}
+
+if ($null -eq $ARM_SUBSCRIPTION_ID -or $ARM_SUBSCRIPTION_ID -eq "") {
+  $ARM_SUBSCRIPTION_ID = $Env:ARM_SUBSCRIPTION_ID
+}
+if ($null -eq $ARM_SUBSCRIPTION_ID -or $ARM_SUBSCRIPTION_ID -eq "") {
+  $ARM_SUBSCRIPTION_ID = Read-Host "Please enter the Subscription ID"
+}
+
+az account set --subscription $ARM_SUBSCRIPTION_ID
+
+Add-Content -Path $LogFileName ""
+$OutputString = "Subscription: " + $ARM_SUBSCRIPTION_ID
+Add-Content -Path $LogFileName $OutputString
+
+
+Add-Content -Path $LogFileName ""
+$OutputString = "Microsoft.Compute Resource Provider Registration State"
+Add-Content -Path $LogFileName $OutputString
+Add-Content -Path $LogFileName $(az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Compute")
+Write-Host "Microsoft.Compute Resource Provider Registration State" -ForegroundColor Green
+az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Compute"
+
+Write-Host
+Add-Content -Path $LogFileName ""
+$OutputString = "Microsoft.Storage Resource Provider Registration State"
+Add-Content -Path $LogFileName $OutputString
+Add-Content -Path $LogFileName $(az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Storage")
+Write-Host "Microsoft.Storage Resource Provider Registration State" -ForegroundColor Green
+az feature list --query
"[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Storage" + +Write-Host +Add-Content -Path $LogFileName "" +$OutputString = "Microsoft.Network Resource Provider Registration State" +Add-Content -Path $LogFileName $OutputString +Add-Content -Path $LogFileName $(az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Network") +Write-Host "Microsoft.Storage Resource Provider Registration State" -ForegroundColor Green +az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.Network" + +Write-Host +Add-Content -Path $LogFileName "" +$OutputString = "Microsoft.Network Resource Provider Registration State" +Add-Content -Path $LogFileName $OutputString +Add-Content -Path $LogFileName $(az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.NetApp") +Write-Host "Microsoft.NetApp Resource Provider Registration State" -ForegroundColor Green +az feature list --query "[?properties.state=='Registered'].{Name:name,State:properties.state}" --output table --namespace "Microsoft.NetApp" + +Add-Content -Path $LogFileName "" + + +$Title = "Select the Location" +$Location = $Env:Location +if ($null -eq $Location -or $Location -eq "") { + $locations = $(az account list-locations -o table --query "[?metadata.regionType=='Physical'].name" | Sort-Object) + Show-Menu($locations[2..($locations.Length - 1)]) + $selection = Read-Host $Title + + $selectionOffset = [convert]::ToInt32($selection, 10) + 1 + $Location = $locations[$selectionOffset] +} + +Add-Content -Path $LogFileName "" +$OutputString = "Resource group location: " + $Location +Add-Content -Path $LogFileName $OutputString + + +$resourceGroupName = $Env:ResourceGroupName +if ($null -eq $resourceGroupName -or $resourceGroupName -eq "") { + $resourceGroupName = Read-Host "Please enter the Resource Group Name" +} + +Write-Host "Creating Resource Group" -foregroundcolor Yellow +$OutputString = $(az group create --name $resourceGroupName --location $Location --query "properties.provisioningState") +Add-Content -Path $LogFileName $OutputString +Write-Host $OutputString +$resourceGroupId = $(az group show --name $resourceGroupName --name $resourceGroupName --query id --output tsv) + +Add-Content -Path $LogFileName "" +$OutputString = "Resource group name: " + $resourceGroupName +Add-Content -Path $LogFileName $OutputString + +if ($authenticationMethod -ne "User Account") { + Write-Host "Assigning the Service Principal the User Access Administrator role" + az role assignment create --assignee $ARM_CLIENT_ID --role "User Access Administrator" --scope $resourceGroupId --query "properties.provisioningState" + + Write-Host "Checking if the Service Principal has the User Access Administrator role" + $roleName = $(az role assignment list --assignee $ARM_CLIENT_ID --query "[?roleDefinitionName=='User Access Administrator'].roleDefinitionName" --output tsv) + if ($null -eq $roleName -or $roleName -eq "") { + Write-Host "The Service Principal does not have the User Access Administrator role" -ForegroundColor Red + Write-Host "Please assign the User Access Administrator role to the Service Principal and re-run the script, alternatively configure the tfvars so that role assignmenta are not performed" -ForegroundColor Red + + } +} + +$vnetName = "SDAF-VNet" +$anfSubnetName = "SDAF-anf" 
+$subnetName = "SDAF-Subnet" +$selection = Read-Host "Create Virtual network Y/N" +if ($selection.ToUpper() -eq "Y") { + $OutputString = "Creating Virtual Network: " + $vnetName + + Write-Host $OutputString -foregroundcolor Yellow + Add-Content -Path $LogFileName "" + Add-Content -Path $LogFileName $OutputString + + $OutputString = $(az network vnet create --name $vnetName --resource-group $resourceGroupName --location $Location --address-prefix "10.112.0.0/16" --subnet-name $subnetName --subnet-prefix "10.112.0.0/19" --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + + $OutputString = "Creating Subnet: " + $subnetName + Write-Host $OutputString -foregroundcolor Yellow + Add-Content -Path $LogFileName $OutputString + $OutputString = $(az network vnet subnet create --resource-group $resourceGroupName --vnet-name $vnetName --name $anfSubnetName --address-prefixes "10.112.64.0/27" --delegations "Microsoft.NetApp/volumes" --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString +} +else { + $vnetName = Read-Host "Please enter the Virtual Network Name" + $subnetName = Read-Host "Please enter the Subnet Name" + $anfSubnetName = Read-Host "Please enter the ANF Subnet Name" + + $OutputString = "Using Virtual Network: " + $vnetName + + Add-Content -Path $LogFileName "" + Add-Content -Path $LogFileName $OutputString + + $OutputString = "Using Subnet: " + $subnetName + + Add-Content -Path $LogFileName "" + Add-Content -Path $LogFileName $OutputString + + $OutputString = "Using ANF Subnet: " + $anfSubnetName + + Add-Content -Path $LogFileName "" + Add-Content -Path $LogFileName $OutputString +} + + +$storageAccountName = "sdaftest$rnd" +$shareName = "sdaftestshare" + +$selection = Read-Host "Create storage account Y/N" +if ($selection.ToUpper() -eq "Y") { + $OutputString = "Creating Storage Account: " + $storageAccountName + Write-Host $OutputString -foregroundcolor Yellow + Add-Content -Path $LogFileName $OutputString + $OutputString = $(az storage account create --name $storageAccountName --resource-group $resourceGroupName --location $Location --kind FileStorage --sku Premium_LRS --allow-blob-public-access false --https-only=false --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + + $storageID = $(az storage account show --resource-group $resourceGroupName --name $storageAccountName --query "id" -o tsv) + $OutputString = "Creating Private Endpoint for the Storage Account: " + $storageAccountName + Write-Host $OutputString -foregroundcolor Yellow + Add-Content -Path $LogFileName $OutputString + + $OutputString = $(az network private-endpoint create --connection-name SDAF-connection-1 --name SDAF-private-endpoint --private-connection-resource-id $storageID --resource-group $resourceGroupName --subnet $subnetName --vnet-name $vnetName --group-ids file --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + +} + + +$selection = Read-Host "Create file share Y/N" +if ($selection.ToUpper() -eq "Y") { + $OutputString = "Creating File share: " + $shareName + Write-Host $OutputString -foregroundcolor Yellow + Add-Content -Path $LogFileName $OutputString + az storage share-rm create --resource-group $resourceGroupName --storage-account $storageAccountName --name $shareName --enabled-protocols NFS --access-tier "Premium" --quota 128 --output none +} + + +$vmssName = "SDAF-VmssFlex" + +$OutputString = "Creating 
flexible scale set: " + $vmssName
+Write-Host $OutputString -foregroundcolor Yellow
+Add-Content -Path $LogFileName $OutputString
+
+Write-Host "" -foregroundcolor Yellow
+# Create flexible scale set for deployment of SAP workload across availability zones with platform fault domain count set to 1
+$OutputString = $(az vmss create --name $vmssName --resource-group $resourceGroupName --location $Location --orchestration-mode flexible --zones 1 2 3 --platform-fault-domain-count 1 --query "provisioningState")
+Write-Host $OutputString
+Add-Content -Path $LogFileName $OutputString
+
+$vmssid = $(az vmss show --name $vmssName --resource-group $resourceGroupName --query id --output tsv)
+
+$selection = Read-Host "Create Virtual Machine Y/N"
+if ($selection.ToUpper() -eq "Y") {
+  $Title = "Select the Publisher"
+  $data = @('SUSE', 'RedHat', 'Oracle', 'Windows')
+
+  Show-Menu($data)
+  $selection = Read-Host $Title
+  if ($selection -eq "q") {
+    return
+  }
+  $publisher = $data[$selection - 1]
+  Add-Content -Path $LogFileName ""
+  Add-Content -Path $LogFileName "## Virtual Machine ##"
+  Add-Content -Path $LogFileName ""
+
+  $configPath = join-path -path (resolve-path ..) -ChildPath configs
+
+  $AllDistros = Get-Content -Raw -Path (Join-Path -Path $configPath -ChildPath "sdaf_distros.json") | ConvertFrom-Json
+
+  $SKUS = Get-Content -Raw -Path ..\configs\sdaf_skus.json | ConvertFrom-Json
+
+  #$distros = $(az vm image list --location $Location --query "[].urn" --publisher $publisher --all --offer "sap" --output table | Sort-Object)
+  if ($publisher -eq "SUSE") {
+    $distros = $AllDistros.SUSE.DistroIds
+  }
+  if ($publisher -eq "RedHat") {
+    $distros = $AllDistros.REDHAT.DistroIds
+  }
+  if ($publisher -eq "Oracle") {
+    $distros = $AllDistros.ORACLE.DistroIds
+  }
+  if ($publisher -eq "Windows") {
+    $distros = $AllDistros.WINDOWS.DistroIds
+  }
+
+  $Title = "Select the Distro"
+
+  Show-Menu($distros)
+  $selection = Read-Host "Please choose the Distro"
+  $distro = $distros[$selection - 1]
+
+  Add-Content -Path $LogFileName ""
+
+  $OutputString = "Distro: " + $distro
+  Write-Host $OutputString -foregroundcolor Yellow
+  Add-Content -Path $LogFileName $OutputString
+  $skus = $SKUS.vm_sku
+
+  $Title = "Select the SKU"
+
+  Show-Menu($skus)
+  $selection = Read-Host $Title
+
+  $vmSKU = $skus[$selection - 1]
+
+  Add-Content -Path $LogFileName ""
+
+  $OutputString = "Virtual Machine SKU: " + $vmSKU
+  Write-Host $OutputString -foregroundcolor Yellow
+  Add-Content -Path $LogFileName $OutputString
+
+  Write-Host "Checking if the region supports PremiumV2 disks"
+  $zone = $(az vm list-skus --resource-type disks --query "[?name=='PremiumV2_LRS'].locationInfo[0].zones | [0] | [0]" --location $Location --output tsv)
+  $vmStatus = ""
+
+  if ($null -eq $zone -or $zone -eq "") {
+    Write-Host "Creating a Virtual Machine" -foregroundcolor Yellow
+    $vmStatus = $(az vm create --resource-group $resourceGroupName --name "SDAF-VM" --image $distro --admin-username "azureadm" --admin-password $VM_password --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --no-wait --query "provisioningState")
+  }
+  else {
+    $diskName = "SDAFdisk"
+    $logicalSectorSize = 4096
+
+    Write-Host "Creating a Premium SSD v2 disk" -foregroundcolor Yellow
+    az disk create -n $diskName -g $resourceGroupName --size-gb 100 --disk-iops-read-write 5000 --disk-mbps-read-write 150 --location $Location --zone $zone --sku PremiumV2_LRS --logical-sector-size $logicalSectorSize --query "provisioningState"
+    Write-Host "Creating a Virtual Machine"
-foregroundcolor Yellow + $vmStatus = $(az vm create --resource-group $resourceGroupName --name "SDAF-VM" --image $distro --admin-username "azureadm" --admin-password $VM_password --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --zone $zone --attach-data-disks $diskName --query "provisioningState") + + } + + Write-Host $vmStatus + $vmStatus = "Succeeded" + + if ($vmStatus -eq "Succeeded") { + + $UrlsToCheck = Get-Content -Raw -Path ..\configs\sdaf_urls.json | ConvertFrom-Json + + Add-Content -Path $LogFileName "" + Add-Content -Path $LogFileName "## Check URLS ##" + Add-Content -Path $LogFileName "" + + Write-Host "Checking Deployer URLs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking Deployer URLs" + + foreach ($url in $UrlsToCheck.deployer.urls) { + Write-Host "Checking if $url is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + if ($result.Contains("200 OK")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + } + } + + Write-Host "Checking Deployer IPs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking Deployer IPs" + Add-Content -Path $LogFileName "" + + foreach ($IP in $UrlsToCheck.deployer.IPs) { + Write-Host "Checking if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + elseif ($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + } + + + Write-Host "Checking Windows URLs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking Windows URLs" + Add-Content -Path $LogFileName "" + + foreach ($url in $UrlsToCheck.windows.urls) { + Write-Host "Checking if $url is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + if ($result.Contains("200 OK")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + } + + Write-Host "Checking Windows IPs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking Windows IPs" + Add-Content -Path $LogFileName "" + + foreach ($IP in $UrlsToCheck.windows.IPs) { + Write-Host "Checking 
if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + elseif ($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + } + + + Write-Host "Checking 'runtime' URLs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking 'runtime' URLs" + Add-Content -Path $LogFileName "" + + foreach ($url in $UrlsToCheck.sap.urls) { + Write-Host "Checking if $url is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + if ($result.Contains("200 OK")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + } + + Write-Host "Checking 'runtime' IPs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking 'runtime' IPs" + + foreach ($IP in $UrlsToCheck.sap.IPs) { + Write-Host "Checking if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + elseif ($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + } + + } + +} + +$selection = Read-Host "Create Azure NetApp account Y/N" +if ($selection.ToUpper() -eq "Y") { + $anfAccountName = "sdafanfaccount$rnd" + $OutputString = "Creating NetApp Account: " + $anfAccountName + Write-Host $OutputString -ForegroundColor Yellow + Add-Content -Path $LogFileName $OutputString + + $OutputString = $(az netappfiles account create --resource-group $resourceGroupName --name $anfAccountName --location $Location --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + + $poolName = "sdafpool" + $poolSize_TiB = 2 + $serviceLevel = "Premium" # Valid values are Standard, Premium and Ultra + + $OutputString = "Creating NetApp Capacity Pool: " + $poolName + Write-Host $OutputString -ForegroundColor Yellow + Add-Content -Path 
$LogFileName $OutputString + + $OutputString = $(az netappfiles pool create --resource-group $resourceGroupName --location $Location --account-name $anfAccountName --pool-name $poolName --size $poolSize_TiB --service-level $serviceLevel --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + + $vnetID = $(az network vnet show --resource-group $resourceGroupName --name $vnetName --query "id" -o tsv) + $subnetID = $(az network vnet subnet show --resource-group $resourceGroupName --vnet-name $vnetName --name $anfSubnetName --query "id" -o tsv) + $volumeSize_GiB = 100 + $uniqueFilePath = "myfilepath2" # Please note that creation token needs to be unique within subscription and region + + $OutputString = "Creating NetApp Volume: " + "myvol1" + Write-Host $OutputString -ForegroundColor Yellow + Add-Content -Path $LogFileName $OutputString + + $OutputString = $(az netappfiles volume create --resource-group $resourceGroupName --location $Location --account-name $anfAccountName --pool-name $poolName --name "myvol1" --service-level $serviceLevel --vnet $vnetID --subnet $subnetID --usage-threshold $volumeSize_GiB --file-path $uniqueFilePath --protocol-types "NFSv3" --query "provisioningState") + Write-Host $OutputString + Add-Content -Path $LogFileName $OutputString + +} + + +$selection = Read-Host "Delete resource group Y/N?" +if ($selection.ToUpper() -eq "Y") { + az group delete --name $resourceGroupName --yes +} From 94d618d4d4dfc8501a601d37c565e4f2a15bc42d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 23:41:33 +0200 Subject: [PATCH 379/607] indentation --- deploy/ansible/roles-sap/windows/5.1-dbload/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/windows/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/windows/5.1-dbload/tasks/main.yaml index b614117ab3..468610cfc0 100644 --- a/deploy/ansible/roles-sap/windows/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/windows/5.1-dbload/tasks/main.yaml @@ -42,7 +42,7 @@ sap_inifile: "{{ bom_base_name }}-dbload-{{ sid_to_be_deployed.sid | lower }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-dbload-inifile-param.j2" scs_server: "{{ hostvars[scs_server_temp | first]['virtual_host'] }}" - db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" + db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" scs_sapmnt_server: "{% if scs_high_availability %}{{ sid_to_be_deployed.sid | lower }}scs{{ scs_instance_number }}cl1{% else %}{{ hostvars[scs_server_temp | first]['virtual_host'] }}{% endif %}" dir_params: '{{ tmp_directory_windows }}\params' product_catalog: '{{ download_directory_windows }}\SWPM\product.catalog' From 4cd67d939cc0004d92f26493c9da98718e0c8889 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 23:41:45 +0200 Subject: [PATCH 380/607] Fix casing of LUN in outputs.tf --- .../terraform-units/modules/sap_system/anydb_node/outputs.tf | 4 ++-- .../terraform-units/modules/sap_system/hdb_node/outputs.tf | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf index 55525a650f..b711793c5f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ 
-159,7 +159,7 @@ output "database_shared_disks" { flatten( [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.cluster : - format("{ host: '%s', lun: %d, type: 'ASD' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'ASD' }", vm, disk.lun) ] ] ) @@ -171,7 +171,7 @@ output "database_kdump_disks" { flatten( [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : - format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun) ] ] ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index e71ef9ffc8..d26682b249 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -198,7 +198,7 @@ output "database_shared_disks" { flatten( [for vm in var.naming.virtualmachine_names.HANA_COMPUTERNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.cluster : - format("{ host: '%s', lun: %d, type: 'ASD' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'ASD' }", vm, disk.lun) ] ] ) @@ -211,7 +211,7 @@ output "database_kdump_disks" { flatten( [for vm in var.naming.virtualmachine_names.HANA_COMPUTERNAME : [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : - format("{ host: '%s', lun: %d, type: 'kdump' }", vm, disk.lun) + format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun) ] ] ) From 7812f4bce92af6322cf13476884cd3a238853bb5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 5 Mar 2024 23:55:57 +0200 Subject: [PATCH 381/607] Fix variable case in sbd.yaml --- .../roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index db160faa86..422fff9807 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -45,7 +45,7 @@ ansible.builtin.debug: msg: # Best method for formatting output with Azure Devops Logs - "item.host: {{ item.host }}" - - "item.lun: {{ item.lun }}" + - "item.lun: {{ item.LUN }}" verbosity: 2 loop: "{{ sbdDevices }}" when: @@ -83,7 +83,7 @@ vars: sbdMap_update: - { host: "{{ item.host }}" , - lun: "{{ item.lun }}" , + lun: "{{ item.LUN }}" , blockDev: '' , diskByLun: '' , diskById: '' } From 6b5d2310bf6a64a8a7d016cdc43236eaef0da022 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 6 Mar 2024 16:16:02 +0530 Subject: [PATCH 382/607] Fix initramfs image file configuration and update SAP parameters template --- .../tasks/1.17.2.0-cluster-RedHat.yml | 48 +++++++++---------- .../output_files/sap-parameters.tmpl | 4 +- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 874b1953a6..4b627dee0d 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -265,30 +265,6 @@ when: - kdump_enabled == "enabled" block: 
-# Ensure that the initramfs image file contains the fence_kdump and hosts files - - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files" - ansible.builtin.command: lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts" - register: initramfs_image_file - changed_when: false - failed_when: false - - - name: "1.17 Generic Pacemaker - Add hosts and kdump files to the initramfs image file" - ansible.builtin.command: dracut -f -v --add hosts --install "cat" /boot/initramfs-$(uname -r)kdump.img $(uname -r) --force - when: initramfs_image_file.rc != 0 - failed_when: false - - # Ensure that the initramfs image file contains the fence_kdump and hosts files - - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files" - ansible.builtin.command: lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts" - register: initramfs_image_check - changed_when: false - failed_when: initramfs_image_check.rc != 0 - - # print debug on the validation of initramfs - - name: "1.17 Generic Pacemaker - debug initramfs output" - ansible.builtin.debug: - msg: "initramfs check: {{ initramfs_image_check.stdout }}" - when: initramfs_image_check.rc == 0 # Perform the fence_kdump_nodes configuration in /etc/kdump.conf - name: "1.17 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf" @@ -322,6 +298,30 @@ name: kdump state: restarted + # Ensure that the initramfs image file contains the fence_kdump and hosts files + - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files" + ansible.builtin.shell: set -o pipefail && lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts" + register: initramfs_image_file + changed_when: false + failed_when: false + + # - name: "1.17 Generic Pacemaker - Add hosts and kdump files to the initramfs image file" + # ansible.builtin.shell: dracut -f -v --add hosts --add fence-kdump --install "cat" /boot/initramfs-$(uname -r)kdump.img $(uname -r) --force + # when: initramfs_image_file.rc != 0 + # failed_when: false + + # Ensure that the initramfs image file contains the fence_kdump and hosts files + - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files" + ansible.builtin.shell: set -o pipefail && lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts" + register: initramfs_image_check + changed_when: false + failed_when: initramfs_image_check.rc != 0 + + # print debug on the validation of initramfs + - name: "1.17 Generic Pacemaker - debug initramfs output" + ansible.builtin.debug: + msg: "initramfs check: {{ initramfs_image_check.stdout }}" + when: initramfs_image_check.rc == 0 # /*---------------------------------------------------------------------------8 # | | # | kdump stonith - END | diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index f07cbd4386..013c9e88ed 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -34,7 +34,7 @@ scs_cluster_type: ${scs_cluster_type} # SCS Instance Number scs_instance_number: "${scs_instance_number}" -# scs_server_loadbalancer_ip is the SCS IP address of the load balancer for +# scs_lb_ip is the Virtual 
IP address of the load balancer for # the SAP Central Services virtual machines scs_lb_ip: ${scs_server_loadbalancer_ip} @@ -45,7 +45,7 @@ ers_instance_number: "${ers_instance_number}" # the SAP Central Services virtual machines ers_lb_ip: ${ers_server_loadbalancer_ip} -# scs_clst_lb_ip is the IP address of the load balancer for the scs cluster in Windows +# IP address of CNO in Windows and takes the form IPAddress/CIDR scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} From 936037592bd4711c5f7524261f7c159ddd71721c Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 6 Mar 2024 16:58:06 +0530 Subject: [PATCH 383/607] Update fence_kdump_nodes configuration in /etc/kdump.conf --- .../tasks/1.17.2.0-cluster-RedHat.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 4b627dee0d..a299f1bf26 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -270,8 +270,8 @@ - name: "1.17 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf" ansible.builtin.replace: path: /etc/kdump.conf - regexp: '^#fence_kdump_nodes' - replace: 'fence_kdump_nodes "{{ secondary_instance_name }}"' + regexp: '^#fence_kdump_nodes(.*)$' + replace: "fence_kdump_nodes {{ secondary_instance_name }}" backup: true register: kdump_conf_file failed_when: kdump_conf_file.rc != 0 @@ -283,8 +283,8 @@ - name: "1.17 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf" ansible.builtin.replace: path: /etc/kdump.conf - regexp: '^#fence_kdump_nodes' - replace: 'fence_kdump_nodes "{{ primary_instance_name }}"' + regexp: '^#fence_kdump_nodes(.*)$' + replace: "fence_kdump_nodes {{ primary_instance_name }}" backup: true register: kdump_conf_file failed_when: kdump_conf_file.rc != 0 From d4605ac639efc69ab40fab0ebeebc4edf82aa710 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 15:42:17 +0200 Subject: [PATCH 384/607] Update DNS check in OS configuration playbook --- deploy/ansible/playbook_01_os_base_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index caeaec1405..20da9a53b0 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -245,7 +245,7 @@ - name: "OS configuration playbook: - Check if required DNS entries match" ansible.builtin.assert: - that: "dns_in_AD == (scs_clst_lb_ip | split('/') | first)" + that: "dns_in_AD == scs_lb_ip" fail_msg: "The DNS entry for the SCS cluster is not correct in Active Directory" when: - dns_in_AD is defined From a1a351d065c6d4d271ae5ca097634ba7543a4557 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 6 Mar 2024 21:13:58 +0530 Subject: [PATCH 385/607] Update database cluster IP address variable --- deploy/ansible/playbook_04_00_01_db_ha.yaml | 6 +++--- .../tasks/1.17.2.0-cluster-RedHat.yml | 5 ----- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 7c813b079a..3e93ec59a8 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -610,7 +610,7 @@ primary_node: "{{ 
ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name secondary_node: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name domain_user_password: "{{ hostvars.localhost.winadm_password }}" - sap_cluster_ip_address: "{{ db_clst_lb_ip }}" + sap_cluster_ip_address: "{{ database_cluster_ip }}" sql_svc_account: "{% if hostvars.localhost.sqlsvc_account is defined %}{{ hostvars.localhost.sqlsvc_account }}{% else %}{{ win_sql_svc_account }}{% endif %}" sql_svc_password: "{% if hostvars.localhost.sqlsvc_account_password is defined %}{{ hostvars.localhost.sqlsvc_account_password }}{% else %}{{ sql_svc_password | default('') }}{% endif %}" sql_agent_account: "{% if hostvars.localhost.sqlagent_account is defined %}{{ hostvars.localhost.sqlagent_account }}{% else %}{{ win_sqlagent_svc_account }}{% endif %}" @@ -621,7 +621,7 @@ name: roles-os/windows/1.17-generic-wincluster register: cluster_output vars: - sap_cluster_ip_address: "{{ db_clst_lb_ip }}" + sap_cluster_ip_address: "{{ database_cluster_ip }}" primary_node: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name secondary_node: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name domain_user_password: "{{ hostvars.localhost.winadm_password }}" @@ -641,7 +641,7 @@ register: alwayson_output vars: config_tier: "sqlserverha" - sap_cluster_ip_address: "{{ db_clst_lb_ip }}" + sap_cluster_ip_address: "{{ database_cluster_ip }}" primary_node: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name secondary_node: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name mssql_primary_node: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index a299f1bf26..e1946c47c0 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -305,11 +305,6 @@ changed_when: false failed_when: false - # - name: "1.17 Generic Pacemaker - Add hosts and kdump files to the initramfs image file" - # ansible.builtin.shell: dracut -f -v --add hosts --add fence-kdump --install "cat" /boot/initramfs-$(uname -r)kdump.img $(uname -r) --force - # when: initramfs_image_file.rc != 0 - # failed_when: false - # Ensure that the initramfs image file contains the fence_kdump and hosts files - name: "1.17 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files" ansible.builtin.shell: set -o pipefail && lsinitrd /boot/initramfs-$(uname -r)kdump.img | egrep "fence|hosts" From b32a17878a93d223f4b8a10fa904f412627a43d8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 18:24:40 +0200 Subject: [PATCH 386/607] Update disk configuration variables --- deploy/ansible/vars/disks_config.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/vars/disks_config.yml b/deploy/ansible/vars/disks_config.yml index 83e584ea8c..35a3c9d37c 100644 --- a/deploy/ansible/vars/disks_config.yml +++ b/deploy/ansible/vars/disks_config.yml @@ -34,7 +34,7 @@ disk_type_to_name_map: offline_log_dir: '{{ node_tier | lower }}_offline_logdir' saptmp: '{{ node_tier | lower }}_saptmp' # ------------------- End - disktypes required for DB2 ---------------------8 - kdump: '{{ node_tier | lower }}_kdump' + kdump: '{{ node_tier | lower 
}}_kdump' # ------------------- Begin - disktypes required for ASE -------------------8 sapdata_1: '{{ node_tier | lower }}_sapdata_1' saplog_1: '{{ node_tier | lower }}_saplog_1' @@ -88,10 +88,11 @@ logical_volumes: - tier: 'sapos' node_tier: 'all' supported_tiers: ['scs','hana','db2','ers'] - vg: 'vg_kdump' - lv: 'lv_kdump' + vg: 'vg_{{ node_tier | lower }}_kdump' + lv: 'lv_{{ node_tier | lower }}_kdump' size: '100%FREE' fstype: 'xfs' + # ----------------------- End - disks required for kdump --------------------8 # --------------------- Begin - disks required for usrsap -------------------8 From 10819bc631b478f79d15c3a39e2c7ea3a8ff815c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 18:52:14 +0200 Subject: [PATCH 387/607] Fix domain\service account names in mssql-alwayson-prerequisites.yaml --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 9a3d8cd517..d4c1d0a8dd 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -31,8 +31,8 @@ - name: 'WIN: Calculating the domain\service account names' ansible.builtin.set_fact: - domain_sqlsvc_account: '{{ win_sql_svc_account }}@{{ domain_name }}\' - domain_sqlagent_account: '{{ win_sqlagent_svc_account }}@{{ domain_name }}\' + domain_sqlsvc_account: '{{ win_sql_svc_account }}@{{ domain_name }}' + domain_sqlagent_account: '{{ win_sqlagent_svc_account }}@{{ domain_name }}' when: - domain is defined From 1120dbbc3abe1cc270e0c62d53bc83c65441b612 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 19:32:54 +0200 Subject: [PATCH 388/607] Update domain account names in mssql-alwayson-config.yaml --- .../tasks/4.4.1.1-mssql-alwayson-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml index 9ca5160aa4..0e86619aaf 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml @@ -32,8 +32,8 @@ - name: 'WIN: Calculating the domain\service account names' ansible.builtin.set_fact: - domain_sqlsvc_account: '{% if win_sql_svc_account != "NT SERVICE\MSSQLSERVER" %}{{ domain | upper }}\{{ win_sql_svc_account }}{% else %}{{ win_sql_svc_account }}{% endif %}' - domain_sqlagent_account: '{% if win_sql_svc_account != "NT SERVICE\SQLSERVERAGENT" %}{{ domain | upper }}\{{ win_sqlagent_svc_account }}{% else %}{{ win_sqlagent_svc_account }}{% endif %}' + domain_sqlsvc_account: '{% if win_sql_svc_account != "NT SERVICE\MSSQLSERVER" %}{{ win_sql_svc_account }}@{{ domain_name }}{% else %}{{ win_sql_svc_account }}{% endif %}' + domain_sqlagent_account: '{% if win_sql_svc_account != "NT SERVICE\SQLSERVERAGENT" %}{{ win_sqlagent_svc_account }}@{{ domain_name }}{% else %}{{ win_sqlagent_svc_account }}{% endif %}' when: - domain is defined From 982d4bb8a94a8f749967adb40dc4dfa7bff45f3a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 
19:36:59 +0200 Subject: [PATCH 389/607] Update SQL Server service account format --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index d4c1d0a8dd..f2508babe3 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -155,7 +155,7 @@ MembersToInclude: - "{{ domain_sqlsvc_account }}" - "{{ domain_sqlagent_account }}" - - '{{ domain | upper }}\{{ domain_service_account }}' + - '{{ domain_service_account }}@{{ domain_name }}' PsDscRunAsCredential_username: '{{ sap_sid }}adm@{{ domain_name }}' PsDscRunAsCredential_password: '{{ domain_user_password }}' register: sql_role_addition @@ -180,7 +180,7 @@ - "{{ win_cluster_svc_account }}" - "{{ domain_sqlsvc_account }}" - "{{ domain_sqlagent_account }}" - - '{{ domain | upper }}\{{ domain_service_account }}' + - '{{ domain_service_account }}@{{ domain_name }}' register: sql_server_permission - name: "Create SQL Server Availability Group Endpoint" From 3061b2046ce47d479861cc84b15817df6fc31e37 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 19:42:22 +0200 Subject: [PATCH 390/607] Update domain service account format in SQL login --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index f2508babe3..487d374b7e 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -142,7 +142,7 @@ - "{{ win_cluster_svc_account }}" - "{{ domain_sqlsvc_account }}" - "{{ domain_sqlagent_account }}" - - '{{ domain | upper }}\{{ domain_service_account }}' + - '{{ domain_service_account }}@{{ domain_name }}' register: sql_login - name: "Add required users to server role in SQL Server" From 74c339f8a78dfd05b955af2ddaffd316d16eb3c8 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 6 Mar 2024 23:13:36 +0530 Subject: [PATCH 391/607] Add mount for local kdump file path --- .../roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index ad7d0a5cfa..95aac20ce9 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -80,6 +80,18 @@ - node_tier != 'observer' - ((node_tier == 'app' and usr_sap_mountpoint is undefined) or node_tier != 'app') +- name: "2.6 SAP Mounts: - Mount local kdump file path to save vmcore" + ansible.posix.mount: + src: "/dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump" + path: '/var/crash' + fstype: 'xfs' + opts: defaults + state: mounted + when: + - use_fence_kdump + - ansible_os_family | upper == 'REDHAT' + - supported_tiers in ['scs','hana','db2','ers'] + - name: "2.6 SAP Mounts: - Mount local file 
systems (shared)" ansible.posix.mount: src: "{{ sharedpath }}" From fb6ba4674649301f815de77677c7f0a194581f62 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 20:04:23 +0200 Subject: [PATCH 392/607] Update SQL Server service account names*** --- .../4.4.1.0-mssql-alwayson-prerequisites.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 487d374b7e..0f6b1259ec 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -140,9 +140,9 @@ PsDscRunAsCredential_password: "{{ domain_service_password }}" loop: - "{{ win_cluster_svc_account }}" - - "{{ domain_sqlsvc_account }}" - - "{{ domain_sqlagent_account }}" - - '{{ domain_service_account }}@{{ domain_name }}' + - "{{ domain | upper }}\{{ sql_svc_account_name }}" + - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_login - name: "Add required users to server role in SQL Server" @@ -153,9 +153,9 @@ InstanceName: "{{ mssql_instance_name }}" ServerRoleName: "sysadmin" MembersToInclude: - - "{{ domain_sqlsvc_account }}" - - "{{ domain_sqlagent_account }}" - - '{{ domain_service_account }}@{{ domain_name }}' + - "{{ domain | upper }}\{{ sql_svc_account_name }}" + - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ domain_service_account }}' PsDscRunAsCredential_username: '{{ sap_sid }}adm@{{ domain_name }}' PsDscRunAsCredential_password: '{{ domain_user_password }}' register: sql_role_addition @@ -178,9 +178,9 @@ Name: "{{ item }}" loop: - "{{ win_cluster_svc_account }}" - - "{{ domain_sqlsvc_account }}" - - "{{ domain_sqlagent_account }}" - - '{{ domain_service_account }}@{{ domain_name }}' + - "{{ domain | upper }}\{{ sql_svc_account_name }}" + - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_server_permission - name: "Create SQL Server Availability Group Endpoint" From eeff4023c8315690f2d1bc0dd0039e46c8eec8a7 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 6 Mar 2024 23:35:46 +0530 Subject: [PATCH 393/607] Update supported_tiers to node_tier in main.yaml --- deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 95aac20ce9..2a3401e618 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -90,7 +90,7 @@ when: - use_fence_kdump - ansible_os_family | upper == 'REDHAT' - - supported_tiers in ['scs','hana','db2','ers'] + - node_tier in ['scs','hana','db2','ers'] - name: "2.6 SAP Mounts: - Mount local file systems (shared)" ansible.posix.mount: From 118b10886e4e427632d20c11e4d5320b8e58ab1f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 20:18:09 +0200 Subject: [PATCH 394/607] Fix formatting in mssql-alwayson-prerequisites.yaml --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff 
--git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 0f6b1259ec..0d03255f6f 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -140,8 +140,8 @@ PsDscRunAsCredential_password: "{{ domain_service_password }}" loop: - "{{ win_cluster_svc_account }}" - - "{{ domain | upper }}\{{ sql_svc_account_name }}" - - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_login @@ -153,8 +153,8 @@ InstanceName: "{{ mssql_instance_name }}" ServerRoleName: "sysadmin" MembersToInclude: - - "{{ domain | upper }}\{{ sql_svc_account_name }}" - - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' PsDscRunAsCredential_username: '{{ sap_sid }}adm@{{ domain_name }}' PsDscRunAsCredential_password: '{{ domain_user_password }}' @@ -178,8 +178,8 @@ Name: "{{ item }}" loop: - "{{ win_cluster_svc_account }}" - - "{{ domain | upper }}\{{ sql_svc_account_name }}" - - "{{ domain | upper }}\{{ win_sqlagent_svc_account }}" + - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_server_permission From 3611b248e7716892a27273ad8c880f529e505547 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 6 Mar 2024 20:29:59 +0200 Subject: [PATCH 395/607] Update SQL service account names in mssql-alwayson-prerequisites.yaml --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 0d03255f6f..41e34c2059 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -140,7 +140,7 @@ PsDscRunAsCredential_password: "{{ domain_service_password }}" loop: - "{{ win_cluster_svc_account }}" - - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sql_svc_account }}' - '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_login @@ -153,7 +153,7 @@ InstanceName: "{{ mssql_instance_name }}" ServerRoleName: "sysadmin" MembersToInclude: - - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sql_svc_account }}' - '{{ domain | upper }}\{{ win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' PsDscRunAsCredential_username: '{{ sap_sid }}adm@{{ domain_name }}' @@ -178,7 +178,7 @@ Name: "{{ item }}" loop: - "{{ win_cluster_svc_account }}" - - '{{ domain | upper }}\{{ sql_svc_account_name }}' + - '{{ domain | upper }}\{{ win_sql_svc_account }}' - '{{ domain | upper }}\{{ 
win_sqlagent_svc_account }}' - '{{ domain | upper }}\{{ domain_service_account }}' register: sql_server_permission From 2575da668235f038078d75983a74774a7ab6dc29 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 7 Mar 2024 02:34:47 +0200 Subject: [PATCH 396/607] Fix proximity placement group logic in vm-scs.tf --- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 18be6572b3..7aac8915aa 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -128,7 +128,7 @@ resource "azurerm_linux_virtual_machine" "scs" { //If no ppg defined do not put the scs servers in a proximity placement group proximity_placement_group_id = var.application_tier.scs_use_ppg ? ( - local.scs_zonal_deployment ? var.ppg[count.index % max(local.scs_zone_count, 1)] : var.ppg[0]) : ( + local.scs_zonal_deployment ? var.ppg[count.index % max(length(var.ppg), 1)] : var.ppg[0]) : ( null ) @@ -313,7 +313,7 @@ resource "azurerm_windows_virtual_machine" "scs" { //If no ppg defined do not put the scs servers in a proximity placement group proximity_placement_group_id = var.application_tier.scs_use_ppg ? ( - local.scs_zonal_deployment ? var.ppg[count.index % max(local.scs_zone_count, 1)] : var.ppg[0]) : ( + local.scs_zonal_deployment ? var.ppg[count.index % max(length(var.ppg), 1)] : var.ppg[0]) : ( null ) From bfb62d6b534dba15c362a9c1c90f6741e3f7e33f Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 7 Mar 2024 13:00:48 +0530 Subject: [PATCH 397/607] Update kdump mount path, update kdump path and restart kdump service --- .../tasks/1.17.2.0-cluster-RedHat.yml | 14 +++++++++- .../tasks/1.17.2.0-cluster-Suse.yml | 26 +++++++++---------- .../2.6-sap-mounts/tasks/main.yaml | 2 +- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index e1946c47c0..658bfd3f66 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -292,7 +292,19 @@ - kdump_enabled == "enabled" - inventory_hostname == secondary_instance_name - # restart kdump service + # set the kdump path to /usr/crash in /etc/kdump.conf + - name: "1.17 Generic Pacemaker - Set the kdump path to /usr/crash in /etc/kdump.conf" + ansible.builtin.replace: + path: /etc/kdump.conf + regexp: '^path(.*)$' + replace: "path /usr/crash" + backup: true + register: kdump_conf_file_path + failed_when: kdump_conf_file_path.rc != 0 + when: + - kdump_enabled == "enabled" + + # restart kdump service as we made changes to the configuration - name: "1.17 Generic Pacemaker - Restart kdump service" ansible.builtin.service: name: kdump diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 2e077a2115..679a2ed01e 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -229,29 +229,29 @@ (scs_cluster_type == "ISCSI") block: - - name: 
"systemctl enable sbd" + - name: "systemctl enable sbd" ansible.builtin.systemd: - name: sbd - enabled: true - daemon_reload: true + name: sbd + enabled: true + daemon_reload: true # restart cluster on primary node to ensure sbd is enabled - - name: "1.17 Generic Pacemaker - stop cluster on both nodes to ensure sbd is enabled." - ansible.builtin.command: crm cluster stop + - name: "1.17 Generic Pacemaker - stop cluster on both nodes to ensure sbd is enabled." + ansible.builtin.command: crm cluster stop - - name: "1.17 Generic Pacemaker - Check if cluster is stopped" + - name: "1.17 Generic Pacemaker - Check if cluster is stopped" ansible.builtin.wait_for: - path: /var/lib/pacemaker/cib/cib.xml - state: absent - timeout: 60 + path: /var/lib/pacemaker/cib/cib.xml + state: absent + timeout: 60 - - name: "1.17 Generic Pacemaker - start cluster on both nodes to ensure sbd is enabled." - ansible.builtin.command: crm cluster start + - name: "1.17 Generic Pacemaker - start cluster on both nodes to ensure sbd is enabled." + ansible.builtin.command: crm cluster start always: - name: "1.17 Generic Pacemaker - SBD service configuration" ansible.builtin.debug: - msg: "SBD service configuration ends" + msg: "SBD service configuration ends" # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 2a3401e618..00b3fa1403 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -83,7 +83,7 @@ - name: "2.6 SAP Mounts: - Mount local kdump file path to save vmcore" ansible.posix.mount: src: "/dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump" - path: '/var/crash' + path: '/usr/crash' fstype: 'xfs' opts: defaults state: mounted From b069f2bf70b0c56409af80b5acabe3902ee91ed4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 7 Mar 2024 11:16:34 +0200 Subject: [PATCH 398/607] Refactor proximity_placement_group_id calculation in vm-app.tf --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 6e00c45219..49cae50458 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -264,9 +264,9 @@ resource "azurerm_windows_virtual_machine" "app" { proximity_placement_group_id = var.application_tier.app_use_ppg ? ( - local.app_zonal_deployment ? var.ppg[count.index % max(local.app_zone_count, 1)] : var.ppg[0]) : ( - null - ) + var.ppg[count.index % max(length(var.ppg), 1)]) : ( + null + ) //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = var.application_tier.app_use_avset ? ( @@ -277,6 +277,7 @@ resource "azurerm_windows_virtual_machine" "app" { null ) + virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null //If length of zones > 1 distribute servers evenly across zones zone = var.application_tier.app_use_avset ? 
null : try(local.app_zones[count.index % max(local.app_zone_count, 1)], null) From 799e2a63e0616d7bbb48012729068f3972369041 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 7 Mar 2024 14:34:58 +0200 Subject: [PATCH 399/607] Fix SQL Server Always On configuration --- .../tasks/4.4.1.1-mssql-alwayson-config.yaml | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml index 0e86619aaf..5fae2fe8cb 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml @@ -50,19 +50,6 @@ when: - ansible_hostname == mssql_primary_node block: - - name: "Set SQL Server Always On Endpoint Permission" - ansible.windows.win_dsc: - resource_name: SqlEndpointPermission - Ensure: Present - ServerName: "{{ ansible_hostname }}" - InstanceName: "{{ mssql_instance_name }}" - Name: "SDAF-HADR" - Principal: "{{ domain_sqlsvc_account }}" - Permission: "CONNECT" - PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" - PsDscRunAsCredential_password: "{{ domain_service_password }}" - register: sql_always_on_endpoint_permission - - name: "Enable SQL Server Always On Service" ansible.windows.win_dsc: resource_name: SqlAlwaysOnService @@ -77,6 +64,20 @@ retries: 6 delay: 20 + - name: "Set SQL Server Always On Endpoint Permission" + ansible.windows.win_dsc: + resource_name: SqlEndpointPermission + Ensure: Present + ServerName: "{{ ansible_hostname }}" + InstanceName: "{{ mssql_instance_name }}" + Name: "SDAF-HADR" + Principal: "{{ domain_sqlsvc_account }}" + Permission: "CONNECT" + PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" + PsDscRunAsCredential_password: "{{ domain_service_password }}" + register: sql_always_on_endpoint_permission + + # restart SQL Server service - name: "Restart SQL Server service on {{ ansible_hostname }}" ansible.windows.win_service: From 96fac4c4211d7985e79661f1c295ffb3e0f8f39b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 7 Mar 2024 14:52:41 +0200 Subject: [PATCH 400/607] Update SQL Server SPNs and service account --- .../tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml index 41e34c2059..1062c293f7 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.0-mssql-alwayson-prerequisites.yaml @@ -37,7 +37,7 @@ - domain is defined -- name: "WIN: Joining the domain {{ domain }}" +- name: "WIN: Information {{ domain }}" ansible.builtin.debug: msg: - "Domain: {{ domain_name }}" @@ -85,10 +85,10 @@ - name: "Add SPNs for SQL Server" ansible.windows.win_shell: | - 'setspn -A MSSQLSvc/{{ mssql_primary_node }}.{{ domain_name }} {{ domain_sqlsvc_account }}' - 'setspn -A MSSQLSvc/{{ mssql_primary_node }}.{{ domain_name }}:1433 {{ domain_sqlsvc_account }}' - 'setspn -A MSSQLSvc/{{ mssql_secondary_node }}.{{ domain_name }} {{ domain_sqlsvc_account }}' - 'setspn -A MSSQLSvc/{{ 
mssql_secondary_node }}.{{ domain_name }}:1433 {{ domain_sqlsvc_account }}' + 'setspn -A MSSQLSvc/{{ mssql_primary_node }}.{{ domain_name }} {{ domain | upper }}\{{ win_sql_svc_account }}' + 'setspn -A MSSQLSvc/{{ mssql_primary_node }}.{{ domain_name }}:1433 {{ domain | upper }}\{{ win_sql_svc_account }}' + 'setspn -A MSSQLSvc/{{ mssql_secondary_node }}.{{ domain_name }} {{ domain | upper }}\{{ win_sql_svc_account }}' + 'setspn -A MSSQLSvc/{{ mssql_secondary_node }}.{{ domain_name }}:1433 {{ domain | upper }}\{{ win_sql_svc_account }}' - name: "Convert SQL Server service from running with local account to domain account" ansible.windows.win_service: From 13353dc03889fb20aff32e621e88e8b7830a3672 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 7 Mar 2024 18:32:34 +0530 Subject: [PATCH 401/607] Update SQL Server AlwaysOn configuration --- .../tasks/4.4.1.1-mssql-alwayson-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml index 5fae2fe8cb..5afa0f2b92 100644 --- a/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml +++ b/deploy/ansible/roles-db/windows/4.4.0-mssql-install/tasks/4.4.1.1-mssql-alwayson-config.yaml @@ -71,7 +71,7 @@ ServerName: "{{ ansible_hostname }}" InstanceName: "{{ mssql_instance_name }}" Name: "SDAF-HADR" - Principal: "{{ domain_sqlsvc_account }}" + Principal: '{{ domain | upper }}\{{ win_sql_svc_account }}' Permission: "CONNECT" PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" PsDscRunAsCredential_password: "{{ domain_service_password }}" @@ -218,7 +218,7 @@ ServerName: "{{ ansible_hostname }}" InstanceName: "{{ mssql_instance_name }}" Name: "SDAF-HADR" - Principal: "{{ domain_sqlsvc_account }}" + Principal: '{{ domain | upper }}\{{ win_sql_svc_account }}' Permission: "CONNECT" PsDscRunAsCredential_username: "{{ domain_service_account }}@{{ domain_name }}" PsDscRunAsCredential_password: "{{ domain_service_password }}" From 00b97352205af5c8ea033dab16254e024e360133 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 7 Mar 2024 18:23:04 +0200 Subject: [PATCH 402/607] Add error handling for deployment account access --- deploy/scripts/installer.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 6f94f95ecd..d10188f416 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -346,6 +346,22 @@ then echo "#########################################################################################" echo "" az account set --sub "${STATE_SUBSCRIPTION}" + + return_code=$? 
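+    # az account set exits with a non-zero code when the signed-in identity
+    # cannot select the subscription; fail the pipeline early in that case.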
+ if [ 0 != $return_code ]; then + + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred The deployment account (MSI or SPN) does not have access to $resetformatting #" + echo -e "# $boldred ${STATE_SUBSCRIPTION} $resetformatting #" + echo "# #" + echo "#########################################################################################" + + echo "##vso[task.logissue type=error]The deployment account (MSI or SPN) does not have access to ${STATE_SUBSCRIPTION}" + exit $return_code + fi + + account_set=1 fi load_config_vars "${system_config_information}" "STATE_SUBSCRIPTION" From 77e29b7aa4c2b720aa0e2115e1643184895689fb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 12:02:49 +0200 Subject: [PATCH 403/607] Update AMS subnet configuration --- deploy/terraform/terraform-units/modules/sap_landscape/ams.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index 742600b334..e39d1f34f4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -3,8 +3,8 @@ data "azurerm_subnet" "ams" { provider = azurerm.main count = length(local.ams_subnet_arm_id) > 0 ? 1 : 0 name = local.ams_subnet_name - virtual_network_name = local.SAP_virtualnetwork_name - resource_group_name = local.resourcegroup_name + virtual_network_name = split("/", local.ams_subnet_arm_id)[8] # Get the Network from actual arm_id + resource_group_name = split("/", local.ams_subnet_arm_id)[4] # Get RG name from actual arm_id } # Created AMS instance if log analytics workspace is NOT defined From d7b5738771195ef8a80ec9b514621c61d19235b1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 12:24:26 +0200 Subject: [PATCH 404/607] Update default_action in key_vault.tf --- .../terraform/terraform-units/modules/sap_deployer/key_vault.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf index 453a835b82..696ebb0bf1 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf @@ -34,7 +34,7 @@ resource "azurerm_key_vault" "kv_user" { content { bypass = "AzureServices" - default_action = local.management_subnet_exists ? "Allow" : "Deny" + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" ip_rules = compact( [ From d72dade3b4d7ee519d5a09b6eb1f7ba94ba1f284 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 12:32:38 +0200 Subject: [PATCH 405/607] Update ams.tf to use arm_id for subnet name and network name --- deploy/terraform/terraform-units/modules/sap_landscape/ams.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index e39d1f34f4..7a082b6751 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -2,7 +2,7 @@ data "azurerm_subnet" "ams" { provider = azurerm.main count = length(local.ams_subnet_arm_id) > 0 ? 
1 : 0 - name = local.ams_subnet_name + name = split("/", local.ams_subnet_arm_id)[10] # Get the Subnet from actual arm_id virtual_network_name = split("/", local.ams_subnet_arm_id)[8] # Get the Network from actual arm_id resource_group_name = split("/", local.ams_subnet_arm_id)[4] # Get RG name from actual arm_id } From 1ee1e04829b58fba3cb0de0905785fd2cd35b521 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 13:38:07 +0200 Subject: [PATCH 406/607] Refactor Agent_IP variable to conditionally include it in storage and key vault firewalls --- deploy/terraform/bootstrap/sap_deployer/module.tf | 2 +- .../terraform/bootstrap/sap_deployer/tfvar_variables.tf | 6 ++++++ deploy/terraform/run/sap_deployer/module.tf | 2 +- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 8 +++++++- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index 661b63985d..fb0d867adf 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -15,7 +15,7 @@ module "sap_deployer" { ) additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url - Agent_IP = var.Agent_IP + Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" agent_pat = var.agent_pat agent_pool = var.agent_pool ansible_core_version = var.ansible_core_version diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index a3d07e4723..c4376f6163 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -411,6 +411,7 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } + ######################################################################################### # # # DNS settings # @@ -519,6 +520,11 @@ variable "Agent_IP" { default = "" } +variable "add_Agent_IP" { + description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls" + default = true + type = bool + } ############################################################################### # # diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index 63d4142613..37c7b63032 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -15,7 +15,7 @@ module "sap_deployer" { ) additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies agent_ado_url = var.agent_ado_url - Agent_IP = var.Agent_IP + Agent_IP = var.add_Agent_IP ? 
var.Agent_IP : ""
   agent_pat                                         = var.agent_pat
   agent_pool                                        = var.agent_pool
   ansible_core_version                              = var.ansible_core_version
diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
index 7a7e6b14f8..0d56ca6918 100644
--- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
@@ -34,7 +34,7 @@ variable "resourcegroup_name" {
                                }

 variable "resourcegroup_arm_id" {
-  description = "If provided, the Azure resource group id"
+  description = "If provided, the Azure resource group id"
   default     = ""
 }

@@ -520,6 +520,12 @@ variable "Agent_IP" {
   default     = ""
 }

+variable "add_Agent_IP" {
+  description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls"
+  default     = true
+  type        = bool
+  }
+
 variable "tfstate_resource_id" {
   description = "Resource id of tfstate storage account"
   validation {

From d947a48f6005fffa22f0ef64044128e1c2d4a007 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Fri, 8 Mar 2024 14:00:12 +0200
Subject: [PATCH 407/607] Refactor admin password authentication in
 vm-deployer.tf

---
 .../terraform-units/modules/sap_deployer/vm-deployer.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
index 8e72c62bd7..8422bf9cfe 100644
--- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
+++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf
@@ -127,7 +127,7 @@ resource "azurerm_linux_virtual_machine" "deployer" {
   network_interface_ids           = [azurerm_network_interface.deployer[count.index].id]
   size                            = var.deployer.size
   admin_username                  = local.username
-  admin_password                  = lookup(var.deployer.authentication, "password", null)
+  admin_password                  = var.deployer.authentication.type != "password" ? null: local.password
   disable_password_authentication = var.deployer.authentication.type != "password" ? true : false
   source_image_id                 = var.deployer.os.source_image_id != "" ? var.deployer.os.source_image_id : null

From 895755fc92365a5d738a62a2ac00035d6667c23e Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Fri, 8 Mar 2024 14:04:19 +0200
Subject: [PATCH 408/607] Add Agent IP configuration option

---
 deploy/terraform/bootstrap/sap_library/module.tf       | 1 +
 .../terraform/bootstrap/sap_library/tfvar_variables.tf | 9 +++++++--
 deploy/terraform/run/sap_library/module.tf             | 4 ++--
 deploy/terraform/run/sap_library/tfvar_variables.tf    | 9 +++++++--
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/deploy/terraform/bootstrap/sap_library/module.tf b/deploy/terraform/bootstrap/sap_library/module.tf
index 9db27fa3e8..f015481b40 100644
--- a/deploy/terraform/bootstrap/sap_library/module.tf
+++ b/deploy/terraform/bootstrap/sap_library/module.tf
@@ -10,6 +10,7 @@ module "sap_library" {
     azurerm.dnsmanagement = azurerm.dnsmanagement
   }

+  Agent_IP = var.add_Agent_IP ?
var.Agent_IP : "" bootstrap = true deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 4c2b36c2ed..6653d9f325 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -238,11 +238,16 @@ variable "use_webapp" { variable "Agent_IP" { - description = "If provided, contains the IP address of the agent" - type = string + description = "IP address of the agent" default = "" } +variable "add_Agent_IP" { + description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls" + default = true + type = bool + } + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_library/module.tf b/deploy/terraform/run/sap_library/module.tf index 95df96a702..7a16a19606 100644 --- a/deploy/terraform/run/sap_library/module.tf +++ b/deploy/terraform/run/sap_library/module.tf @@ -9,6 +9,7 @@ module "sap_library" { azurerm.deployer = azurerm.deployer azurerm.dnsmanagement = azurerm.dnsmanagement } + Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" bootstrap = true deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) @@ -16,8 +17,8 @@ module "sap_library" { dns_zone_names = var.dns_zone_names infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_subscription_id = var.management_dns_subscription_id management_dns_resourcegroup_name = var.management_dns_resourcegroup_name + management_dns_subscription_id = var.management_dns_subscription_id naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming place_delete_lock_on_resources = var.place_delete_lock_on_resources service_principal = var.use_deployer ? 
local.service_principal : local.account @@ -27,7 +28,6 @@ module "sap_library" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_webapp = var.use_webapp || length(try(data.terraform_remote_state.deployer[0].outputs.webapp_id,"")) > 0 - Agent_IP = var.Agent_IP } module "sap_namegenerator" { diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index a54f5fe9b6..b2785da311 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -242,11 +242,16 @@ variable "use_webapp" { variable "Agent_IP" { - description = "If provided, contains the IP address of the agent" - type = string + description = "IP address of the agent" default = "" } +variable "add_Agent_IP" { + description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls" + default = true + type = bool + } + variable "tfstate_resource_id" { description = "Resource id of tfstate storage account" From 505c9197293b9aa45821a3cd89a09dc3ef118932 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:10:56 +0200 Subject: [PATCH 409/607] Add USE_MSI check before ARM_CLIENT_ID check --- deploy/pipelines/03-sap-system-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 58e2880d8d..2e9a8ba696 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -93,6 +93,7 @@ stages: echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." exit 2 fi + echo "Before "USE_MSI" check" if [ $USE_MSI != "true" ]; then if [ -z $WL_ARM_CLIENT_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." From e21bf8bc507f3fad1c4f5387cdfa413b18ed2830 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:19:06 +0200 Subject: [PATCH 410/607] Refactor password handling in sap_deployer module --- .../terraform-units/modules/sap_deployer/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf index 4ba08257ac..7fa8473b4f 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf @@ -168,7 +168,7 @@ locals { password = local.enable_password ? ( local.pwd_exist ? 
( data.azurerm_key_vault_secret.pwd[0].value) : ( - try(var.authentication.password, random_password.deployer[0].result) + coalesce(var.authentication.password, random_password.deployer[0].result) )) : ( "" ) From c344737a64c431baf5bc4c90334a7cea39a99fb7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:19:11 +0200 Subject: [PATCH 411/607] Update login process in deployment pipeline --- deploy/pipelines/03-sap-system-deployment.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 2e9a8ba696..c7d6395da0 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -246,8 +246,9 @@ stages: echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 fi + echo -e "$green--- Login ---$reset" + echo "USE_MSI: $USE_MSI" - az logout --output none if [ $USE_MSI != "true" ]; then echo "Using SPN" From d3ddd0312841023d2e42d7ea265d8c52a1df3c57 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:23:40 +0200 Subject: [PATCH 412/607] Add random password generation for deployer and update dns_label variable description --- .../modules/sap_deployer/key_vault.tf | 15 --------------- .../modules/sap_deployer/randomid.tf | 17 +++++++++++++++++ .../modules/sap_library/variables_global.tf | 8 ++------ 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf index 696ebb0bf1..950b4d582a 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/key_vault.tf @@ -402,18 +402,3 @@ resource "azurerm_management_lock" "keyvault" { } } -// Generate random password if password is set as authentication type, and save in KV -resource "random_password" "deployer" { - count = ( - local.enable_password - && !local.pwd_exist - && try(var.authentication.password, "") == "" - ) ? 1 : 0 - - length = 32 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - special = true - override_special = "_%@" -} diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/randomid.tf b/deploy/terraform/terraform-units/modules/sap_deployer/randomid.tf index 6cc8beff27..f4e0842ab1 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/randomid.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/randomid.tf @@ -2,3 +2,20 @@ resource "random_id" "deployer" { byte_length = 4 } + + +// Generate random password if password is set as authentication type, and save in KV +resource "random_password" "deployer" { + count = ( + local.enable_password + && !local.pwd_exist + && try(var.authentication.password, "") == "" + ) ? 
1 : 0 + + length = 32 + min_upper = 2 + min_lower = 2 + min_numeric = 2 + special = true + override_special = "_%@" +} diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf index 8c0f609719..01b3d1aad2 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf @@ -60,12 +60,8 @@ variable "key_vault" { } variable "dns_label" { - validation { - condition = ( - length(trimspace(var.dns_label)) != 0 - ) - error_message = "The dns_label must be specified." - } + description = "DNS label for the deployment" + default = "" } From 6db6d15de33ac0e7c03c341280dcc4ae8abb4325 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:30:19 +0200 Subject: [PATCH 413/607] Update Azure Pipeline script to handle MSI usage*** --- .../pipelines/03-sap-system-deployment.yaml | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index c7d6395da0..6dc02bf9e5 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -249,6 +249,16 @@ stages: echo -e "$green--- Login ---$reset" echo "USE_MSI: $USE_MSI" + if [ -z $USE_MSI ]; then + USE_MSI="false" + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query USE_MSI.value --output table) + if [ -n "${az_var}" ]; then + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors + else + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors + fi + fi + if [ $USE_MSI != "true" ]; then echo "Using SPN" @@ -409,17 +419,18 @@ stages: displayName: Deploy_SAP_infrastructure env: - WL_ARM_SUBSCRIPTION_ID: $(ARM_SUBSCRIPTION_ID) - WL_ARM_CLIENT_ID: $(ARM_CLIENT_ID) - WL_ARM_CLIENT_SECRET: $(ARM_CLIENT_SECRET) - WL_ARM_TENANT_ID: $(ARM_TENANT_ID) - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} CONFIG_REPO_PATH: ${{ parameters.config_repo_path }} + LOGON_USING_SPN: $(Logon_Using_SPN) PAT: $(WZ_PAT) + SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} + SYSTEM_ACCESSTOKEN: $(System.AccessToken) TEST_ONLY: ${{ parameters.test }} - TF_LOG: $(TF_LOG) TF_IN_AUTOMATION: true - LOGON_USING_SPN: $(Logon_Using_SPN) + TF_LOG: $(TF_LOG) + USE_MSI: $(Use_MSI) + WL_ARM_CLIENT_ID: $(ARM_CLIENT_ID) + WL_ARM_CLIENT_SECRET: $(ARM_CLIENT_SECRET) + WL_ARM_SUBSCRIPTION_ID: $(ARM_SUBSCRIPTION_ID) + WL_ARM_TENANT_ID: $(ARM_TENANT_ID) failOnStderr: false From 5412a91b526638511993cbfb5fbd3d8a34e7eb79 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:37:17 +0200 Subject: [PATCH 414/607] Remove unnecessary echo statement in login section --- deploy/pipelines/03-sap-system-deployment.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 6dc02bf9e5..053e97aae5 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -247,7 +247,6 @@ stages: exit 2 fi echo -e "$green--- Login ---$reset" - echo "USE_MSI: $USE_MSI" if [ -z $USE_MSI ]; then USE_MSI="false" 
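+        # persist the resolved USE_MSI default to the variable group so that
+        # later stages and subsequent runs read a consistent value.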
From f85a186e0599f1c91b422afe7b86fab35c153fb0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:41:57 +0200 Subject: [PATCH 415/607] Update transform.tf to include additional conditions for app_use_avset --- deploy/terraform/run/sap_system/transform.tf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index ac0225c6b8..3d50aee1f8 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -178,11 +178,10 @@ locals { false) : ( var.application_server_use_ppg ) : false - app_use_avset = var.application_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? ( + app_use_avset = var.application_server_count == 0 || var.use_scalesets_for_deployment || length(var.application_server_zones) > 0 || !local.enable_app_tier_deployment ? ( false) : ( var.application_server_use_avset ) - avset_arm_ids = var.application_server_vm_avset_arm_ids scs_server_count = local.enable_app_tier_deployment ? ( max(var.scs_server_count, try(var.application_tier.scs_server_count, 0)) From d947a48f6005fffa22f0ef64044128e1c2d4a007 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 14:46:38 +0200 Subject: [PATCH 416/607] Fix zone ignore_changes in app_tier VMs --- deploy/terraform/run/sap_system/transform.tf | 3 ++- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 3d50aee1f8..ac0225c6b8 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -178,10 +178,11 @@ locals { false) : ( var.application_server_use_ppg ) : false - app_use_avset = var.application_server_count == 0 || var.use_scalesets_for_deployment || length(var.application_server_zones) > 0 || !local.enable_app_tier_deployment ? ( + app_use_avset = var.application_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? ( false) : ( var.application_server_use_avset ) + avset_arm_ids = var.application_server_vm_avset_arm_ids scs_server_count = local.enable_app_tier_deployment ? 
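  # As changed above, app_use_avset now also collapses to false when
  # application_server_zones is non-empty: a VM cannot be placed in both an
  # availability zone and an availability set, so requesting zonal placement
  # rules out the avset path. The user preference
  # application_server_use_avset therefore only applies when application
  # servers are deployed non-zonally and outside a scale set.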
( max(var.scs_server_count, try(var.application_tier.scs_server_count, 0)) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 49cae50458..a4875b1600 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -237,7 +237,8 @@ resource "azurerm_linux_virtual_machine" "app" { } lifecycle { ignore_changes = [ - source_image_id + source_image_id, + zone ] } @@ -365,7 +366,8 @@ resource "azurerm_windows_virtual_machine" "app" { lifecycle { ignore_changes = [ // Ignore changes to computername - source_image_id + source_image_id, + zone ] } From a30dde33f9a8442d92936ce256ea41e417eea673 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 15:57:47 +0200 Subject: [PATCH 417/607] Add azure-devops extension installation if not already installed --- deploy/scripts/configure_deployer.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index 10a41e3207..20eddccce6 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -558,10 +558,6 @@ esac az config set extension.use_dynamic_install=yes_without_prompt -devops_extension_installed=$(az extension list --query "[?name=='azure-devops'].name | [0]") -if [ -z "$devops_extension_installed" ]; then - az extension add --name azure-devops --output none -fi # Fail if any command exits with a non-zero exit status set -o errexit @@ -751,6 +747,12 @@ AGENT_DIR="/home/${USER}/agent" if [ -f "$AGENT_DIR/.agent" ]; then echo "Azure DevOps Agent is configured." echo export "PATH=${ansible_bin}:${tf_bin}:${PATH}" | tee -a /tmp/deploy_server.sh + + devops_extension_installed=$(az extension list --query "[?name=='azure-devops'].name | [0]") + if [ -z "$devops_extension_installed" ]; then + az extension add --name azure-devops --output none + fi + else echo "Azure DevOps Agent is not configured." 
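The extension handling relocated above is an idempotency guard: the JMESPath query returns the extension name when it is already installed and an empty string otherwise, so the add only runs in the empty case. A minimal sketch of the same guard, generalized over the extension name (the function name is illustrative):

ensure_az_extension() {
  # Install an Azure CLI extension only when it is missing (sketch).
  local ext="$1" installed
  installed=$(az extension list --query "[?name=='${ext}'].name | [0]" --output tsv)
  if [ -z "${installed}" ]; then
    az extension add --name "${ext}" --output none
  fi
}

ensure_az_extension azure-devops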
From 35dbb3e61c3182d1baa749beb697da18ddba0e9c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 16:00:09 +0200 Subject: [PATCH 418/607] Add files.pythonhosted.org to list of URLs --- deploy/configs/sdaf_urls.json | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/configs/sdaf_urls.json b/deploy/configs/sdaf_urls.json index 6c6ec3d582..008d9e16be 100644 --- a/deploy/configs/sdaf_urls.json +++ b/deploy/configs/sdaf_urls.json @@ -10,6 +10,7 @@ "https://checkpoint-api.hashicorp.com", "https://bootstrap.pypa.io", "https://pypi.org", + "https://files.pythonhosted.org", "https://pythonhosted.org", "https://galaxy.ansible.com" ], From 7274f3674a2b2874455889d69b3b188691cf6613 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 16:37:26 +0200 Subject: [PATCH 419/607] Update tf_version to 1.7.4 in SDAF-General variable group --- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index c3ddfcd65a..bd2e46e468 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -583,7 +583,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.0" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.4" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } From b1f5625b4f4c450057ea0fa3e209fb6267654537 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 8 Mar 2024 20:23:05 +0530 Subject: [PATCH 420/607] Add log file functionality to Test-SDAFReadiness.ps1 script --- deploy/scripts/Test-SDAFReadiness.ps1 | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1 index d85af0015c..dd3b130d32 100644 --- a/deploy/scripts/Test-SDAFReadiness.ps1 +++ b/deploy/scripts/Test-SDAFReadiness.ps1 @@ -12,8 +12,17 @@ function Show-Menu($data) { $rnd = $(Get-Random -Minimum 1 -Maximum 1000).ToString() +$LogFileDir = Read-Host "Please enter the directory to save the log file" + +if(Test-Path $LogFileDir) { + $LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md" + $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName +} +else { + Write-Host "The directory does not exist" + return +} -$LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md" Add-Content -Path $LogFileName "# SDAF Assesment #" Add-Content -Path $LogFileName "" From 3021cbae58ca3efea002a184ce3f06e9289982a7 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 8 Mar 2024 21:06:13 +0530 Subject: [PATCH 421/607] Refactor 
sbd_device variable assignment in 1.17.1.2-sbd.yaml --- .../roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index 422fff9807..105e4a5b7b 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -234,7 +234,7 @@ - name: "set fact sbd_device as list of entries in sbdMap" ansible.builtin.set_fact: - sbd_device: "{{ sbdMap | map(attribute='diskById') | join(';') | list }}" + sbd_device: "{{ sbdMap | map(attribute='diskById') | join(';') }}" when: sbdDumpCommand_results.rc == 1 - name: "Show sbd_device..." From 20281c679b0905ff3db1d8eae64a3d82a17edc3e Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 8 Mar 2024 21:11:52 +0530 Subject: [PATCH 422/607] Refactor sbd_device variable assignment --- .../roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index 105e4a5b7b..1e0a5b5f51 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -235,13 +235,13 @@ - name: "set fact sbd_device as list of entries in sbdMap" ansible.builtin.set_fact: sbd_device: "{{ sbdMap | map(attribute='diskById') | join(';') }}" - when: sbdDumpCommand_results.rc == 1 + - name: "Show sbd_device..." ansible.builtin.debug: var: sbd_device verbosity: 2 - when: sbdDumpCommand_results.rc == 1 + # /*------------------------------------ # | From 6545b5faf04262cfa99b2a5849a89bfb8e88261c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 17:52:50 +0200 Subject: [PATCH 423/607] Fix variable naming inconsistency in ANF HANA data and log volumes --- Webapp/SDAF/Models/SystemModel.cs | 8 ++++---- Webapp/SDAF/ParameterDetails/SystemDetails.json | 4 ++-- Webapp/SDAF/ParameterDetails/SystemTemplate.txt | 4 ++-- deploy/terraform/run/sap_system/tfvar_variables.tf | 4 ++-- deploy/terraform/run/sap_system/transform.tf | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index a896d07dc7..41df392bca 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -221,7 +221,7 @@ public bool IsValid() | | +------------------------------------4--------------------------------------*/ - + public string[] proximityplacementgroup_names { get; set; } [PpgIdValidator] @@ -285,7 +285,7 @@ public bool IsValid() [IpAddressValidator] public string[] database_vm_db_nic_ips { get; set; } - + [IpAddressValidator] public string[] database_vm_db_nic_secondary_ips { get; set; } @@ -492,7 +492,7 @@ public bool IsValid() public int? ANF_HANA_data_volume_throughput { get; set; } - public int? ANF_hana_data_volume_count { get; set; } = 1; + public int? ANF_HANA_data_volume_count { get; set; } = 1; /*---------------------------------------------------------------------------8 | | @@ -509,7 +509,7 @@ public bool IsValid() public int? ANF_HANA_log_volume_throughput { get; set; } - public int? ANF_hana_log_volume_count { get; set; } = 1; + public int? 
ANF_HANA_log_volume_count { get; set; } = 1; /*---------------------------------------------------------------------------8 | | diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index e1d55b1ba1..eca523df8e 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -1935,7 +1935,7 @@ "Display": 3 }, { - "Name": "ANF_hana_data_volume_count", + "Name": "ANF_HANA_data_volume_count", "Required": false, "Description": "Number of ANF Data Volumes", "Type": "field", @@ -1995,7 +1995,7 @@ "Display": 3 }, { - "Name": "ANF_hana_data_volume_count", + "Name": "ANF_HANA_data_volume_count", "Required": false, "Description": "Number of ANF Data Volumes", "Type": "field", diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 0de4000fe4..260c61807e 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -525,7 +525,7 @@ $$ANF_HANA_data_use_existing_volume$$ $$ANF_HANA_data_volume_name$$ # Number of ANF Data Volumes -$$ANF_hana_data_volume_count$$ +$$ANF_HANA_data_volume_count$$ ######################################################################################### @@ -550,7 +550,7 @@ $$ANF_HANA_log_use_existing$$ $$ANF_HANA_log_volume_name$$ # Number of ANF Data Volumes -$$ANF_hana_log_volume_count$$ +$$ANF_HANA_log_volume_count$$ ######################################################################################### # # diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index b73b207b3a..6dae91ffd0 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1108,7 +1108,7 @@ variable "ANF_HANA_data_volume_throughput" { default = 128 } -variable "ANF_hana_data_volume_count" { +variable "ANF_HANA_data_volume_count" { description = "If defined provides the number of data volumes" default = 1 } @@ -1140,7 +1140,7 @@ variable "ANF_HANA_log_volume_throughput" { default = 128 } -variable "ANF_hana_log_volume_count" { +variable "ANF_HANA_log_volume_count" { description = "If defined provides the number of data volumes" default = 1 } diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index ac0225c6b8..5f2570de75 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -650,14 +650,14 @@ locals { use_existing_data_volume = var.ANF_HANA_data_use_existing_volume data_volume_name = var.ANF_HANA_data_volume_name data_volume_throughput = var.ANF_HANA_data_volume_throughput - data_volume_count = var.ANF_hana_data_volume_count + data_volume_count = var.ANF_HANA_data_volume_count use_for_log = var.ANF_HANA_log log_volume_size = var.ANF_HANA_log_volume_size use_existing_log_volume = var.ANF_HANA_log_use_existing log_volume_name = var.ANF_HANA_log_volume_name log_volume_throughput = var.ANF_HANA_log_volume_throughput - log_volume_count = var.ANF_hana_log_volume_count + log_volume_count = var.ANF_HANA_log_volume_count use_for_shared = var.ANF_HANA_shared shared_volume_size = var.ANF_HANA_shared_volume_size From e8ad413f691b9e17069e2e9d4f273173bbb8a264 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 18:02:10 +0200 Subject: [PATCH 424/607] Update Agent_IP handling in sap_landscape module --- 
deploy/terraform/run/sap_landscape/module.tf | 2 +- deploy/terraform/run/sap_landscape/tfvar_variables.tf | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index e71e92a004..a8d8753e6a 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -14,7 +14,7 @@ module "sap_landscape" { } additional_users_to_add_to_keyvault_policies = var.additional_users_to_add_to_keyvault_policies - Agent_IP = var.Agent_IP + Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" ANF_settings = local.ANF_settings authentication = local.authentication create_vaults_and_storage_dns_a_records = var.create_vaults_and_storage_dns_a_records diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 4f23f1bd8f..2cd7b88549 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -448,6 +448,11 @@ variable "Agent_IP" { type = string default = "" } +variable "add_Agent_IP" { + description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls" + default = true + type = bool + } variable "storage_account_replication_type" { description = "Storage account replication type" From bbca7dc7a56c328c973b5bf4f8d7e1662419cd43 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 18:11:53 +0200 Subject: [PATCH 425/607] Add conditional logic for Agent IP in module.tf and add add_Agent_IP variable in tfvar_variables.tf --- deploy/terraform/run/sap_system/module.tf | 2 +- deploy/terraform/run/sap_system/tfvar_variables.tf | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index d9263dfaa9..5137b5d520 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -54,7 +54,7 @@ module "common_infrastructure" { azurerm.main = azurerm.system azurerm.dnsmanagement = azurerm.dnsmanagement } - Agent_IP = var.Agent_IP + Agent_IP = var.add_Agent_IP ? 
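  # The gate above mirrors the sap_landscape change: with add_Agent_IP set to
  # false an empty string is passed instead of var.Agent_IP, which presumably
  # lets the downstream storage account and key vault firewall rules skip the
  # agent IP entirely. A hedged tfvars sketch of the two knobs as declared in
  # tfvar_variables.tf:
  #   add_Agent_IP = false   # do not add the agent IP to the firewalls
  #   Agent_IP     = ""      # otherwise the agent's public IP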
var.Agent_IP : "" application_tier = local.application_tier application_tier_ppg_names = module.sap_namegenerator.naming_new.app_ppg_names authentication = local.authentication diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 6dae91ffd0..7d79006303 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -955,6 +955,11 @@ variable "Agent_IP" { type = string default = "" } +variable "add_Agent_IP" { + description = "Boolean value indicating if the Agent IP should be added to the storage and key vault firewalls" + default = true + type = bool + } variable "shared_home" { description = "If defined provides shared-home support" From 229c5b396a472f929bc2463806c41e71710aa75d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 8 Mar 2024 22:03:18 +0530 Subject: [PATCH 426/607] Fix virtual host assignment in hosts file task --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 272e65728b..d5f6af056c 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -119,10 +119,9 @@ {%- if ( query('inventory_hostnames', sap_sid | upper ~ '_PAS') | length > 0 ) -%} {%- set _virthost = hostvars[query('inventory_hostnames', sap_sid | upper ~ '_PAS') | first]['virtual_host'] -%} {%- else -%} - {%- for hostvars[query('inventory_hostnames', 'all')] -%} + {%- for item in hostvars[query('inventory_hostnames', 'all')] -%} {%- if 'pas' in hostvars[item][supported_tiers] -%} {%- set _virthost = hostvars[item]['virtual_host'] -%} - {%- break -%} {%- endif -%} {%- endfor -%} {%- endif -%} From aa1aee9d8d32a696535ff1228ad02962a8ef2ec8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 19:29:06 +0200 Subject: [PATCH 427/607] Variable name adjustments --- deploy/terraform/run/sap_system/module.tf | 1 - .../run/sap_system/tfvar_variables.tf | 16 ----------- .../common_infrastructure/storage_accounts.tf | 27 ------------------- .../common_infrastructure/variables_global.tf | 6 ----- 4 files changed, 50 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 5137b5d520..f64290fd10 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -94,7 +94,6 @@ module "common_infrastructure" { use_private_endpoint = var.use_private_endpoint use_random_id_for_storageaccounts = var.use_random_id_for_storageaccounts use_scalesets_for_deployment = var.use_scalesets_for_deployment - use_service_endpoint = var.use_service_endpoint } #------------------------------------------------------------------------------- diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 7d79006303..4619e4f99f 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -111,12 +111,6 @@ variable "app_proximityplacementgroup_arm_ids" { } -variable "use_service_endpoint" { - description = "Boolean value indicating if service endpoints should be used for the deployment" - default = false - type = bool - } - variable "use_private_endpoint" { description = "Boolean value indicating if private 
endpoint should be used for the deployment" default = false @@ -1008,11 +1002,6 @@ variable "management_dns_resourcegroup_name" { type = string } -variable "create_storage_dns_a_records" { - description = "Boolean value indicating if dns a records should be created for the storage accounts" - default = false - type = bool - } variable "dns_zone_names" { description = "Private DNS zone names" @@ -1064,11 +1053,6 @@ variable "sapmnt_private_endpoint_id" { default = "" } -variable "Use_AFS_for_Installation" { - description = "If true, will use AFS for installation media." - default = false - } - ######################################################################################### # # # ANF settings # diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 565c8e8141..b418389a51 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -62,33 +62,6 @@ resource "azurerm_storage_account" "sapmnt" { } -# resource "azurerm_private_dns_a_record" "sapmnt" { -# provider = azurerm.dnsmanagement -# depends_on = [ -# azurerm_private_endpoint.sapmnt -# ] -# count = var.create_storage_dns_a_records ? 1 : 0 -# name = replace( -# lower( -# format("%s%s", -# local.prefix, -# local.resource_suffixes.sapmnt -# ) -# ), -# "/[^a-z0-9]/", -# "" -# ) -# zone_name = var.dns_zone_names.file_dns_zone_name -# resource_group_name = var.management_dns_resourcegroup_name -# ttl = 3600 -# records = [data.azurerm_network_interface.sapmnt[count.index].ip_configuration[0].private_ip_address] - - -# lifecycle { -# ignore_changes = [tags] -# } -# } - data "azurerm_storage_account" "sapmnt" { provider = azurerm.main count = var.NFS_provider == "AFS" ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 0b2ac0df03..ceaf33efcb 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -211,12 +211,6 @@ variable "use_private_endpoint" { type = bool } -variable "use_service_endpoint" { - description = "Boolean value indicating if service endpoints should be used for the deployment" - default = false - type = bool - } - ######################################################################################### # # # DNS settings # From 4dfa9245c9d82cb118377ad0c115e94f48e65d9e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 20:16:46 +0200 Subject: [PATCH 428/607] Remove create_vaults_and_storage_dns_a_records variable --- deploy/terraform/run/sap_landscape/module.tf | 1 - deploy/terraform/run/sap_landscape/tfvar_variables.tf | 5 ----- .../modules/sap_landscape/variables_global.tf | 5 ----- 3 files changed, 11 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index a8d8753e6a..92fcdeb905 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -17,7 +17,6 @@ module "sap_landscape" { Agent_IP = var.add_Agent_IP ? 
var.Agent_IP : "" ANF_settings = local.ANF_settings authentication = local.authentication - create_vaults_and_storage_dns_a_records = var.create_vaults_and_storage_dns_a_records create_transport_storage = var.create_transport_storage deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) diagnostics_storage_account = local.diagnostics_storage_account diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 2cd7b88549..7a61ae2de0 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -484,11 +484,6 @@ variable "management_dns_resourcegroup_name" { type = string } -variable "create_vaults_and_storage_dns_a_records" { - description = "Boolean value indicating if dns a records should be created for the vaults and storage accounts" - default = false - type = bool - } variable "dns_server_list" { description = "DNS server list" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index 57b46ad539..e7eda1cce9 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -231,11 +231,6 @@ variable "register_virtual_network_to_dns" { } -variable "create_vaults_and_storage_dns_a_records" { - description = "Boolean value indicating if dns a records should be created for the vaults and storage accounts" - type = bool - } - variable "use_custom_dns_a_registration" { description = "Boolean value indicating if a custom dns a records should be created for private endpoints" default = false From 58ccb833b7159da48219e1fc496be616a09bf127 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Sat, 9 Mar 2024 00:10:10 +0530 Subject: [PATCH 429/607] Add systemd reload and stop SBD service in cluster setup --- .../tasks/1.17.1.2-sbd.yaml | 4 ++++ .../tasks/1.17.2.0-cluster-Suse.yml | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index 1e0a5b5f51..a6db93c040 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -276,6 +276,10 @@ # - name: "systemctl enable sbd" # ansible.builtin.shell: | # systemctl enable sbd +- name: "Ensure systemctl daemon is reloaded" + ansible.builtin.systemd: + daemon_reload: true + - name: "systemctl enable sbd" ansible.builtin.systemd: name: sbd diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 679a2ed01e..c080fd781c 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -33,6 +33,16 @@ # - name: "1.17 Generic Pacemaker - Ensure csync2 is configured" # ansible.builtin.command: crm cluster init -y csync2 --interface eth0 + - name: "1.17 Generic Pacemaker - Stop SBD service" + ansible.builtin.systemd: + name: sbd + state: stopped + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") or + (scs_cluster_type == "ASD") or + (scs_cluster_type == 
"ISCSI") + - name: "1.17 Generic Pacemaker - Ensure corosync is configured" ansible.builtin.command: "crm cluster init -y -u corosync --interface eth0" @@ -58,7 +68,7 @@ - scs_cluster_type == "AFA" - name: "1.17 Generic Pacemaker - Ensure cluster (scs_{{ sap_sid | upper }}) is configured - SBD" - ansible.builtin.command: "crm cluster init -y --name 'scs_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey --sbd-device={{ sbd_device }}" + ansible.builtin.command: "crm cluster init -y --name 'scs_{{ sap_sid | upper }}' --interface eth0 --no-overwrite-sshkey --sbd-device={{ sbd_device }}" when: - node_tier == 'scs' - not scs_cluster_type == "AFA" @@ -239,10 +249,8 @@ - name: "1.17 Generic Pacemaker - stop cluster on both nodes to ensure sbd is enabled." ansible.builtin.command: crm cluster stop - - name: "1.17 Generic Pacemaker - Check if cluster is stopped" + - name: "1.17 Generic Pacemaker - wait for 60 seconds" ansible.builtin.wait_for: - path: /var/lib/pacemaker/cib/cib.xml - state: absent timeout: 60 - name: "1.17 Generic Pacemaker - start cluster on both nodes to ensure sbd is enabled." From d73d4791276bba262a51f303f7b83a65f3212529 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Sat, 9 Mar 2024 00:29:43 +0530 Subject: [PATCH 430/607] Refactor 2.4 Hosts: Remove unnecessary condition and update debug message --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index d5f6af056c..96b868cc5c 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -108,8 +108,6 @@ - platform == 'DB2' - name: "2.4 Hosts: Process pas_hostname variable and update host file when it is defined" - when: - - ( custom_pas_virtual_hostname | length > 1 )| default(false, true) block: - name: "2.4 Hosts: - Set virtual_host fact from the fetched PAS server list" @@ -134,8 +132,8 @@ - name: "2.4 Hosts: - Display the variables being used" ansible.builtin.debug: msg: - - "pas_hostname: {{ custom_pas_virtual_hostname }} " - "pas_virtual_hostname: {{ pas_virtual_hostname }} " + - "custom_pas_hostname: {{ custom_pas_virtual_hostname }} " - "virtualhost_in_inventory: {{ pas_virtualhost_from_inventory }}" - name: "2.4 Hosts: - Get the line from /etc/hosts with virtual_host" From ce6600d9e182e69d8224dbd283eea46c9bd66cfd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 22:20:00 +0200 Subject: [PATCH 431/607] Update crm resource command to use status instead of show --- .../1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index c080fd781c..6ddd509645 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -203,7 +203,7 @@ - inventory_hostname == primary_instance_name block: - name: "1.17 Generic Pacemaker - Check if Stonith SBD is configured in cluster" - ansible.builtin.shell: crm resource show stonith-sbd + ansible.builtin.shell: crm resource status stonith-sbd register: stonith_sbd_configured failed_when: false changed_when: false From 8c88deeb24ace0c1c54143f70698044689bdfd06 
Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 22:25:37 +0200 Subject: [PATCH 432/607] Update proximity_placement_group_id in vm-app.tf --- .../modules/sap_system/app_tier/vm-app.tf | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index a4875b1600..bdbafced26 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -123,10 +123,12 @@ resource "azurerm_linux_virtual_machine" "app" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - proximity_placement_group_id = var.application_tier.app_use_ppg ? ( - var.ppg[count.index % max(length(var.ppg), 1)]) : ( - null - ) + proximity_placement_group_id = var.application_tier.app_use_avset ? ( + null) : ( + var.application_tier.app_use_ppg ? ( + var.ppg[count.index % max(length(var.ppg), 1)]) : ( + null) + ) //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = var.application_tier.app_use_avset ? ( @@ -264,10 +266,13 @@ resource "azurerm_windows_virtual_machine" "app" { source_image_id = var.application_tier.app_os.type == "custom" ? var.application_tier.app_os.source_image_id : null - proximity_placement_group_id = var.application_tier.app_use_ppg ? ( - var.ppg[count.index % max(length(var.ppg), 1)]) : ( - null - ) + proximity_placement_group_id = var.application_tier.app_use_avset ? ( + null) : ( + var.application_tier.app_use_ppg ? ( + var.ppg[count.index % max(length(var.ppg), 1)]) : ( + null) + ) + //If more than one servers are deployed into a single zone put them in an availability set and not a zone availability_set_id = var.application_tier.app_use_avset ? ( From 35781152b7d3be62bfcbe29e80effdd9e1173732 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 22:43:17 +0200 Subject: [PATCH 433/607] Update proximity_placement_group_id in vm-app.tf --- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index bdbafced26..85dc755f96 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -123,7 +123,7 @@ resource "azurerm_linux_virtual_machine" "app" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - proximity_placement_group_id = var.application_tier.app_use_avset ? ( + proximity_placement_group_id = length(var.scale_set_id) > 0 ? ( null) : ( var.application_tier.app_use_ppg ? ( var.ppg[count.index % max(length(var.ppg), 1)]) : ( @@ -266,7 +266,7 @@ resource "azurerm_windows_virtual_machine" "app" { source_image_id = var.application_tier.app_os.type == "custom" ? var.application_tier.app_os.source_image_id : null - proximity_placement_group_id = var.application_tier.app_use_avset ? ( + proximity_placement_group_id = length(var.scale_set_id) > 0 ? ( null) : ( var.application_tier.app_use_ppg ? 
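  # Reading the conditional: VMs attached to a flexible scale set
  # (length(var.scale_set_id) > 0) get no proximity placement group of their
  # own, placement then being governed by the scale set itself; otherwise,
  # when app_use_ppg is set, a group is picked round-robin and
  # max(length(var.ppg), 1) keeps the modulo safe for an empty list.
  # For three groups the assignment cycles:
  #   count.index : 0 1 2 3 4 5
  #   ppg chosen  : 0 1 2 0 1 2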
( var.ppg[count.index % max(length(var.ppg), 1)]) : ( From 3a6bd42b89ce3f0c9eeadcc6c5d05fe67415910c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 23:01:37 +0200 Subject: [PATCH 434/607] Add proximity placement group ID to virtual machine scale set resource --- .../sap_system/common_infrastructure/infrastructure.tf | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf index 2aa4edecac..cf7c32438f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf @@ -168,6 +168,11 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "scale_set" { zones = local.zones tags = var.tags + + proximity_placement_group_id = length(local.zones) <= 1 ? ( + local.ppg_exists ? local.ppg_arm_ids[0] : azurerm_proximity_placement_group.ppg[0].id) :( + null + ) } data "azurerm_orchestrated_virtual_machine_scale_set" "scale_set" { From 390981ae99f08a1693c262024dbf0654fa741dd6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 8 Mar 2024 23:29:47 +0200 Subject: [PATCH 435/607] AvSet logic --- deploy/terraform/run/sap_system/transform.tf | 4 ++-- .../sap_system/common_infrastructure/infrastructure.tf | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 5f2570de75..e0d8b2ee99 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -174,10 +174,10 @@ locals { 0 ) app_sku = var.application_server_sku - app_use_ppg = var.application_server_count > 0 ? var.use_scalesets_for_deployment ? ( + app_use_ppg = var.application_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? ( false) : ( var.application_server_use_ppg - ) : false + ) app_use_avset = var.application_server_count == 0 || var.use_scalesets_for_deployment || !local.enable_app_tier_deployment ? ( false) : ( var.application_server_use_avset diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf index cf7c32438f..d81b1be2c7 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/infrastructure.tf @@ -169,10 +169,10 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "scale_set" { zones = local.zones tags = var.tags - proximity_placement_group_id = length(local.zones) <= 1 ? ( - local.ppg_exists ? local.ppg_arm_ids[0] : azurerm_proximity_placement_group.ppg[0].id) :( - null - ) + # proximity_placement_group_id = length(local.zones) <= 1 ? ( + # local.ppg_exists ? 
local.ppg_arm_ids[0] : azurerm_proximity_placement_group.ppg[0].id) :(
  #   null
  # )
}

data "azurerm_orchestrated_virtual_machine_scale_set" "scale_set" {

From f70e59232d0e17597d9b10664fde4662cc6d208e Mon Sep 17 00:00:00 2001
From: hdamecharla
Date: Sat, 9 Mar 2024 03:02:23 +0530
Subject: [PATCH 436/607] Refactor STONITH Azure fence agent creation in 1.17 Generic Pacemaker role

---
 .../1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
index 6ddd509645..0bc730e647 100644
--- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
+++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
@@ -194,7 +194,7 @@
 # |                                                                          |
 # +------------------------------------4--------------------------------------*/
 # scs_high_availability = true is already assumed
-- name: "1.17 Generic Pacemaker - Ensure the STONITH Azure fence agent is created when SBD is used"
+- name: "1.17 Generic Pacemaker - Ensure that STONITH using SBD is created"
   when:
     - (database_cluster_type == "ASD")   or
       (database_cluster_type == "ISCSI") or
@@ -214,7 +214,7 @@
   - name: "1.17 Generic Pacemaker - Ensure Stonith SBD is configured in cluster"
     when: stonith_sbd_configured.rc != 0
-    ansible.builtin.command: >-
+    ansible.builtin.shell: >-
       crm configure primitive stonith-sbd stonith:external/sbd \
         params pcmk_delay_max="15" \
         op monitor interval="600" timeout="15"

From 567bf9cfe329ac503b329abf0d268a7184afdf9f Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sat, 9 Mar 2024 17:37:09 +0200
Subject: [PATCH 437/607] Add file share and key vault creation

---
 deploy/scripts/Test-SDAFReadiness.ps1 | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1
index dd3b130d32..ac9b3075f5 100644
--- a/deploy/scripts/Test-SDAFReadiness.ps1
+++ b/deploy/scripts/Test-SDAFReadiness.ps1
@@ -228,7 +228,6 @@ if ($selection.ToUpper() -eq "Y") {
 }
-
 $selection = Read-Host "Create file share Y/N"
 if ($selection.ToUpper() -eq "Y") {
   $OutputString = "Creating File share: " + $shareName
@@ -237,7 +236,17 @@ if ($selection.ToUpper() -eq "Y") {
   az storage share-rm create --resource-group $resourceGroupName --storage-account $storageAccountName --name $shareName --enabled-protocols NFS --access-tier "Premium" --quota 128 --output none
 }

+$kvName = "sdaftestKV$rnd"
+
+$selection = Read-Host "Create key vault Y/N"
+if ($selection.ToUpper() -eq "Y") {
+  $OutputString = "Creating Key vault: " + $kvName
+  Write-Host $OutputString -foregroundcolor Yellow
+  Add-Content -Path $LogFileName $OutputString
+  az keyvault create --name $kvName --resource-group $resourceGroupName --location $Location --query "provisioningState" --enable-purge-protection false --retention-days 7
+  az keyvault secret set --vault-name $kvName --name "sdaftestsecret" --value "sdaftestsecretvalue" --query "id"
+}

 $vmssName = "SDAF-VmssFlex"
 $OutputString = "Creating flexible scale set: " + $vmssName

From 4a83c6cc82a48f17ac565715daa23b662915ad09 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sat, 9 Mar 2024 21:11:26 +0200
Subject: [PATCH 438/607] Remove unnecessary blank lines in Test-SDAFReadiness.ps1 script

---
 deploy/scripts/Test-SDAFReadiness.ps1 | 11 -----------
 1 file changed, 11 deletions(-)

diff --git
a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1 index ac9b3075f5..f6561e516e 100644 --- a/deploy/scripts/Test-SDAFReadiness.ps1 +++ b/deploy/scripts/Test-SDAFReadiness.ps1 @@ -413,13 +413,11 @@ if ($selection.ToUpper() -eq "Y") { $OutputString = "$url is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } else { $OutputString = "$url is not accessible" Write-Host $OutputString -ForegroundColor Red Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } } @@ -434,26 +432,22 @@ if ($selection.ToUpper() -eq "Y") { $OutputString = "$IP is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } elseif ($result.Contains("Connected")) { $OutputString = "$IP is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } else { $OutputString = "$IP is not accessible" Write-Host $OutputString -ForegroundColor Red Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } } Write-Host "Checking 'runtime' URLs" -ForegroundColor Yellow Add-Content -Path $LogFileName "Checking 'runtime' URLs" - Add-Content -Path $LogFileName "" foreach ($url in $UrlsToCheck.sap.urls) { Write-Host "Checking if $url is accessible from the Virtual Machine" @@ -462,13 +456,11 @@ if ($selection.ToUpper() -eq "Y") { $OutputString = "$url is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } else { $OutputString = "$url is not accessible" Write-Host $OutputString -ForegroundColor Red Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } } @@ -482,19 +474,16 @@ if ($selection.ToUpper() -eq "Y") { $OutputString = "$IP is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } elseif ($result.Contains("Connected")) { $OutputString = "$IP is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } else { $OutputString = "$IP is not accessible" Write-Host $OutputString -ForegroundColor Red Add-Content -Path $LogFileName $OutputString - Add-Content -Path $LogFileName "" } } From 56a3c81ee8e3c91715460c7e4122210542aa6e66 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 11:33:44 +0530 Subject: [PATCH 439/607] Refactor cluster configuration and stop SBD service --- .../tasks/1.17.2.0-cluster-Suse.yml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 0bc730e647..eaa958a7ac 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -26,6 +26,15 @@ - ansible_facts.packages['cloud-netconfig-azure'] - (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3 +- name: "1.17 Generic Pacemaker - Stop SBD service" + ansible.builtin.systemd: + name: sbd + state: stopped + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") or + 
(scs_cluster_type == "ASD") or + (scs_cluster_type == "ISCSI") - name: "1.17 Generic Pacemaker - Ensure Primary node initiates the Cluster" when: ansible_hostname == primary_instance_name @@ -33,16 +42,6 @@ # - name: "1.17 Generic Pacemaker - Ensure csync2 is configured" # ansible.builtin.command: crm cluster init -y csync2 --interface eth0 - - name: "1.17 Generic Pacemaker - Stop SBD service" - ansible.builtin.systemd: - name: sbd - state: stopped - when: - - (database_cluster_type == "ASD") or - (database_cluster_type == "ISCSI") or - (scs_cluster_type == "ASD") or - (scs_cluster_type == "ISCSI") - - name: "1.17 Generic Pacemaker - Ensure corosync is configured" ansible.builtin.command: "crm cluster init -y -u corosync --interface eth0" From 485499c5ebaeaac785e93546992abfa3f47c9cb4 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 12:21:11 +0530 Subject: [PATCH 440/607] convert to list --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 96b868cc5c..88c218ecf3 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -117,7 +117,7 @@ {%- if ( query('inventory_hostnames', sap_sid | upper ~ '_PAS') | length > 0 ) -%} {%- set _virthost = hostvars[query('inventory_hostnames', sap_sid | upper ~ '_PAS') | first]['virtual_host'] -%} {%- else -%} - {%- for item in hostvars[query('inventory_hostnames', 'all')] -%} + {%- for item in hostvars[query('inventory_hostnames', 'all')] | list -%} {%- if 'pas' in hostvars[item][supported_tiers] -%} {%- set _virthost = hostvars[item]['virtual_host'] -%} {%- endif -%} From 91df5f22e94c599e8caa91f9ffb02daa342089d5 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 12:56:59 +0530 Subject: [PATCH 441/607] Refactor virtual host retrieval in 2.4 Hosts playbook --- .../2.4-hosts-file/tasks/main.yaml | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 88c218ecf3..9197c47e89 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -110,20 +110,20 @@ - name: "2.4 Hosts: Process pas_hostname variable and update host file when it is defined" block: - - name: "2.4 Hosts: - Set virtual_host fact from the fetched PAS server list" + - name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list" ansible.builtin.set_fact: - pas_virtualhost_from_inventory: >- - {%- set _virthost = "" -%} - {%- if ( query('inventory_hostnames', sap_sid | upper ~ '_PAS') | length > 0 ) -%} - {%- set _virthost = hostvars[query('inventory_hostnames', sap_sid | upper ~ '_PAS') | first]['virtual_host'] -%} - {%- else -%} - {%- for item in hostvars[query('inventory_hostnames', 'all')] | list -%} - {%- if 'pas' in hostvars[item][supported_tiers] -%} - {%- set _virthost = hostvars[item]['virtual_host'] -%} - {%- endif -%} - {%- endfor -%} - {%- endif -%} - {{- _virthost -}} + pas_virtualhost_from_inventory: >- + {%- set _virthost = "" -%} + {%- if ( groups[sap_sid | upper ~ '_PAS'] | length > 0 ) -%} + {%- set _virthost = hostvars[groups[sap_sid | upper ~ '_PAS'][0]]['virtual_host'] -%} + {%- else -%} + {%- for item in groups['all'] -%} + 
{%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%} + {%- set _virthost = hostvars[item]['virtual_host'] -%} + {%- endif -%} + {%- endfor -%} + {%- endif -%} + {{- _virthost -}} - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: From 3738a18c5fe71164b726a00be99d393c49e93faf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 11 Mar 2024 09:30:34 +0200 Subject: [PATCH 442/607] Add a script for just checking URLs --- deploy/configs/sdaf_urls.json | 6 +- deploy/scripts/Test-SDAFReadiness.ps1 | 43 +++-- deploy/scripts/Test-SDAFURLs.ps1 | 248 ++++++++++++++++++++++++++ 3 files changed, 283 insertions(+), 14 deletions(-) create mode 100644 deploy/scripts/Test-SDAFURLs.ps1 diff --git a/deploy/configs/sdaf_urls.json b/deploy/configs/sdaf_urls.json index 008d9e16be..40fb9bbfbf 100644 --- a/deploy/configs/sdaf_urls.json +++ b/deploy/configs/sdaf_urls.json @@ -12,9 +12,10 @@ "https://pypi.org", "https://files.pythonhosted.org", "https://pythonhosted.org", - "https://galaxy.ansible.com" + "https://galaxy.ansible.com", + "https://ansible-galaxy-ng.s3.dualstack.us-east-1.amazonaws.com" ], - "IPs": [] + "IPs": ["vstsagenttools.blob.core.windows.net"] }, "windows": { "urls": [ @@ -44,7 +45,6 @@ "52.142.4.99", "20.248.180.252", "20.24.186.80", - "13.72.186.193", "13.72.14.155", "52.244.249.194", "52.187.53.250", diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1 index f6561e516e..7365519b95 100644 --- a/deploy/scripts/Test-SDAFReadiness.ps1 +++ b/deploy/scripts/Test-SDAFReadiness.ps1 @@ -12,9 +12,14 @@ function Show-Menu($data) { $rnd = $(Get-Random -Minimum 1 -Maximum 1000).ToString() -$LogFileDir = Read-Host "Please enter the directory to save the log file" -if(Test-Path $LogFileDir) { +$LogFileDir = $Env:LogFileDir +if ($null -eq $LogFileDir -or $LogFileDir -eq "") { + $LogFileDir = Read-Host "Please enter the directory to save the log file" +} + + +if (Test-Path $LogFileDir) { $LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md" $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName } @@ -161,6 +166,8 @@ if ($authenticationMethod -ne "User Account") { } } +$vmName="SDAF-VM" + $vnetName = "SDAF-VNet" $anfSubnetName = "SDAF-anf" $subnetName = "SDAF-Subnet" @@ -331,7 +338,7 @@ if ($selection.ToUpper() -eq "Y") { if ($null -eq $zone -or $zone -eq "") { Write-Host "Creating a Virtual Machine" -foregroundcolor Yellow - $vmStatus = $(az vm create --resource-group $resourceGroupName --name "SDAF-VM" --image $distro --admin-username "azureadm" --admin-password $ARM_CLIENT_SECRET --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --no-wait --query "provisioningState") + $vmStatus = $(az vm create --resource-group $resourceGroupName --name $vmName --image $distro --admin-username "azureadm" --admin-password $ARM_CLIENT_SECRET --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --no-wait --query "provisioningState") } else { $diskName = "SDAFdisk" @@ -340,7 +347,7 @@ if ($selection.ToUpper() -eq "Y") { Write-Host "Creating a Premium SSD v2 disk" -foregroundcolor Yellow az disk create -n $diskName -g $resourceGroupName --size-gb 100 --disk-iops-read-write 5000 --disk-mbps-read-write 150 --location $Location --zone $zone --sku PremiumV2_LRS --logical-sector-size $logicalSectorSize --query "provisioningState" Write-Host "Creating a Virtual Machine" -foregroundcolor Yellow - $vmStatus = $(az vm create --resource-group 
$resourceGroupName --name "SDAF-VM" --image $distro --admin-username "azureadm" --admin-password $VM_password --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --zone $zone --attach-data-disks $diskName --query "provisioningState") + $vmStatus = $(az vm create --resource-group $resourceGroupName --name $vmName --image $distro --admin-username "azureadm" --admin-password $VM_password --size $vmSKU --vnet-name $vnetName --subnet $subnetName --vmss $vmssid --zone $zone --attach-data-disks $diskName --query "provisioningState") } @@ -360,12 +367,16 @@ if ($selection.ToUpper() -eq "Y") { foreach ($url in $UrlsToCheck.deployer.urls) { Write-Host "Checking if $url is accessible from the Virtual Machine" - $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) if ($result.Contains("200 OK")) { $OutputString = "$url is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString - + } + elseif ($result.Contains("403 Forbidden")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString } else { $OutputString = "$url is not accessible" @@ -380,7 +391,7 @@ if ($selection.ToUpper() -eq "Y") { foreach ($IP in $UrlsToCheck.deployer.IPs) { Write-Host "Checking if $IP is accessible from the Virtual Machine" - $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) if ($result.Contains("succeeded!")) { $OutputString = "$IP is accessible" Write-Host $OutputString -ForegroundColor Green @@ -408,12 +419,17 @@ if ($selection.ToUpper() -eq "Y") { foreach ($url in $UrlsToCheck.windows.urls) { Write-Host "Checking if $url is accessible from the Virtual Machine" - $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) if ($result.Contains("200 OK")) { $OutputString = "$url is accessible" Write-Host $OutputString -ForegroundColor Green Add-Content -Path $LogFileName $OutputString } + elseif ($result.Contains("403 Forbidden")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } else { $OutputString = "$url is not accessible" Write-Host $OutputString -ForegroundColor Red @@ -427,7 +443,7 @@ if ($selection.ToUpper() -eq "Y") { foreach ($IP in $UrlsToCheck.windows.IPs) { Write-Host "Checking if $IP is accessible from the Virtual Machine" - $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName 
--command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
   if ($result.Contains("succeeded!")) {
     $OutputString = "$IP is accessible"
     Write-Host $OutputString -ForegroundColor Green
@@ -451,12 +467,17 @@ if ($selection.ToUpper() -eq "Y") {
 
   foreach ($url in $UrlsToCheck.sap.urls) {
     Write-Host "Checking if $url is accessible from the Virtual Machine"
-    $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message)
+    $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message)
     if ($result.Contains("200 OK")) {
       $OutputString = "$url is accessible"
       Write-Host $OutputString -ForegroundColor Green
       Add-Content -Path $LogFileName $OutputString
     }
+    elseif ($result.Contains("403 Forbidden")) {
+      $OutputString = "$url is accessible"
+      Write-Host $OutputString -ForegroundColor Green
+      Add-Content -Path $LogFileName $OutputString
+    }
     else {
       $OutputString = "$url is not accessible"
       Write-Host $OutputString -ForegroundColor Red
@@ -469,7 +490,7 @@ if ($selection.ToUpper() -eq "Y") {
 
   foreach ($IP in $UrlsToCheck.sap.IPs) {
     Write-Host "Checking if $IP is accessible from the Virtual Machine"
-    $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
+    $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
     if ($result.Contains("succeeded!")) {
       $OutputString = "$IP is accessible"
       Write-Host $OutputString -ForegroundColor Green
diff --git a/deploy/scripts/Test-SDAFURLs.ps1 b/deploy/scripts/Test-SDAFURLs.ps1
new file mode 100644
index 0000000000..cbee0ae260
--- /dev/null
+++ b/deploy/scripts/Test-SDAFURLs.ps1
@@ -0,0 +1,248 @@
+function Show-Menu($data) {
+  Write-Host "================ $Title ================"
+  $i = 1
+  foreach ($d in $data) {
+    Write-Host "($i): Select '$i' for $($d)"
+    $i++
+  }
+
+  Write-Host "q: Select 'q' for Exit"
+
+}
+
+
+$LogFileDir = $Env:LogFileDir
+if ($null -eq $LogFileDir -or $LogFileDir -eq "") {
+
+  $LogFileDir = Read-Host "Please enter the directory to save the log file"
+}
+
+if (Test-Path $LogFileDir) {
+  $LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md"
+  $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName
+}
+else {
+  Write-Host "The directory does not exist"
+  return
+}
+
+Add-Content -Path $LogFileName "# SDAF URL Assessment #"
+Add-Content -Path $LogFileName ""
+$OutputString = "Time of assessment: " + $(Get-Date -Format "yyyy-MM-dd HH:mm:ss")
+Add-Content -Path $LogFileName $OutputString
+$authenticationMethod = 'Service Principal (recommended)'
+$Title = "Select the authentication method to use"
+$data = @('Service Principal (recommended)', 'User Account')
+Show-Menu($data)
+$selection = Read-Host $Title
+$authenticationMethod = $data[$selection - 1]
+
+Add-Content -Path $LogFileName ""
+$OutputString = "Authentication model: " + $authenticationMethod
+Add-Content -Path $LogFileName $OutputString
+
+
+if ($authenticationMethod -eq "User Account") {
+  az logout
+  az login --output none
+}
+else {
+  $ARM_CLIENT_ID = $Env:ARM_CLIENT_ID
+  $ARM_CLIENT_SECRET = $Env:ARM_CLIENT_SECRET
+  $ARM_TENANT_ID = $Env:ARM_TENANT_ID
+
+  if ($null -eq $ARM_CLIENT_ID -or $ARM_CLIENT_ID -eq "") {
+    $ARM_CLIENT_ID = Read-Host "Please enter the Service Principal's Application ID"
+  }
+
+  if ($null -eq $ARM_CLIENT_SECRET -or $ARM_CLIENT_SECRET -eq "") {
+    $ARM_CLIENT_SECRET = Read-Host "Please enter the Service Principal's App ID Password" -AsSecureString
+  }
+
+  $VM_password = $ARM_CLIENT_SECRET
+
+  if ($null -eq $ARM_TENANT_ID -or $ARM_TENANT_ID -eq "") {
+    $ARM_TENANT_ID = Read-Host "Please enter the Tenant ID"
+  }
+
+  if ($null -eq $ARM_SUBSCRIPTION_ID -or $ARM_SUBSCRIPTION_ID -eq "") {
+    $ARM_SUBSCRIPTION_ID = Read-Host "Please enter the Subscription ID"
+  }
+  az logout
+  az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none
+}
+
+$ARM_SUBSCRIPTION_ID = $Env:ARM_SUBSCRIPTION_ID
+if ($null -eq $ARM_SUBSCRIPTION_ID -or $ARM_SUBSCRIPTION_ID -eq "") {
+  $ARM_SUBSCRIPTION_ID = Read-Host "Please enter the Subscription ID"
+}
+
+az account set --subscription $ARM_SUBSCRIPTION_ID
+
+Add-Content -Path $LogFileName ""
+$OutputString = "Subscription: " + $ARM_SUBSCRIPTION_ID
+Add-Content -Path $LogFileName $OutputString
+
+
+$resourceGroupName = $Env:ResourceGroupName
+if ($null -eq $resourceGroupName -or $resourceGroupName -eq "") {
+  $resourceGroupName = Read-Host "Please enter the Resource Group Name"
+}
+
+$vmName = $Env:VMName
+if ($null -eq $vmName -or $vmName -eq "") {
+  $vmName = Read-Host "Please enter the Virtual Machine Name"
+}
+
+$UrlsToCheck = Get-Content -Raw -Path ..\configs\sdaf_urls.json | ConvertFrom-Json
+
+Add-Content -Path $LogFileName ""
+Add-Content -Path $LogFileName "## Check URLs ##"
+Add-Content -Path $LogFileName ""
+
+Write-Host "Checking Deployer URLs" -ForegroundColor Yellow
+Add-Content -Path $LogFileName "Checking Deployer URLs"
+
+foreach ($url in $UrlsToCheck.deployer.urls) {
+  Write-Host "Checking if $url is accessible from the Virtual Machine"
+  $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message)
+  if ($result.Contains("200 OK")) {
+    $OutputString = "$url is accessible"
+    Write-Host $OutputString -ForegroundColor Green
+    Add-Content -Path $LogFileName $OutputString
+  }
+  elseif ($result.Contains("403 Forbidden")) {
+    $OutputString = "$url is accessible"
+    Write-Host $OutputString -ForegroundColor Green
+    Add-Content -Path $LogFileName $OutputString
+  }
+  else {
+    $OutputString = "$url is not accessible"
+    Write-Host $OutputString -ForegroundColor Red
+    Add-Content -Path $LogFileName $OutputString
+  }
+}
+
+Write-Host "Checking Deployer IPs" -ForegroundColor Yellow
+Add-Content -Path $LogFileName "Checking Deployer IPs"
+Add-Content -Path $LogFileName ""
+
+foreach ($IP in $UrlsToCheck.deployer.IPs) {
+  Write-Host "Checking if $IP is accessible from the Virtual Machine"
+  $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
+  if ($result.Contains("succeeded!")) {
+    $OutputString = "$IP is accessible"
+    Write-Host $OutputString -ForegroundColor Green
+    Add-Content -Path $LogFileName $OutputString
+    Add-Content -Path $LogFileName ""
+  }
+  elseif ($result.Contains("Connected")) {
+    $OutputString = "$IP is accessible"
+    Write-Host $OutputString -ForegroundColor Green
+    Add-Content -Path $LogFileName $OutputString
+    Add-Content -Path $LogFileName ""
+  }
+  else {
+    $OutputString = "$IP is not accessible"
+    Write-Host $OutputString -ForegroundColor Red
+    
Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } +} + + +Write-Host "Checking Windows URLs" -ForegroundColor Yellow +Add-Content -Path $LogFileName "Checking Windows URLs" +Add-Content -Path $LogFileName "" + +foreach ($url in $UrlsToCheck.windows.urls) { + Write-Host "Checking if $url is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + if ($result.Contains("200 OK")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + elseif ($result.Contains("403 Forbidden")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + } +} + +Write-Host "Checking Windows IPs" -ForegroundColor Yellow +Add-Content -Path $LogFileName "Checking Windows IPs" +Add-Content -Path $LogFileName "" + +foreach ($IP in $UrlsToCheck.windows.IPs) { + Write-Host "Checking if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + elseif ($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + } +} + + +Write-Host "Checking 'runtime' URLs" -ForegroundColor Yellow +Add-Content -Path $LogFileName "Checking 'runtime' URLs" + +foreach ($url in $UrlsToCheck.sap.urls) { + Write-Host "Checking if $url is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message) + if ($result.Contains("200 OK")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + elseif ($result.Contains("403 Forbidden")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + } +} + +Write-Host "Checking 'runtime' IPs" -ForegroundColor Yellow +Add-Content -Path $LogFileName "Checking 'runtime' IPs" + +foreach ($IP in $UrlsToCheck.sap.IPs) { + Write-Host "Checking if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + elseif 
($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + } +} + From 132cbd9edb93a5706e4e9d6a748508f787b8c79d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 14:10:55 +0530 Subject: [PATCH 443/607] Refactor virtual host retrieval logic in main.yaml --- .../roles-sap-os/2.4-hosts-file/tasks/main.yaml | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 9197c47e89..ab5f47a83c 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -114,15 +114,11 @@ ansible.builtin.set_fact: pas_virtualhost_from_inventory: >- {%- set _virthost = "" -%} - {%- if ( groups[sap_sid | upper ~ '_PAS'] | length > 0 ) -%} - {%- set _virthost = hostvars[groups[sap_sid | upper ~ '_PAS'][0]]['virtual_host'] -%} - {%- else -%} - {%- for item in groups['all'] -%} - {%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%} - {%- set _virthost = hostvars[item]['virtual_host'] -%} - {%- endif -%} - {%- endfor -%} - {%- endif -%} + {%- for item in groups['all'] -%} + {%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%} + {%- set _virthost = hostvars[item]['virtual_host'] -%} + {%- endif -%} + {%- endfor -%} {{- _virthost -}} - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" From 57d63e9f4d872951d6afa21e95011c3d58a6ad9a Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 14:43:02 +0530 Subject: [PATCH 444/607] - Add cluster restart and wait tasks - Update virtual host fact in hosts file --- .../tasks/1.17.2.0-cluster-Suse.yml | 17 +++++++++++++++++ .../roles-sap-os/2.4-hosts-file/tasks/main.yaml | 5 +++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index eaa958a7ac..54447e9df0 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -100,6 +100,7 @@ ansible.builtin.user: name: hacluster password: "{{ password_ha_db_cluster | password_hash('sha512', 65534 | random(seed=None) | string) }}" + - name: "1.17 Generic Pacemaker - Ensure cluster configuration contains correct details" ansible.builtin.template: src: corosync.conf.j2 @@ -193,6 +194,21 @@ # | | # +------------------------------------4--------------------------------------*/ # scs_high_availability = true is already assumed +- name: "1.17 Generic Pacemaker - Restart cluster services on all nodes before SBD roles are assigned" + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") or + (scs_cluster_type == "ASD") or + (scs_cluster_type == "ISCSI") + block: + + - name: "1.17 Generic Pacemaker - Restart all cluster services on all members" + ansible.builtin.shell: crm cluster restart --all + + - name: "1.17 Generic Pacemaker - wait for 60 seconds" + ansible.builtin.wait_for: + timeout: 120 + - name: "1.17 Generic Pacemaker 
- Ensure that STONITH using SBD is created" when: - (database_cluster_type == "ASD") or @@ -201,6 +217,7 @@ (scs_cluster_type == "ISCSI") - inventory_hostname == primary_instance_name block: + - name: "1.17 Generic Pacemaker - Check if Stonith SBD is configured in cluster" ansible.builtin.shell: crm resource status stonith-sbd register: stonith_sbd_configured diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index ab5f47a83c..31aefb527d 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -110,13 +110,14 @@ - name: "2.4 Hosts: Process pas_hostname variable and update host file when it is defined" block: - - name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list" + - name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list" ansible.builtin.set_fact: pas_virtualhost_from_inventory: >- {%- set _virthost = "" -%} {%- for item in groups['all'] -%} + {%- set _virthost = 'abcd-' -%} {%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%} - {%- set _virthost = hostvars[item]['virtual_host'] -%} + {%- set _virthost = _virthost + hostvars[item]['virtual_host'] -%} {%- endif -%} {%- endfor -%} {{- _virthost -}} From 33afa310e95892b11b708581a6bf2beb15ddc009 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 14:47:58 +0530 Subject: [PATCH 445/607] refactor the hosts file --- .../roles-sap-os/2.4-hosts-file/tasks/main.yaml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 31aefb527d..0694fa3a83 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -114,14 +114,18 @@ ansible.builtin.set_fact: pas_virtualhost_from_inventory: >- {%- set _virthost = "" -%} - {%- for item in groups['all'] -%} - {%- set _virthost = 'abcd-' -%} - {%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%} - {%- set _virthost = _virthost + hostvars[item]['virtual_host'] -%} - {%- endif -%} - {%- endfor -%} + {%- if ( groups[sap_sid ~ '_PAS'] | length > 0 ) -%} + {%- set _virthost = hostvars[groups[sap_sid ~ '_PAS'][0]]['virtual_host'] -%} + {%- else -%} + {%- for item in groups['all'] -%} + {%- if ( hostvars[item].supported_tiers is defined ) and ( 'pas' in hostvars[item].supported_tiers ) -%} + {%- set _virthost = hostvars[item]['virtual_host'] -%} + {%- endif -%} + {%- endfor -%} + {%- endif -%} {{- _virthost -}} + - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(pas_virtualhost_from_inventory, true) }}" From aaafd216872601fbafd962bf9fd125c247272d9e Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 15:40:47 +0530 Subject: [PATCH 446/607] This commit refactors the logic for retrieving the virtual host in the 2.4-hosts-file task. 
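

A note on why the loop-based variants of this lookup keep needing rework: Jinja2 scopes
{% set %} assignments made inside a {% for %} block to that loop, so a plain _virthost
assigned inside the loop never survives to the final {{- _virthost -}}. When a loop over
groups['all'] is unavoidable, the Jinja2 namespace() helper carries the value out of the
loop; a minimal sketch of the same task using it (illustrative only, the ns object is
not part of this patch series):

- name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list"
  ansible.builtin.set_fact:
    pas_virtualhost_from_inventory: >-
      {%- set ns = namespace(virthost='') -%}
      {%- for item in groups['all'] -%}
      {%- if hostvars[item].supported_tiers is defined and 'pas' in hostvars[item].supported_tiers -%}
      {%- set ns.virthost = hostvars[item]['virtual_host'] -%}
      {%- endif -%}
      {%- endfor -%}
      {{- ns.virthost -}}

With namespace(), the last matching host wins, which matches the intent of the
original loop before the inventory-query rewrite below.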
--- .../roles-sap-os/2.4-hosts-file/tasks/main.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 0694fa3a83..c3e48b41bb 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -117,15 +117,18 @@ {%- if ( groups[sap_sid ~ '_PAS'] | length > 0 ) -%} {%- set _virthost = hostvars[groups[sap_sid ~ '_PAS'][0]]['virtual_host'] -%} {%- else -%} - {%- for item in groups['all'] -%} - {%- if ( hostvars[item].supported_tiers is defined ) and ( 'pas' in hostvars[item].supported_tiers ) -%} - {%- set _virthost = hostvars[item]['virtual_host'] -%} - {%- endif -%} + {%- for group_name, group in groups.items() -%} + {%- for host in group -%} + {%- if hostvars[host].supported_tiers is defined and 'pas' in hostvars[host].supported_tiers -%} + {%- set _virthost = hostvars[host]['virtual_host'] -%} + {%- endif -%} + {%- endfor -%} {%- endfor -%} {%- endif -%} {{- _virthost -}} + - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(pas_virtualhost_from_inventory, true) }}" From 2fa060f3e6f648984beb00c187f23605763a08a9 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 18:03:23 +0530 Subject: [PATCH 447/607] Refactor 2.4 Hosts: Set virtual_host fact from the fetched PAS server list --- .../2.4-hosts-file/tasks/main.yaml | 23 ++++++------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index c3e48b41bb..37c5777701 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -112,22 +112,13 @@ - name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list" ansible.builtin.set_fact: - pas_virtualhost_from_inventory: >- - {%- set _virthost = "" -%} - {%- if ( groups[sap_sid ~ '_PAS'] | length > 0 ) -%} - {%- set _virthost = hostvars[groups[sap_sid ~ '_PAS'][0]]['virtual_host'] -%} - {%- else -%} - {%- for group_name, group in groups.items() -%} - {%- for host in group -%} - {%- if hostvars[host].supported_tiers is defined and 'pas' in hostvars[host].supported_tiers -%} - {%- set _virthost = hostvars[host]['virtual_host'] -%} - {%- endif -%} - {%- endfor -%} - {%- endfor -%} - {%- endif -%} - {{- _virthost -}} - - + pas_virtualhost_from_inventory: "{{ pas_virtualhost_from_inventory | default([]) + [item] }}" + with_items: + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + when: + - "'pas' in hostvars[item]['supported_tiers']" - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: From 75c2695fd7adbfdd3661efd6c50db8a930bba7e1 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 18:29:06 +0530 Subject: [PATCH 448/607] Refactor 2.4 Hosts file tasks to use pas_server_temp for virtual host --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml 
b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 37c5777701..7df2769ed2 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -112,7 +112,7 @@ - name: "2.4 Hosts: Set virtual_host fact from the fetched PAS server list" ansible.builtin.set_fact: - pas_virtualhost_from_inventory: "{{ pas_virtualhost_from_inventory | default([]) + [item] }}" + pas_server_temp: "{{ pas_server_temp | default([]) + [item] }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" @@ -122,7 +122,8 @@ - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: - pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(pas_virtualhost_from_inventory, true) }}" + pas_virtualhost_from_inventory: "{{ pas_server_temp | first }}" + pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(pas_server_temp[0], true) }}" - name: "2.4 Hosts: - Display the variables being used" ansible.builtin.debug: From 20ed51771be3f14e9a4625d1c9a42e3a689eb681 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 18:30:58 +0530 Subject: [PATCH 449/607] Fix pas_virtual_hostname assignment in 2.4-hosts-file --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 7df2769ed2..ac712c519a 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -123,7 +123,7 @@ - name: "2.4 Hosts: - Set fact for the PAS if pas_hostname is defined" ansible.builtin.set_fact: pas_virtualhost_from_inventory: "{{ pas_server_temp | first }}" - pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(pas_server_temp[0], true) }}" + pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default( pas_server_temp | first , true) }}" - name: "2.4 Hosts: - Display the variables being used" ansible.builtin.debug: From e61d1226ad20e5575118316fc57500d75617e8eb Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 18:37:42 +0530 Subject: [PATCH 450/607] Fix virtual host duplication issue in 2.4 Hosts file --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index ac712c519a..8b78906c92 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -124,6 +124,8 @@ ansible.builtin.set_fact: pas_virtualhost_from_inventory: "{{ pas_server_temp | first }}" pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default( pas_server_temp | first , true) }}" + when: + - pas_server_temp | length > 0 - name: "2.4 Hosts: - Display the variables being used" ansible.builtin.debug: @@ -131,6 +133,8 @@ - "pas_virtual_hostname: {{ pas_virtual_hostname }} " - "custom_pas_hostname: {{ custom_pas_virtual_hostname }} " - "virtualhost_in_inventory: {{ pas_virtualhost_from_inventory }}" + when: + - pas_server_temp | length > 0 - name: "2.4 Hosts: - Get the line from /etc/hosts with virtual_host" ansible.builtin.slurp: @@ -140,6 +144,8 @@ - name: "2.4 Hosts: - Extract the line with 
virtual_host" ansible.builtin.set_fact: virtual_host_line: "{{ (hosts_content['content'] | b64decode).split('\n') | select('search', pas_virtualhost_from_inventory) | first }}" + when: + - pas_virtualhost_from_inventory is defined - name: "2.4 Hosts: - Duplicate the line with virtual_host and replace with pas_virtual_hostname in /etc/hosts" when: From 65b67f4dc3b8149c49d4e809153cca1b9afa4d49 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 11 Mar 2024 16:19:05 +0200 Subject: [PATCH 451/607] Remove the wait for the first run --- .../tasks/5.6.4-provision.yml | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4-provision.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4-provision.yml index e1d4bff43b..d22826bb38 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4-provision.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4-provision.yml @@ -15,13 +15,13 @@ register: scs_installed when: ansible_hostname == primary_instance_name -- name: "Wait for SCS check on first node to finish" - when: ansible_hostname == secondary_instance_name - ansible.builtin.set_fact: - is_scs_check_complete: "{{ hostvars[primary_instance_name].scs_installed is defined }}" - retries: 30 - delay: 60 - until: is_scs_check_complete +# - name: "Wait for SCS check on first node to finish" +# when: ansible_hostname == secondary_instance_name +# ansible.builtin.set_fact: +# is_scs_check_complete: "{{ hostvars[primary_instance_name].scs_installed is defined }}" +# retries: 30 +# delay: 60 +# until: is_scs_check_complete - name: "5.6 SCSERS: ERS Install: check if installed" ansible.builtin.stat: @@ -29,13 +29,13 @@ register: ers_installed when: ansible_hostname == secondary_instance_name -- name: "Wait for ERS check on second node to finish" - when: ansible_hostname == primary_instance_name - ansible.builtin.set_fact: - is_ers_check_complete: "{{ hostvars[secondary_instance_name].ers_installed is defined }}" - retries: 30 - delay: 60 - until: is_ers_check_complete +# - name: "Wait for ERS check on second node to finish" +# when: ansible_hostname == primary_instance_name +# ansible.builtin.set_fact: +# is_ers_check_complete: "{{ hostvars[secondary_instance_name].ers_installed is defined }}" +# retries: 30 +# delay: 60 +# until: is_ers_check_complete - name: "5.6 SCSERS: SCS HA Install: check if installed" ansible.builtin.set_fact: From 3fd8795d549b4b044505cbb9584be76c44e060e1 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 20:07:46 +0530 Subject: [PATCH 452/607] Update Stonith SBD configuration in cluster --- .../1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 54447e9df0..4660876abd 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -224,9 +224,10 @@ failed_when: false changed_when: false - - name: "1.17 Generic Pacemaker - Delete Stonith SBD if it is already configured in cluster" + - name: "1.17 Generic Pacemaker - Stonith SBD is already configured in cluster" when: stonith_sbd_configured.rc == 0 - ansible.builtin.command: crm configure delete stonith-sbd + ansible.builtin.debug: + msg: "Stonith 
Status: {{ stonith_sbd_configured.stdout }}"
 
   - name: "1.17 Generic Pacemaker - Ensure Stonith SBD is configured in cluster"
     when: stonith_sbd_configured.rc != 0

From 49ea369ef3d6ae3da1136780de7de3675ac2fcfd Mon Sep 17 00:00:00 2001
From: "R. de Veen"
Date: Mon, 11 Mar 2024 16:26:28 +0100
Subject: [PATCH 453/607] When Terraform plan failed, stop the script from executing Terraform apply (#560)

* Update return_value with new return code to stop when plan failed

When the Terraform Plan has errors, the script will continue to run.
The return_value was not being updated with the return code of the plan, so the check was not working.

* Sanitize shebang for bash scripts

* Update installer.sh

* Update installer.sh

---
 deploy/scripts/installer.sh | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh
index d10188f416..d5c2b9e248 100755
--- a/deploy/scripts/installer.sh
+++ b/deploy/scripts/installer.sh
@@ -1,4 +1,8 @@
-#!/bin/bash
+#!/usr/bin/env bash
+
+# Ensure that the exit status of a pipeline command is non-zero if any
+# stage of the pipeline has a non-zero exit status.
+set -o pipefail
 
 #colors for terminal
 boldreduscore="\e[1;4;31m"
@@ -616,10 +620,10 @@ fi
 
 allParams=$(printf " -var-file=%s %s %s %s %s %s %s" "${var_file}" "${extra_vars}" "${tfstate_parameter}" "${landscape_tfstate_key_parameter}" "${deployer_tfstate_key_parameter}" "${deployment_parameter}" "${version_parameter}" )
 
 terraform -chdir="$terraform_module_directory" plan -no-color -detailed-exitcode $allParams | tee -a plan_output.log
-echo "Plan returned $return_value"
+return_value=$?
+echo "Terraform Plan return code: " $return_value
 
-if [ 0 != $return_value ]
-then
+if [ 1 == $return_value ]: then
 echo ""
 echo "#########################################################################################"
 echo "#                                                                                       #"
@@ -635,7 +639,7 @@ then
 fi
 
 state_path="SYSTEM"
-if [ 0 == $return_value ] ; then
+if [ 1 != $return_value ] ; then
 
     if [ "${deployment_system}" == sap_deployer ]
     then

From 3a7f5e2392b67c949d5d372c8b98c0771b687de9 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 11 Mar 2024 17:27:49 +0200
Subject: [PATCH 454/607] Remove unnecessary variable assignment in Test-SDAFURLs.ps1 script

---
 deploy/scripts/Test-SDAFURLs.ps1 | 2 --
 1 file changed, 2 deletions(-)

diff --git a/deploy/scripts/Test-SDAFURLs.ps1 b/deploy/scripts/Test-SDAFURLs.ps1
index cbee0ae260..b7434e2648 100644
--- a/deploy/scripts/Test-SDAFURLs.ps1
+++ b/deploy/scripts/Test-SDAFURLs.ps1
@@ -59,8 +59,6 @@ else {
     $ARM_CLIENT_SECRET = Read-Host "Please enter the Service Principal's App ID Password" -AsSecureString
   }
 
-  $VM_password = $ARM_CLIENT_SECRET
-
   if ($null -eq $ARM_TENANT_ID -or $ARM_TENANT_ID -eq "") {
     $ARM_TENANT_ID = Read-Host "Please enter the Tenant ID"
   }

From 24fdd2f4f3a61cb1850dee13426d8b63dadfc093 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Mon, 11 Mar 2024 17:30:23 +0200
Subject: [PATCH 455/607] Fix syntax error in installer.sh

---
 deploy/scripts/installer.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh
index d5c2b9e248..f0b721eecb 100755
--- a/deploy/scripts/installer.sh
+++ b/deploy/scripts/installer.sh
@@ -623,7 +623,7 @@ terraform -chdir="$terraform_module_directory" plan -no-color -detailed-exitcode
 return_value=$?
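# With -detailed-exitcode, `terraform plan` exits 0 on success with no
# changes, 1 on error, and 2 on success with changes pending, so only an
# exit status of 1 indicates a failed plan. A minimal sketch of the same
# pattern, assuming the module path is already in $terraform_module_directory:
#
#   terraform -chdir="$terraform_module_directory" plan -detailed-exitcode
#   rc=$?
#   case $rc in
#     0) echo "No changes to apply" ;;
#     2) echo "Changes pending, safe to apply" ;;
#     *) echo "Plan failed (rc=$rc)" ; exit 1 ;;
#   esac
 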
echo "Terraform Plan return code: " $return_value -if [ 1 == $return_value ]: then +if [ 1 == $return_value ] ; then echo "" echo "#########################################################################################" echo "# #" From 4dd161cb12f1f78e29054f2a0a14ea0919bafcf0 Mon Sep 17 00:00:00 2001 From: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Date: Mon, 11 Mar 2024 21:09:16 +0530 Subject: [PATCH 456/607] Update github-actions-ansible-lint.yml update to use setup-python@v5 --- .github/workflows/github-actions-ansible-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/github-actions-ansible-lint.yml b/.github/workflows/github-actions-ansible-lint.yml index fb61500c1b..55999ec59d 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -9,7 +9,7 @@ jobs: uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.x' From 180bdd12739a656703fc13cf6f78432604a5de2c Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 21:13:28 +0530 Subject: [PATCH 457/607] Fix firewalld module fqcn in pre_checks.yml --- .../roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml index 02fa59a3ca..cff0b32de2 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml @@ -175,7 +175,7 @@ # Allow the required ports for fence_kdump through the firewall. 
- name: "1.17 Generic Pacemaker - RHEL - Allow ports for fence_kdump through the firewall" - ansible.builtin.firewalld: + ansible.posix.firewalld: port: "7410/udp" permanent: true state: enabled From 9449925521c0bdeca91e451551bac68c0eaaf66d Mon Sep 17 00:00:00 2001 From: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Date: Mon, 11 Mar 2024 18:06:30 +0200 Subject: [PATCH 458/607] Add optional extended log collection, fix kdump_enabled undefined variable error (#562) * Add optional, extended log collection functionality * Use default filter with kdump_enabled variable Prevent 'kdump_enabled' is undefined errors --------- Co-authored-by: Csaba Daradics --- .../tasks/1.17.2.0-cluster-RedHat.yml | 12 ++--- .../7.0.0-post-install/tasks/main.yaml | 51 +++++++++++++++++++ 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 658bfd3f66..c2ce01ae52 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -222,7 +222,7 @@ - name: "1.17 Generic Pacemaker - Install fence-agents-kdump package" when: - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" ansible.builtin.yum: name: fence-agents-kdump state: present @@ -232,7 +232,7 @@ - name: "1.17 Generic Pacemaker - configure the special fencing device fence_kdump" when: - inventory_hostname == primary_instance_name - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" block: # we can assume that the stonith:fence_azure_rm is already configured @@ -263,7 +263,7 @@ - name: "1.17 Generic Pacemaker - Ensure that the kdump service is enabled" when: - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" block: # Perform the fence_kdump_nodes configuration in /etc/kdump.conf @@ -276,7 +276,7 @@ register: kdump_conf_file failed_when: kdump_conf_file.rc != 0 when: - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" - inventory_hostname == primary_instance_name # Perform the fence_kdump_nodes configuration in /etc/kdump.conf @@ -289,7 +289,7 @@ register: kdump_conf_file failed_when: kdump_conf_file.rc != 0 when: - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" - inventory_hostname == secondary_instance_name # set the kdump path to /usr/crash in /etc/kdump.conf @@ -302,7 +302,7 @@ register: kdump_conf_file_path failed_when: kdump_conf_file_path.rc != 0 when: - - kdump_enabled == "enabled" + - kdump_enabled | default("disabled") == "enabled" # restart kdump service as we made changes to the configuration - name: "1.17 Generic Pacemaker - Restart kdump service" diff --git a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml index 173bf85227..a0b6b02014 100644 --- a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml @@ -190,6 +190,57 @@ - tier != 'oracle' - sapinst_list_of_files is defined +- name: "Post Installation (optional): check if sapinst_instdir exists" + become_user: root + become: true + ansible.builtin.stat: + path: "{{ tmp_directory }}/{{ this_sid }}/sapinst_instdir" + register: sapinst_instdir_exists + when: + - tier not in ['hana', 
'oracle'] + - all_sapinst_instdir_logs | default(false) + +- name: "Post Installation (optional): Find all log files in {{ tmp_directory }}/{{ this_sid }}/sapinst_instdir/" + become_user: root + become: true + ansible.builtin.find: + paths: "{{ tmp_directory }}/{{ this_sid }}/sapinst_instdir/" + file_type: file + patterns: '*.log' + recurse: true + register: sapinst_instdir_logs + when: + - tier not in ['hana', 'oracle'] + - sapinst_instdir_exists.stat.exists + - all_sapinst_instdir_logs | default(false) + +- name: "Post Installation (optional): Compress all log files from {{ tmp_directory }}/{{ this_sid }}/sapinst_instdir/" + become_user: root + become: true + community.general.archive: + path: "{{ sapinst_instdir_logs.files | map(attribute='path') | list }}" + dest: "{{ tmp_directory }}/{{ this_sid }}/{{ this_sid }}{{ suffix }}_{{ inventory_hostname }}_all_logs.zip" + format: zip + mode: 0755 + when: + - tier not in ['hana', 'oracle'] + - sapinst_instdir_logs.files is defined + - sapinst_instdir_logs.files | length > 0 + - all_sapinst_instdir_logs | default(false) + +- name: "Post Installation (optional): Copy the zipped sapinst_instdir installation logs" + become_user: root + become: true + ansible.builtin.fetch: + src: "{{ tmp_directory }}/{{ this_sid }}/{{ this_sid }}{{ suffix }}_{{ inventory_hostname }}_all_logs.zip" + dest: "{{ _workspace_directory }}/logs/{{ this_sid }}{{ suffix }}_{{ inventory_hostname }}_all_logs.zip" + flat: true + when: + - tier not in ['hana', 'oracle'] + - sapinst_instdir_logs.files is defined + - sapinst_instdir_logs.files | length > 0 + - all_sapinst_instdir_logs | default(false) + - name: "Post Installation: HANA" when: - tier == 'hana' From d21f7c445358b21a3c5276a9b1d8e9967c567528 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 11 Mar 2024 18:51:44 +0200 Subject: [PATCH 459/607] Fix conditional check for sapinst_instdir_exists --- deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml index a0b6b02014..e71d141b3b 100644 --- a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml @@ -211,7 +211,7 @@ register: sapinst_instdir_logs when: - tier not in ['hana', 'oracle'] - - sapinst_instdir_exists.stat.exists + - (sapinst_instdir_exists.stat.exists | default(false)) - all_sapinst_instdir_logs | default(false) - name: "Post Installation (optional): Compress all log files from {{ tmp_directory }}/{{ this_sid }}/sapinst_instdir/" From a413d2a3dbd03b3464cdf9fbe25285d64104f2e4 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 11 Mar 2024 23:10:14 +0530 Subject: [PATCH 460/607] Add additional destination port ranges to NSG rule --- deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index 2ec18be8ab..f4cc742a22 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -142,7 +142,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_app" { access = "Allow" protocol = "Tcp" source_port_range = "*" - destination_port_ranges = [22, 443, 3389, 5985, 5986] + destination_port_ranges = [22, 443, 3389, 5985, 
5986, 5404, 5405, 7630]
   source_address_prefixes       = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes))
   destination_address_prefixes  = azurerm_subnet.app[0].address_prefixes
 }

From 3ad61a4cbc48c9681bb0f468d1694df9885809b0 Mon Sep 17 00:00:00 2001
From: msftvapolasa <57106612+msftvapolasa@users.noreply.github.com>
Date: Sat, 16 Mar 2024 10:33:26 +0000
Subject: [PATCH 461/607] Oracle-non-asm (#566)

* lsnrctl status update
* oracle-asm dataguard setup
* asm file permission updates
* asm variable update
* asm file conditional change
* oracle-asm file creation updates
* oracle-asm listener handling
* asm listener handling
* asm lsnrctl reload test
* asm listener adjustments
* oracle asm initSID.ora file updates
* asm
* asm testing
* rman command update
* update the replace function
* oracle-asm
* fail message update
* asm
* remove fail flag
* asm restore update
* oracle-asm db updates
* oracleasm changes
* oracle-asm update1
* oracle-asm2
* oracle-asm restore adjustments
* syntax error whilst creating asm files
* asm retrofit with spfile and restore scripts
* asm update
* asm flashback folder creation
* asm
* enable fsfo on secondary
* asm
* asm clusterware config
* asm oracle cluster ware restart updates
* asm updates for registering stdby in srvctl
* asm
* asm
* ASM changes
* Oracle DG automated trigger testing
* listener change
* dbload changes
* ASM DG changes
* asm
* asm
* asm
* asm
* oracle grid sbp location update
* grid file permissions
* gsbp file permissions
* grid sbp change
* gsbp patching
* ' updated
* oracle
* sidadm creation on secondary db node
* sidadm for oracle secondary
* user change
* sidadm for oracle
* asm dg
* oracle asm dg
* oracle asm opatch
* updated SBPFUSER variable for grid patching
* added oracle-asm node
* updated the post-install file
* grid sbp patch
* grid sbp
* grid sbp copy
* grid
* grid update
* tnsnames update for sap app
* sbp grid error handling
* spell correction
* error handling
* grid patching
* sbp 2308 fixes
* grid
* grid sbp test
* grid sbp new version testing
* grid
* grid
* ok
* ok
* sbp
* grid sbp
* GRID SBP
* gsbp oradism permissions
* SBp
* grid sbp
* debug
* debug
* grid pre-install patch
* debug
* debug1
* debug
* old mopatch and opatch to test
* old sbp
* opatch 11
* sbp updates
* sbp
* grid sbp folder creation
* change the order of SBP GRID first then RDBMS
* permission update
* oracle grid
* grid patching 2311 testing
* comment out GRID Patching
* sga and pga adjustments
* oracle changes
* repo update added 8.9
* added packages for OEL8.9
* repo
* spfile changes
* oracle pga calculated based on the oracle documentation.
* lsnrctl on the secondary is not running yet; remove the lsnrctl stop.
* lsnrctl for asm updated
* syntax correction
* syntax correction for parameter
* oracle asm lsnrctl parameter update
* syntax update
* updated the register parameter
* re-arranged the order for evaluation of node_tier
* updated the syntax for node_tier
* rearranged the order of the conditions
* non-asm ha setup
* creating sidadm user on secondary
* rman restore for non-asm updates
* initsid.ora parameter updates for oracle HA
* updating the spfile for ora non-asm secondary
* update the flag file
* rman duplicate change
* update the spfile
* spfile update for oracle
* oracle dataguard replication
* oracle non-asm dg changes
* reduced the temp disk space to 50 from 100 on the deployer
* reduced the tmp disk space setting
* updated the /mnt value check to bypass the free space check
* updated code to handle oracle bug whilst creating the redologs on secondary
* oracle non-asm dg setup update
* updated the local_listener value in primary to re-register as secondary after failback
* converted caps to small for local_listener values
* added restart of secondary to activate HA service for SAP
* adding oraflash filesystem creation
* oracle non-asm changes
* updates the oracle sga and pga calculation
* oracle listener on primary update
* spfile scope update
* oracle local_listener changes
* commenting the local_listener as it is now implemented after dbload
* oracle listener updates
* debug the file update
* debug listener change
* file update for local_listener value
* commenting out the debug lines
* changing the sequence of reboot
* added reboot block to allow database to set correct huge pages
* error handling for lsnrctl restart on primary
* correcting the typos

---
 .../playbook_00_validate_parameters.yaml      |   4 +-
 .../ansible/playbook_04_00_00_db_install.yaml |   4 +
 deploy/ansible/playbook_04_00_01_db_ha.yaml   |  38 +-
 .../4.1.2-ora-asm-db-install/tasks/main.yaml  | 575 ++++++++++-------
 .../roles-db/4.1.3-ora-dg/tasks/main.yaml     |  14 +-
 .../tasks/ora-dg-observer-setup.yaml          |   6 +-
 ...aml => ora-dg-postprocessing-primary.yaml} |  33 +
 .../ora-dg-postprocessing-secondary.yaml      | 274 ++++++++
 .../tasks/ora-dg-preparation.yaml             | 198 +++++-
 .../tasks/ora-dg-setup-primary.yaml           |  93 +--
 .../tasks/ora-dg-setup-secondary.yaml         | 609 ++++++++++++++++--
 .../templates/asmfilesystems-secondary.j2     |  10 +
 .../templates/dbparametersfordg.j2            |   6 +
 .../4.1.3-ora-dg/templates/dbtrigger.j2       |  13 +
 .../templates/listener_primary.j2             |   2 +-
 .../templates/rman-restore-asm.j2             |   1 +
 .../4.1.3-ora-dg/templates/rman-restore.j2    |   4 +-
 .../templates/secondary_log_apply_off.j2      |   1 +
 .../templates/secondary_log_apply_on.j2       |   1 +
 .../4.1.3-ora-dg/templates/standbyredolog.j2  |  21 +
 .../templates/tnsnames_primary.j2             |   1 +
 .../4.1.3-ora-dg/templates/tnsnamesforsap.j2  |  29 +
 .../tasks/1.3.1-repositories-RedHat.yaml      |   2 +-
 .../1.9-kernelparameters/tasks/main.yaml      |   8 +-
 .../tasks/2.6.6-oracle-nfs-mounts.yaml        |   7 +-
 .../tasks/oracle-postprocessing.yaml          |  66 +-
 .../7.0.0-post-install/tasks/main.yaml        |   2 +-
 deploy/ansible/vars/disks_config.yml          |  11 +
 28 files changed, 1692 insertions(+), 341 deletions(-)
 rename deploy/ansible/roles-db/4.1.3-ora-dg/tasks/{ora-dg-postprocessing.yaml => ora-dg-postprocessing-primary.yaml} (83%)
 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml
 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/asmfilesystems-secondary.j2
 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbparametersfordg.j2
 create mode 100644 
deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbtrigger.j2 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore-asm.j2 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_off.j2 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_on.j2 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/standbyredolog.j2 create mode 100644 deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnamesforsap.j2 diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index c2618d38f6..8496333e6b 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -363,13 +363,13 @@ - name: "0.0 Validations - Deployer disk space requirements" ansible.builtin.set_fact: - deployer_free_temp_disk_space: 100 + deployer_free_temp_disk_space: 40 when: - deployer_free_temp_disk_space is not defined - name: "0.0 Validations - Check for free disk space on deployer" ansible.builtin.assert: - that: (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (deployer_free_temp_disk_space | int) + that: (mnt_free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (40 ) fail_msg: "The deployer needs at least {{ deployer_free_temp_disk_space }} GB of free disk space in /mnt" when: - mnt_free_diskspace | length > 0 diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index e21fa466f1..7a606ba769 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -401,6 +401,10 @@ main_password: "{{ hostvars.localhost.sap_password }}" tags: - always + + - name: "Configure accounts for oracle" + ansible.builtin.include_role: + name: roles-os/1.11-accounts - name: "Database Installation Playbook: - Check for file system mounts" ansible.builtin.include_role: diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 3e93ec59a8..fa86f92867 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -304,6 +304,42 @@ suffix: "_DC_ACTION_2" tier: 'oracle' +- hosts: "{{ sap_sid | upper }}_DB[1]" + name: DB Dataguard setup on secondary - Oracle + remote_user: "root" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "Oracle Data Guard Setup on Secondary" + when: + - db_high_availability + - platform in ['ORACLE', 'ORACLE-ASM'] + become: true + block: + - name: Setting the DB facts + ansible.builtin.set_fact: + tier: ora # Actions for Oracle DB Servers + action: 3 + main_password: "{{ hostvars.localhost.sap_password }}" + tags: + - always + + - name: Oracle Data guard + ansible.builtin.include_role: + name: roles-db/4.1.3-ora-dg + tags: + - 4.1.3-ora-dg + + - name: "Observer Playbook: - Run post installation routines" + ansible.builtin.include_role: + name: roles-sap/7.0.0-post-install + vars: + suffix: "_DC_ACTION_2" + tier: 'oracle' + # /*----------------------------------------------------------------------------8 # | | # | PLAY FOR Observer Node setup | @@ -335,7 +371,7 @@ - name: "Observer Playbook: Setting the DB facts" ansible.builtin.set_fact: node_tier: observer - action: 3 + action: 4 main_password: "{{ hostvars.localhost.sap_password }}" tags: - always diff --git 
a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index 6b975118a8..35fdb2c0d8 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -67,6 +67,8 @@ path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/grid_sbp_installed.txt" register: oracle_installed + + # /*---------------------------------------------------------------------------8 # | Start of Oracle software installation using SAP RUNINSTALLER wrapper. | # | Before running Installer set DB_SID and CV_ASSUME_DISTID according to | @@ -224,160 +226,28 @@ # /*---------------------------------------------------------------------------8 # | | -# | Step 4 SBP Patching for Oracle | +# | Step 4 SBP Patching for Oracle GRID | # | | # +------------------------------------4--------------------------------------*/ + # Backup Oracle GRID Home prior to patching. - - name: "Oracle ASM: Find MOPatch" - ansible.builtin.find: - paths: "{{ target_media_location }}/SBP/SAPSBP" - patterns: ["MOPatch"] - file_type: directory - recurse: true - register: mopatch_directory - - - name: "Oracle ASM: Find MOPatch" - ansible.builtin.fail: - msg: "Too many MOPatches found" - when: mopatch_directory.matched != 1 - - - name: "Oracle ASM: MOPatch path" - ansible.builtin.set_fact: - mopatch_path: "{{ mopatch_directory.files[0].path }}" - when: mopatch_directory.matched == 1 - - - name: "Oracle ASM: Check if 'OPatch.bck' exists" - ansible.builtin.stat: - path: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch.bck - register: opatch_stat - - - name: "Oracle ASM: backup OPatch" - ansible.builtin.copy: - src: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch - dest: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch.bck - remote_src: true - mode: '0755' - when: - - not opatch_stat.stat.exists - - - name: "Oracle ASM: remove old OPatch" - ansible.builtin.file: - path: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch - state: absent - when: - - not opatch_stat.stat.exists - - - name: "Oracle ASM: copy OPatch" - # become: true - # become_user: "oracle" - ansible.builtin.copy: - src: "{{ target_media_location }}/SBP/OPATCH/OPatch" - dest: /oracle/{{ db_sid | upper }}/{{ ora_version }} - remote_src: true - mode: '0755' - owner: oracle - group: oinstall - - - name: "Oracle ASM: copy MOPatch" - # become: true - # become_user: "oracle" - ansible.builtin.copy: - src: "{{ mopatch_path }}" - dest: /oracle/{{ db_sid | upper }}/{{ ora_version }} - remote_src: true - mode: '0777' - owner: oracle - group: oinstall - - - - name: "Oracle ASM: Pre Processing set permissions" - ansible.builtin.file: - path: "/oracle/{{ db_sid | upper }}/{{ ora_version }}/bin/oradism" - state: file - mode: '0750' - owner: oracle - group: oinstall - - - name: "Oracle ASM: Post Processing - SBP Patching" - become: true - become_user: "oracle" - ansible.builtin.shell: $IHRDBMS/MOPatch/mopatch.sh -v -s {{ oracle_sbp_patch }} - environment: - DB_SID: "{{ db_sid }}" - CV_ASSUME_DISTID: OL7 - IHRDBMS: /oracle/{{ db_sid | upper }}/{{ ora_version }} - ORACLE_HOME: /oracle/{{ db_sid | upper }}/{{ ora_version }} - RDBMS: /oracle/{{ db_sid | upper }}/{{ ora_version }} - register: sbpscript_results - failed_when: sbpscript_results.rc >= 2 + - name: "Oracle ASM : BACKUP ORACLE GRID" + become: true + become_user: "root" + ansible.builtin.shell: | + cp -rp /oracle/GRID/{{ ora_version }} /oracle/GRID/{{ ora_version }}.bck + 
register: gridbackup args: - creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpdb_installed.txt" - chdir: "{{ target_media_location }}/SBP" + creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt executable: /bin/csh - - name: "Oracle ASM: pre processing reset permissions" - ansible.builtin.file: - path: "/oracle/{{ db_sid | upper }}/{{ ora_version }}/bin/oradism" - state: file - mode: '4750' - owner: root - group: oinstall - - - name: "Oracle ASM: Post processing installer output" - ansible.builtin.debug: - var: sbpscript_results.stdout_lines - verbosity: 2 - - - name: "Oracle ASM: Post processing installer output" - ansible.builtin.copy: - dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp.log" - content: "{{ sbpscript_results.stdout }}" - mode: '0777' - when: sbpscript_results.stdout is defined - - - name: "Oracle ASM: Create sbp_installed.txt" + + - name: "Oracle ASM: Create flag gridbackedup.txt " ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpdb_installed.txt" + path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt state: touch mode: '0755' - owner: oracle - group: oinstall - - - # - name: Remove SAPSBP - # ansible.builtin.file: - # path: "{{ mopatch_path }}" - # state: absent - -# /*---------------------------------------------------------------------------8 -# | | -# | Step 4 SBP Patching for Oracle GRID | -# | | -# +------------------------------------4--------------------------------------*/ - # Backup Oracle GRID Home prior to patching. - - # - name: "Oracle ASM : BACKUP ASM GRID" - # # become: true - # # become_user: "oracle" - # ansible.builtin.copy: - # src: "/oracle/GRID" - # dest: /oracle/GRID.bck - # remote_src: true - # mode: '0755' - # owner: oracle - # group: oinstall - - - - name: "Oracle ASM : BACKUP ORACLE GRID" - # become: true - # become_user: "oracle" - ansible.builtin.copy: - src: "/oracle/GRID/{{ ora_version }}" - dest: /oracle/GRID/{{ ora_version }}.bck - remote_src: true - mode: '0755' - owner: oracle - group: oinstall + when: gridbackup.rc == 0 - name: "Oracle ASM: Check if 'OPatch.bck' exists" ansible.builtin.stat: @@ -419,12 +289,32 @@ when: - not mopatchgrid_stat.stat.exists +# MOPATCH for GRID + + - name: "Oracle ASM: Find MOPatch for GRID" + ansible.builtin.find: + paths: "{{ target_media_location }}/SBP/GSBP/SGR19P" + patterns: ["MOPatch"] + file_type: directory + recurse: true + register: mopatch_grid_directory + + - name: "Oracle ASM: Find MOPatch for GRID" + ansible.builtin.fail: + msg: "Too many MOPatches found" + when: mopatch_grid_directory.matched != 1 + + - name: "Oracle ASM: MOPatch path" + ansible.builtin.set_fact: + mopatch_grid_path: "{{ mopatch_grid_directory.files[0].path }}" + when: mopatch_grid_directory.matched == 1 + - name: "Oracle ASM: copy MOPatch for GRID" # become: true # become_user: "oracle" ansible.builtin.copy: - src: "{{ mopatch_path }}" + src: "{{ mopatch_grid_path }}" dest: /oracle/GRID/{{ ora_version }} remote_src: true mode: '0755' @@ -461,124 +351,361 @@ # become: true # become_user: "oracle" ansible.builtin.copy: - src: "{{ target_media_location }}/SBP/OPATCH/OPatch" + src: "{{ target_media_location }}/SBP/GSBP/OPATCH/OPatch" dest: /oracle/GRID/{{ ora_version }} remote_src: true mode: '0755' owner: oracle group: oinstall - - name: "Oracle ASM: Prepare for GRID SBP Patching" - become: true - become_user: "root" - ansible.builtin.shell: /oracle/GRID/{{ ora_version }}/crs/install/roothas.sh -prepatch - 
register: gridpreinstall - args: - creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridpreinstall.txt +# Commenting the SBP GRID Patching code till the issue is resolved. + # - name: "Oracle ASM: Prepare for GRID SBP Patching" + # become: true + # become_user: "root" + # ansible.builtin.shell: /oracle/GRID/{{ ora_version }}/crs/install/roothas.sh -prepatch |tee /etc/sap_deployment_automation/{{ sap_sid | upper }}/grid-pre-processing.log + # register: gridpreinstall + # args: + # creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridpreinstall.txt - # Debug for testing - - name: "Oracle ASM: Prepare for GRID SBP Patching print output" - ansible.builtin.debug: - var: gridpreinstall.stdout_lines - verbosity: 2 + # - name: "Wait to shutdown of Oracle processes for 15 sec" + # ansible.builtin.wait_for: + # timeout: 30 + + + + # # Debug for testing + # - name: "Oracle ASM: Prepare for GRID SBP Patching print output" + # ansible.builtin.debug: + # var: gridpreinstall.stdout_lines + # verbosity: 2 + + # - name: "Oracle ASM: Prepare for GRID SBP Patching" + # ansible.builtin.copy: + # dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/pre-patchcmd.log" + # content: "{{ gridpreinstall.stdout }}" + # mode: 0777 + # when: gridpreinstall.stdout is defined + + # - name: "Oracle ASM: Create flag after a successful preparation" + # ansible.builtin.file: + # path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridpreinstall.txt + # state: touch + # mode: '0755' + # when: gridpreinstall.rc == 0 + + # # Adding Block for Preventing orachk problems as per SBP Guidelines for patching 2308. + + # - name: "Preventing orachk Issue" + # become: true + # become_user: "root" + # ansible.builtin.shell: | + # set -o errexit + # set -o pipefail + # chown -R oracle:oinstall $OHGRID/suptools/orachk + # chmod -R u+w $OHGRID/suptools/orachk + # rm -f $IHRDBMS/suptools/orachk/orachk + # register: orachkdeletion + # environment: + # OHGRID: /oracle/GRID/{{ ora_version }} + # ORACLE_HOME: /oracle/GRID/{{ ora_version }} + # IHRDBMS: /oracle/{{ db_sid | upper}}/{{ ora_release }} + # SBPFUSER: /usr/sbin/fuser + # # ORACLE_SID: "{{ db_sid | upper}}" + # args: + # creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/orachkdeleted.txt + # chdir: "/oracle/GRID/{{ ora_version }}" + # executable: /bin/csh + + # - name: "Oracle ASM: Create flag after a successful orachk deletion" + # ansible.builtin.file: + # path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/orachkdeleted.txt + # state: touch + # mode: '0755' + # when: orachkdeletion.rc == 0 + + # # STEP 4.2.2 SBP Patching for Oracle GRID. + + # # Copy the GSBP patch to local folder to avoid failures. 
+ + # # - name: "Copy the GRID SBP Patches to local folders" + # # ansible.builtin.copy: + # # src: "{{ target_media_location }}/GSBP" + # # dest: /home/oracle/ + # # remote_src: true + # # owner: oracle + # # group: oinstall + # # mode: '0775' + + # - name: "Oracle ASM: Pre Processing set permissions GRID" + # become: true + # become_user: "root" + # ansible.builtin.file: + # path: "/oracle/GRID/{{ ora_version }}/bin/oradism" + # state: file + # owner: root + # group: oinstall + # mode: u+rw + + + # - name: "debug 2311 Patching" + # fail: + # msg: "fail here for manual GRID SBP installation" + + # - name: "File update wait for 15 sec to avoid multiple locks" + # ansible.builtin.wait_for: + # timeout: 30 + + + # - name: "Oracle GRID Patching Error Handling Block" + # block: + # - name: "Oracle ASM: Post Processing - GRID SBP Patching" + # become: true + # become_user: "oracle" + # ansible.builtin.shell: $OHGRID/MOPatch/mopatch.sh -v -s {{ oraclegrid_sbp_patch }} + # environment: + # OHGRID: /oracle/GRID/{{ ora_version }} + # ORACLE_HOME: /oracle/GRID/{{ ora_version }} + # # IHRDBMS: /oracle/{{ db_sid | upper}}/{{ ora_release }} + # SBPFUSER: /usr/sbin/fuser + # OPATCH_DEBUG: "TRUE" + # register: gridsbpscript_results + # failed_when: gridsbpscript_results.rc >= 1 + # args: + # creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt" + # chdir: "{{ target_media_location }}/SBP/GSBP" + # executable: /bin/csh + + # rescue: + # - name: "Remove the Pre-install.txt File for preparing Re-run" + # become: true + # become_user: "oracle" + # ansible.builtin.shell: rm -rf gridpreinstall.txt + # args: + # chdir: /etc/sap_deployment_automation/{{ db_sid | upper }} + # executable: /bin/csh + + # - name: "Remove the Link Files created in the previous run" + # become: true + # become_user: "oracle" + # ansible.builtin.shell: rm -rf link* + # register: removelinkfiles + # failed_when: removelinkfiles.rc >= 2 + # args: + # chdir: "{{ target_media_location }}/SBP" + # executable: /bin/csh + + # - name: "Oracle ASM: Prepare for GRID SBP Patching" + # become: true + # become_user: "root" + # ansible.builtin.shell: /oracle/GRID/{{ ora_version }}/crs/install/roothas.sh -prepatch |tee /etc/sap_deployment_automation/{{ sap_sid | upper }}/grid-pre-processing.log + # register: gridpreinstall + # args: + # creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridpreinstall.txt + + # # - name: "Wait to shutdown Oracle processes for 15 sec" + # # ansible.builtin.wait_for: + # # timeout: 30 + + # - name: "Oracle ASM: Post Processing - GRID SBP Patching" + # become: true + # become_user: "oracle" + # ansible.builtin.shell: $OHGRID/MOPatch/mopatch.sh -v -s {{ oraclegrid_sbp_patch }} + # environment: + # OHGRID: /oracle/GRID/{{ ora_version }} + # ORACLE_HOME: /oracle/GRID/{{ ora_version }} + # # IHRDBMS: /oracle/{{ db_sid | upper}}/{{ ora_release }} + # SBPFUSER: /usr/sbin/fuser + # OPATCH_DEBUG: "TRUE" + # register: gridsbpscript_results + # failed_when: gridsbpscript_results.rc >= 1 + # args: + # creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt" + # chdir: "{{ target_media_location }}/SBP/GSBP" + # executable: /bin/csh + + + # - name: "Oracle ASM: Pre Processing reset permissions GRID" + # ansible.builtin.file: + # path: "/oracle/GRID/{{ ora_version }}/bin/oradism" + # state: file + # mode: '4750' + # owner: oracle + # group: oinstall + + # - name: "Oracle ASM: Post processing installer output" + # ansible.builtin.debug: + # var: 
gridsbpscript_results.stdout_lines + # verbosity: 2 + + # - name: "Oracle ASM: Post processing installer output" + # ansible.builtin.copy: + # dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpgrid.log" + # content: "{{ gridsbpscript_results.stdout }}" + # mode: '0777' + # when: gridsbpscript_results.stdout is defined + + # - name: "Oracle ASM: Create flag after a successful SBP GRID installation" + # ansible.builtin.file: + # path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt + # state: touch + # mode: '0755' + + # STEP 4.2.3 Post-Processing SBP Patching for Oracle GRID. + # SAP Note 2893317 - ORA-12547: TNS:lost contact during SWPM system copy import -NetWeaver + # Run the post install script for GRID SBP patching to start the Oracle Cluster service manager, ASMCA and relevant toolset. + + # - name: "Oracle ASM: Oracle Post Processing - GRID SBP GRID Patching Post-Processing" + # become: true + # become_user: root + # ansible.builtin.shell: | + # set -o errexit + # set -o pipefail + # /oracle/GRID/{{ ora_version }}/rdbms/install/rootadd_rdbms.sh + # /oracle/GRID/{{ ora_version }}/crs/install/roothas.sh -postpatch |tee /etc/sap_deployment_automation/{{ sap_sid | upper }}/grid-post-processing.log + # register: sbppostpro_results + # args: + # chdir: /oracle/GRID/{{ ora_version }} + # creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp-grid-postprocess.txt + # when: + # - gridsbpscript_results.rc >= 1 + + # - name: "Oracle ASM: Create flag after a successful change" + # ansible.builtin.file: + # path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp-grid-postprocess.txt + # state: touch + # mode: '0755' + # # when: sbppostpro_results.rc >= 2 - - name: "Oracle ASM: Prepare for GRID SBP Patching" + # - name: "Oracle ASM: Create sbp_installed.txt" + # ansible.builtin.file: + # path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/grid_sbp_installed.txt" + # state: touch + # mode: '0755' + # when: sbpscript_results.rc < 2 + +# /*---------------------------------------------------------------------------8 +# | | +# | Step 5 SBP Patching for Oracle RDBMS | +# | | +# +------------------------------------4--------------------------------------*/ + +# MOPATCH for SBP + - name: "Oracle ASM: Find MOPatch" + ansible.builtin.find: + paths: "{{ target_media_location }}/SBP/SAPSBP" + patterns: ["MOPatch"] + file_type: directory + recurse: true + register: mopatch_directory + + - name: "Oracle ASM: Find MOPatch" + ansible.builtin.fail: + msg: "Too many MOPatches found" + when: mopatch_directory.matched != 1 + + - name: "Oracle ASM: MOPatch path" + ansible.builtin.set_fact: + mopatch_path: "{{ mopatch_directory.files[0].path }}" + when: mopatch_directory.matched == 1 + + + + - name: "Oracle ASM: Check if 'OPatch.bck' exists" + ansible.builtin.stat: + path: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch.bck + register: opatch_stat + + - name: "Oracle ASM: backup OPatch" ansible.builtin.copy: - dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/pre-patchcmd.log" - content: "{{ gridpreinstall.stdout }}" - mode: 0777 - when: gridpreinstall.stdout is defined + src: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch + dest: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch.bck + remote_src: true + mode: '0755' + when: + - not opatch_stat.stat.exists - - name: "Oracle ASM: Create flag after a successful preparation" + - name: "Oracle ASM: remove old OPatch" ansible.builtin.file: - path: /etc/sap_deployment_automation/{{ 
sap_sid | upper }}/gridpreinstall.txt - state: touch + path: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch + state: absent + when: + - not opatch_stat.stat.exists + + - name: "Oracle ASM: copy OPatch" + # become: true + # become_user: "oracle" + ansible.builtin.copy: + src: "{{ target_media_location }}/SBP/OPATCH/OPatch" + dest: /oracle/{{ db_sid | upper }}/{{ ora_version }} + remote_src: true mode: '0755' - when: gridpreinstall.rc == 0 + owner: oracle + group: oinstall + + - name: "Oracle ASM: copy MOPatch" + # become: true + # become_user: "oracle" + ansible.builtin.copy: + src: "{{ mopatch_path }}" + dest: /oracle/{{ db_sid | upper }}/{{ ora_version }} + remote_src: true + mode: '0777' + owner: oracle + group: oinstall - # STEP 4.2.2 SBP Patching for Oracle GRID. - - name: "Oracle ASM: Pre Processing set permissions GRID" + - name: "Oracle ASM: Pre Processing set permissions" ansible.builtin.file: - path: "/oracle/GRID/{{ ora_version }}/bin/oradism" + path: "/oracle/{{ db_sid | upper }}/{{ ora_version }}/bin/oradism" state: file mode: '0750' owner: oracle group: oinstall - - name: "Oracle ASM: Post Processing - GRID SBP Patching" + - name: "Oracle ASM: Post Processing - SBP Patching" become: true become_user: "oracle" - ansible.builtin.shell: $OHGRID/MOPatch/mopatch.sh -v -s {{ oraclegrid_sbp_patch }} + ansible.builtin.shell: $IHRDBMS/MOPatch/mopatch.sh -v -s {{ oracle_sbp_patch }} environment: - OHGRID: /oracle/GRID/{{ ora_version }} - # ORACLE_BASE: /oracle/GRID/ - ORACLE_HOME: /oracle/GRID/{{ ora_version }} - # IHRDBMS: /oracle/{{ db_sid | upper}}/{{ ora_release }} - # ORACLE_SID: "{{ db_sid | upper}}" - register: gridsbpscript_results - failed_when: gridsbpscript_results.rc >= 2 + DB_SID: "{{ db_sid }}" + CV_ASSUME_DISTID: OL7 + IHRDBMS: /oracle/{{ db_sid | upper }}/{{ ora_version }} + ORACLE_HOME: /oracle/{{ db_sid | upper }}/{{ ora_version }} + RDBMS: /oracle/{{ db_sid | upper }}/{{ ora_version }} + register: sbpscript_results + failed_when: sbpscript_results.rc >= 2 args: - creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt" - chdir: "{{ target_media_location }}/SBP" - executable: /bin/csh + creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpdb_installed.txt" + chdir: "{{ target_media_location }}/SBP" + executable: /bin/csh - - name: "Oracle ASM: Pre Processing reset permissions GRID" + - name: "Oracle ASM: pre processing reset permissions" ansible.builtin.file: - path: "/oracle/GRID/{{ ora_version }}/bin/oradism" + path: "/oracle/{{ db_sid | upper }}/{{ ora_version }}/bin/oradism" state: file mode: '4750' - owner: oracle + owner: root group: oinstall - name: "Oracle ASM: Post processing installer output" ansible.builtin.debug: - var: gridsbpscript_results.stdout_lines + var: sbpscript_results.stdout_lines verbosity: 2 - name: "Oracle ASM: Post processing installer output" ansible.builtin.copy: - dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpgrid.log" - content: "{{ gridsbpscript_results.stdout }}" + dest: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp.log" + content: "{{ sbpscript_results.stdout }}" mode: '0777' - when: gridsbpscript_results.stdout is defined - - - name: "Oracle ASM: Create flag after a successful SBP GRID installation" - ansible.builtin.file: - path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt - state: touch - mode: '0755' - - # STEP 4.2.3 Post-Processing SBP Patching for Oracle GRID. 
- # SAP Note 2893317 - ORA-12547: TNS:lost contact during SWPM system copy import -NetWeaver - # Run the post install script for GRID SBP patching to start the Oracle Cluster service manager, ASMCA and relevant toolset. - - - name: "Oracle ASM: Oracle Post Processing - GRID SBP GRID Patching Post-Processing" - become: true - become_user: root - ansible.builtin.shell: | - set -o errexit - set -o pipefail - /oracle/GRID/{{ ora_version }}/rdbms/install/rootadd_rdbms.sh - /oracle/GRID/{{ ora_version }}/crs/install/roothas.sh -postpatch |tee /etc/sap_deployment_automation/{{ sap_sid | upper }}/grid-post-processing.log - register: sbppostpro_results - args: - chdir: /oracle/GRID/{{ ora_version }} - creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp-grid-postprocess.txt - - - name: "Oracle ASM: Create flag after a successful change" - ansible.builtin.file: - path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/sbp-grid-postprocess.txt - state: touch - mode: '0755' - # when: sbppostpro_results.rc >= 2 + when: sbpscript_results.stdout is defined - name: "Oracle ASM: Create sbp_installed.txt" ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/grid_sbp_installed.txt" + path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/sbpdb_installed.txt" state: touch mode: '0755' - when: sbpscript_results.rc < 2 + owner: oracle + group: oinstall - name: "Oracle ASM: Permissions" ansible.builtin.file: @@ -587,6 +714,10 @@ owner: oracle group: oinstall mode: '6751' + # - name: Remove SAPSBP + # ansible.builtin.file: + # path: "{{ mopatch_path }}" + # state: absent - name: "Oracle ASM: environment variables to the Bash profile" become: true diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml index 27249b01a2..3cc341483e 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml @@ -43,19 +43,27 @@ - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" -- name: "Oracle Data Guard: Post processing" - ansible.builtin.include_tasks: "ora-dg-postprocessing.yaml" +- name: "Oracle Data Guard: Post processing on Primary" + ansible.builtin.include_tasks: "ora-dg-postprocessing-primary.yaml" when: - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" - action == 2 +# Enable Flashback Loggining on the Secondary for FSFO +- name: "Oracle Data Guard: Post processing on Secondary" + ansible.builtin.include_tasks: "ora-dg-postprocessing-secondary.yaml" + when: + - node_tier == "oracle" or node_tier == "oracle-asm" + - node_tier != "observer" + - action == 3 + # FSFO is enabled from the Observer. - name: "Oracle Data Guard: Setup Observer" ansible.builtin.include_tasks: "ora-dg-observer-setup.yaml" when: - node_tier == "observer" - - action == 3 + - action == 4 ... # /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-observer-setup.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-observer-setup.yaml index dbbfe73244..b34517031d 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-observer-setup.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-observer-setup.yaml @@ -100,7 +100,7 @@ path: /etc/sap_deployment_automation/oracle_installed.txt state: touch mode: '0755' - when: orainstall_results.rc < 3 + when: orainstall_results.rc >= 3 # Create SQLNET.ORA . 
@@ -210,9 +210,9 @@

 # ENABLE FSFO

-- name: "Oracle Data Guard - Observer: Check if DB post-processing is completed"
+- name: "Oracle Data Guard - Observer: Check if DB post-processing on the secondary is completed"
   ansible.builtin.stat:
-    path: /usr/sap/install/downloads/{{ db_sid | upper }}/post_processing_completed.txt
+    path: /usr/sap/install/downloads/{{ db_sid | upper }}/post_processing_secondary_completed.txt
   register: dg_enabled

 - name: "ENABLE FSFO"
diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml
similarity index 83%
rename from deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing.yaml
rename to deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml
index 11212a6893..69edb8861a 100644
--- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing.yaml
+++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml
@@ -154,6 +154,39 @@
       state: touch
       mode: '0755'
     when: enable_dgconfig_results.rc == 0
+
+  # Enable the DB trigger for SAP HA
+  - name: "Oracle Data Guard - Post Processing: Enable DB Trigger"
+    become: true
+    become_user: "oracle"
+    ansible.builtin.shell: sqlplus / as sysdba @dbtrigger.sql
+    register: enable_dbtrigger_results
+    failed_when: enable_dbtrigger_results.rc > 0
+    args:
+      creates: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.txt
+      chdir: /etc/sap_deployment_automation/dgscripts
+      executable: /bin/csh
+    # when: current_host == ora_primary
+
+  - name: "Oracle Data Guard - Post Processing: Create dbtrigger on Primary (debug)"
+    ansible.builtin.debug:
+      var: enable_dbtrigger_results.stdout_lines
+      verbosity: 2
+
+  - name: "Oracle Data Guard - Post Processing: Create dbtrigger configuration on Primary (save output)"
+    ansible.builtin.copy:
+      dest: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.log
+      content: "{{ enable_dbtrigger_results.stdout }}"
+      mode: '0777'
+    when: enable_dbtrigger_results.stdout is defined
+
+  - name: "Oracle Data Guard - Post Processing: Create enable_dbtrigger.txt"
+    become: true
+    become_user: "oracle"
+    ansible.builtin.file:
+      path: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.txt
+      state: touch
+      mode: '0755'
+    when: enable_dbtrigger_results.rc == 0

 - name: "Create post processing completed"
   become: true
diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml
new file mode 100644
index 0000000000..4839c3b26e
--- /dev/null
+++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml
@@ -0,0 +1,274 @@
+---
+# DGMGRL configuration on the Secondary for enabling the Flashback logs.
+
+- name: "Oracle Data Guard - Check if post-processing on the primary is completed"
+  ansible.builtin.stat:
+    path: /usr/sap/install/downloads/{{ db_sid | upper }}/post_processing_completed.txt
+  register: primary_post_processing_completed
+  when: node_tier == "oracle" or node_tier == "oracle-asm"
+
+# Create the dgmgrl scripts that toggle redo apply while Flashback is enabled on the secondary database.
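+# For reference, each rendered .dgmgrl script contains a single broker command
+# (see the secondary_log_apply_off.j2 / secondary_log_apply_on.j2 templates at
+# the end of this patch); for an illustrative SID "ORA" it reads:
+#   EDIT DATABASE ORA_STDBY set state ='LOG-APPLY-OFF';
+# Redo apply must be stopped before ALTER DATABASE FLASHBACK ON can run on a
+# physical standby, hence the apply-off / flashback / apply-on sequence below.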
+
+- name: "Oracle Data Guard - Preparation: Create the secondary_log_apply_off file for Secondary"
+  become: true
+  become_user: oracle
+  ansible.builtin.template:
+    backup: true
+    src: secondary_log_apply_off.j2
+    dest: "/etc/sap_deployment_automation/dgscripts/secondary_log_apply_off.dgmgrl"
+    mode: '0644'
+    force: true
+
+- name: "Oracle Data Guard - Preparation: Create the secondary_log_apply_on file for Secondary"
+  become: true
+  become_user: oracle
+  ansible.builtin.template:
+    backup: true
+    src: secondary_log_apply_on.j2
+    dest: "/etc/sap_deployment_automation/dgscripts/secondary_log_apply_on.dgmgrl"
+    mode: '0644'
+    force: true
+
+- name: "Execute block only if the secondary DB restore is completed"
+  block:
+
+    # Disable log apply on the Secondary so that Flashback can be enabled
+    - name: "Oracle Data Guard - Post Processing: Set log apply OFF on Standby"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: dgmgrl / as sysdba @secondary_log_apply_off.dgmgrl
+      register: secondary_log_apply_off_results
+      failed_when: secondary_log_apply_off_results.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_off.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+
+    - name: "Oracle Data Guard - Post Processing: Set log apply OFF on Standby (debug)"
+      ansible.builtin.debug:
+        var: secondary_log_apply_off_results.stdout_lines
+        verbosity: 2
+
+    - name: "Oracle Data Guard - Post Processing: Set log apply OFF on Standby (save output)"
+      ansible.builtin.copy:
+        dest: /etc/sap_deployment_automation/secondary_log_apply_off.log
+        content: "{{ secondary_log_apply_off_results.stdout }}"
+        mode: '0777'
+      when: secondary_log_apply_off_results.stdout is defined
+
+    - name: "Oracle Data Guard - Post Processing: Create secondary_log_apply_off.txt"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_off.txt
+        state: touch
+        mode: '0755'
+      when: secondary_log_apply_off_results.rc == 0
+
+    # Enable Flashback on the Secondary
+    - name: "Oracle Data Guard - Post Processing: Enable Flashback on Oracle Secondary DB"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: sqlplus / as sysdba @turnonflashback.sql
+      register: turn_on_flashback_secondary_results
+      failed_when: turn_on_flashback_secondary_results.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/turn_on_flashback_secondary.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+
+    - name: "Oracle Data Guard - Post Processing: Enable Flashback on Oracle Secondary DB (debug)"
+      ansible.builtin.debug:
+        var: turn_on_flashback_secondary_results.stdout_lines
+        verbosity: 2
+
+    - name: "Oracle Data Guard - Post Processing: Enable Flashback on Oracle Secondary DB (save output)"
+      ansible.builtin.copy:
+        dest: /etc/sap_deployment_automation/dgscripts/turn_on_flashback_secondary.log
+        content: "{{ turn_on_flashback_secondary_results.stdout }}"
+        mode: '0755'
+      when: turn_on_flashback_secondary_results.stdout is defined
+
+    - name: "Oracle Data Guard - Post Processing: Create turn_on_flashback_secondary.txt"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/turn_on_flashback_secondary.txt
+        state: touch
+        mode: '0755'
+      when:
+        - turn_on_flashback_secondary_results.rc == 0
+
+    # Re-enable log apply on the secondary DB after enabling Flashback
+    - name: "Oracle Data Guard - Post Processing: Set log apply ON on Standby"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: dgmgrl / as sysdba @secondary_log_apply_on.dgmgrl
+      register: secondary_log_apply_on_results
+      failed_when: secondary_log_apply_on_results.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_on.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+
+    - name: "Oracle Data Guard - Post Processing: Set log apply ON on Standby (debug)"
+      ansible.builtin.debug:
+        var: secondary_log_apply_on_results.stdout_lines
+        verbosity: 2
+
+    - name: "Oracle Data Guard - Post Processing: Set log apply ON on Standby (save output)"
+      ansible.builtin.copy:
+        dest: /etc/sap_deployment_automation/secondary_log_apply_on.log
+        content: "{{ secondary_log_apply_on_results.stdout }}"
+        mode: '0777'
+      when: secondary_log_apply_on_results.stdout is defined
+
+    - name: "Oracle Data Guard - Post Processing: Create secondary_log_apply_on.txt"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_on.txt
+        state: touch
+        mode: '0755'
+      when: secondary_log_apply_on_results.rc == 0
+
+    # Stop the Secondary Database.
+    - name: "Oracle Data Guard - Setup Secondary: Stop secondary DB for Oracle Clusterware configuration"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: |
+        set -o pipefail
+        sqlplus / as sysdba @shutdownsecondary.sql | tee /etc/sap_deployment_automation/dgscripts/secondary_shutdown.log
+      register: secondary_shutdown_cfg_results
+      failed_when: secondary_shutdown_cfg_results.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/secondary_shutdown_cfg.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+      when:
+        - current_host == ora_secondary
+
+    - name: "Oracle Data Guard - Setup Secondary: Create secondary_shutdown_cfg.txt"
+      become: true
+      become_user: "root"
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/secondary_shutdown_cfg.txt
+        state: touch
+        mode: '0755'
+        owner: oracle
+        group: oinstall
+      when:
+        - secondary_shutdown_cfg_results.rc == 0
+        - current_host == ora_secondary
+
+    # Start the Secondary Database for non-ASM.
+    - name: "Oracle Data Guard - Setup Secondary: Start secondary DB after HA Service configuration on non-ASM"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: |
+        set -o pipefail
+        sqlplus / as sysdba @orasecondarystartup.sql | tee /etc/sap_deployment_automation/dgscripts/secondary_startup.log
+      register: secondary_startup_cfg_results
+      failed_when: secondary_startup_cfg_results.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/secondary_startup_cfg.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+      when:
+        - node_tier == "oracle"
+        - current_host == ora_secondary
+
+    - name: "Oracle Data Guard - Setup Secondary: Create secondary_startup_cfg.txt"
+      become: true
+      become_user: "root"
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/secondary_startup_cfg.txt
+        state: touch
+        mode: '0755'
+        owner: oracle
+        group: oinstall
+      when:
+        - node_tier == "oracle"
+        - secondary_startup_cfg_results.rc == 0
+        - current_host == ora_secondary
+
+    - name: "Oracle Clusterware Restart Configuration"
+      become: true
+      become_user: "oracle"
+      ansible.builtin.shell: |
+        srvctl add database -db {{ db_sid | upper }}_STDBY -oraclehome /oracle/{{ db_sid | upper }}/{{ ora_release }} -spfile +DATA/{{ db_sid | upper }}_STDBY/PARAMETERFILE/spfile{{ db_sid | lower }}.ora -role PHYSICAL_STANDBY -instance {{ db_sid | upper }} -startoption mount -diskgroup "ARCH,DATA,RECO"
+        srvctl start database -db {{ db_sid | upper }}_STDBY
+      register: oracle_clusterware_register
+      failed_when: oracle_clusterware_register.rc > 0
+      args:
+        creates: /etc/sap_deployment_automation/dgscripts/oracle_clusterware_registered.txt
+        chdir: /etc/sap_deployment_automation/dgscripts
+        executable: /bin/csh
+      when:
+        - node_tier == "oracle-asm"
+
+    - name: "Create oracle_clusterware_registered on secondary"
+      become: true
+      become_user: root
+      ansible.builtin.file:
+        path: /etc/sap_deployment_automation/dgscripts/oracle_clusterware_registered.txt
+        state: touch
+        mode: '0755'
+        owner: oracle
+        group: oinstall
+      when:
+        - node_tier == "oracle-asm"
+        - oracle_clusterware_register.rc == 0
+
+    - name: "Create post processing completed on secondary"
+      become: true
+      become_user: root
+      ansible.builtin.file:
+        path: /usr/sap/install/downloads/{{ db_sid | upper }}/post_processing_secondary_completed.txt
+        state: touch
+        mode: '0755'
+        owner: oracle
+        group: oinstall
+
+    # Create the user for SIDADM on the Secondary node.
+    - name: "2.5.1 SAP Users: - Create Oracle ASM Users Assignment"
+      ansible.builtin.user:
+        name: "{{ sap_sid | lower }}adm"
+        uid: "{{ sidadm_uid }}"
+        group: "sapsys"
+        groups: asmoper,asmdba,dba,oper,oinstall
+        append: true
+        shell: /bin/csh
+      # when: node_tier == "oracle-asm"
+
+    # Update the tnsnames.ora for the SAP application servers
+    - name: "Oracle Data Guard - Update tnsnames.ora for SAP application servers"
+      become: true
+      become_user: "root"
+      ansible.builtin.copy:
+        src: /etc/sap_deployment_automation/dgscripts/tnsnames.ora
+        dest: /sapmnt/{{ sap_sid | upper }}/profile/oracle/tnsnames.ora
+        remote_src: true
+        owner: "{{ sap_sid | lower }}adm"
+        group: sapsys
+        mode: "{{ '0777' | int - (custom_umask | default('022') | int) }}"
+
+  when:
+    - primary_post_processing_completed.stat.exists
+
+...
+ # /*---------------------------------------------------------------------------8 + # | END | + # +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml index 36c33e8263..140cb4ebaa 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml @@ -85,6 +85,7 @@ when: node_tier == "oracle-asm" + - name: "Oracle Data Guard for oracle asm - Preparation: create fralogs.sql" become: true become_user: "oracle" @@ -155,6 +156,56 @@ CREATE SPFILE FROM PFILE; exit mode: '0755' + when: + - node_tier == 'oracle' + + +- name: "Oracle Data Guard for ASM - Preparation: create createspfile.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/createspfilesecondary.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + CREATE spfile='+DATA/{{ db_sid | upper }}_STDBY/PARAMETERFILE/spfile{{ db_sid | upper }}.ora' from pfile='/oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora'; + exit + mode: '0755' + when: + - node_tier == 'oracle-asm' + +- name: "Oracle Data Guard for ASM - Preparation: create asmsecondarystartup.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/asmsecondarystartup.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + STARTUP MOUNT; + exit + mode: '0755' + when: + - node_tier == 'oracle-asm' + +# orasecondarystartup.sql +- name: "Oracle Data Guard - Preparation: create orasecondarystartup.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/orasecondarystartup.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + STARTUP MOUNT; + exit + mode: '0755' + when: + - node_tier == 'oracle' + - name: "Oracle Data Guard - Preparation: create secondarystartup.sql" become: true @@ -169,6 +220,22 @@ exit mode: '0755' + +- name: "Oracle Data Guard - Preparation: create asmsecondarystartup.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/asmsecondarystartup.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + STARTUP MOUNT; + exit + mode: '0755' + + + - name: "Oracle Data Guard - Preparation: create dgstatus.sql" become: true become_user: "oracle" @@ -326,6 +393,55 @@ when: current_host != ora_primary +# Create the sql script for updating the data guard specific parameters for Primary node. + +- name: "Oracle Data Guard - Preparation: Create the SQL for Dataguard parameters on Primary" + become: true + become_user: oracle + ansible.builtin.template: + backup: true + src: dbparametersfordg.j2 + dest: "/etc/sap_deployment_automation/dgscripts/dbparametersfordg.sql" + mode: '0644' + force: true + vars: + hostname: "{{ ora_primary }}" + sap_dbp_hostname: "{{ ora_primary }}" + sap_dbs_hostname: "{{ ora_secondary }}" + when: current_host == ora_primary + + +# Create the tnsnames.ora for SAP. 
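+# The rendered file is staged under /etc/sap_deployment_automation/dgscripts and
+# later copied to /sapmnt/<SID>/profile/oracle/tnsnames.ora by the secondary
+# post-processing tasks, so the SAP application servers receive connect entries
+# for both the primary (sap_dbp_hostname) and the standby (sap_dbs_hostname).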
+ +- name: "Oracle Data Guard - Create TNSNAMES.ORA for SAP Application servers" + become: true + become_user: oracle + ansible.builtin.template: + backup: true + src: tnsnamesforsap.j2 + dest: "/etc/sap_deployment_automation/dgscripts/tnsnames.ora" + mode: '0644' + force: true + vars: + hostname: "{{ ora_primary }}" + sap_dbp_hostname: "{{ ora_primary }}" + sap_dbs_hostname: "{{ ora_secondary }}" + + +# Create DB trigger sql for SAP HA Setup + +- name: "Oracle Data Guard - Preparation: Create the SQL for DBTrigger on Primary" + become: true + become_user: oracle + ansible.builtin.template: + backup: true + src: dbtrigger.j2 + dest: "/etc/sap_deployment_automation/dgscripts/dbtrigger.sql" + mode: '0644' + force: true + when: current_host == ora_primary + + - name: "Oracle Data Guard - Preparation: create dgconfig.sh" become: true become_user: oracle @@ -346,7 +462,23 @@ dest: "/etc/sap_deployment_automation/dgscripts/rman-restore.rman" mode: '0644' force: true - when: current_host == ora_secondary + when: + - current_host == ora_secondary + - node_tier == 'oracle' + +- name: "Oracle Data Guard - Preparation: create rman-restore.rman for oracle-asm" + become: true + become_user: oracle + ansible.builtin.template: + backup: true + src: rman-restore-asm.j2 + dest: "/etc/sap_deployment_automation/dgscripts/rman-restore.rman" + mode: '0644' + force: true + when: + - current_host == ora_secondary + - node_tier == 'oracle-asm' + - name: "Oracle Data Guard - Preparation: create secondarystartup.sql" become: true @@ -362,6 +494,57 @@ mode: '0755' when: current_host == ora_secondary +# Update the Local_Listener value for Secondary Node. +- name: "Oracle Local Listener Update - Preparation: create listenerupdate.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/listenerupdate.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + alter system set LOCAL_LISTENER="(ADDRESS=(PROTOCOL=TCP)(HOST={{ ora_secondary }})(PORT=1521))" SCOPE=both; + exit + mode: '0755' + when: current_host == ora_secondary + + +# Update the Local_Listener value for Primary Node. +- name: "Oracle Local Listener Update - Preparation: create listenerupdate.sql" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /etc/sap_deployment_automation/dgscripts/listenerupdate.sql + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + alter system set LOCAL_LISTENER="(ADDRESS=(PROTOCOL=TCP)(HOST={{ ora_primary }})(PORT=1521))" SCOPE=both; + exit + mode: '0755' + when: current_host == ora_primary + +# Enable Flashback for Secondary Node. 
+
+- name: "Oracle Data Guard - Preparation: create secondary_flashback_secondary.sql"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.blockinfile:
+    create: true
+    path: /etc/sap_deployment_automation/dgscripts/secondary_flashback_secondary.sql
+    marker_begin: "-- BEGIN"
+    marker_end: "-- END"
+    block: |
+      ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
+      ALTER DATABASE FLASHBACK ON;
+      ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION THROUGH LAST SWITCHOVER;
+      exit
+    mode: '0755'
+  when: current_host == ora_secondary
+
+
 # Create Directories for RESTORE on Secondary

 - name: "ORACLE: Create oracle SIDarch directory"
@@ -485,6 +668,19 @@
       exit
     mode: '0755'

+- name: "Oracle Data Guard - Preparation: Create ASM Directories for Secondary System"
+  become: true
+  become_user: oracle
+  ansible.builtin.template:
+    backup: true
+    src: asmfilesystems-secondary.j2
+    dest: "/etc/sap_deployment_automation/dgscripts/asmfilesystems-secondary.sh"
+    mode: '0755'
+    force: true
+  when:
+    - current_host == ora_secondary
+    - node_tier == "oracle-asm"
+
 ...
 # /*---------------------------------------------------------------------------8
 # |                                      END                                   |
diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml
index 10dc2228b9..d7ba6435b4 100644
--- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml
+++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml
@@ -147,7 +147,7 @@
     mode: '0755'
   when:
     - current_host == ora_primary
-    - standby_redo_log_results.rc < 2
+    - standby_redo_log_results.rc == 0

 - name: "Oracle Data Guard - Setup Primary: Enable Flashback on Oracle Primary DB"
   become: true
@@ -184,51 +184,66 @@
     - current_host == ora_primary
     - turn_on_flashback_results.rc == 0

-# STEP3 DGMGRL Config on Primary
-
-# Restart the LSNRCTL START
-# - name: "RESTART LSNRCTL ON PRIMARY"
+# - name: "Update the Local_Listener Value in primary"
 #   become: true
 #   become_user: "oracle"
-#   ansible.builtin.shell: lsnrctl reload
-#   register: lsnrctl_start_primary_results
-#   failed_when: lsnrctl_start_primary_results.rc > 0
+#   ansible.builtin.shell: sqlplus / as sysdba @listenerupdate.sql
+#   register: local_listener_on_secondary_results
+#   failed_when: local_listener_on_secondary_results.rc > 0
 #   args:
-#     creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_primary.txt
-#     chdir: /etc/sap_deployment_automation/dgscripts
-#     executable: /bin/csh
-#   when: current_host == ora_primary
+#     creates: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt
+#     chdir: /etc/sap_deployment_automation/dgscripts
+#     executable: /bin/csh
+#   when:
+#     - current_host == ora_primary

-# - name: Create lsnrctl_started_sec.txt
-#   become: true
-#   become_user: "oracle"
-#   ansible.builtin.file:
-#     path: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_primary.txt
-#     state: touch
-#     mode: 0755
-#   when: current_host == ora_primary

+# Setup DG parameters for Primary Database.
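+# The rendered dbparametersfordg.sql (see the dbparametersfordg.j2 template at
+# the end of this patch) issues ALTER SYSTEM statements such as
+#   ALTER SYSTEM SET log_archive_dest_state_1='ENABLE' SCOPE=BOTH;
+# to prepare the archive-log and online-log destination settings on the primary.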
+
+
+- name: "Oracle Data Guard - Setup Primary: Setup DG parameters on Primary DB"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.shell: sqlplus / as sysdba @dbparametersfordg.sql
+  register: dbparametersfordg_results
+  failed_when: dbparametersfordg_results.rc > 0
+  args:
+    creates: /etc/sap_deployment_automation/dgscripts/dbparametersfordg.txt
+    chdir: /etc/sap_deployment_automation/dgscripts
+    executable: /bin/csh
+  when:
+    - current_host == ora_primary
+    - node_tier == "oracle-asm"
+
+- name: "Oracle Data Guard - Setup Primary: Setup DG parameters on Primary DB (debug)"
+  ansible.builtin.debug:
+    var: dbparametersfordg_results.stdout_lines
+    verbosity: 2
+  when:
+    - node_tier == "oracle-asm"
+
+- name: "Oracle Data Guard - Setup Primary: Setup DG parameters on Primary DB (save output)"
+  ansible.builtin.copy:
+    dest: /etc/sap_deployment_automation/dgscripts/dbparametersfordg.log
+    content: "{{ dbparametersfordg_results.stdout }}"
+    mode: '0777'
+  when:
+    - node_tier == "oracle-asm"
+    - dbparametersfordg_results.stdout is defined
+
+- name: "Oracle Data Guard - Setup Primary: Create dbparametersfordg.txt"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.file:
+    path: /etc/sap_deployment_automation/dgscripts/dbparametersfordg.txt
+    state: touch
+    mode: '0755'
+  when:
+    - current_host == ora_primary
+    - node_tier == "oracle-asm"
+    - dbparametersfordg_results.rc == 0

-# - name: " DGMGRL CONFIG ON PRIMARY "
-#   become: true
-#   become_user: "oracle"
-#   ansible.builtin.shell: dgmgrl / as sysdba @dgconfig.dgmgrl | tee /etc/sap_deployment_automation/dgscripts/dgcreate.log
-#   register: dgconfig_results
-#   failed_when: dgconfig_results.rc > 0
-#   args:
-#     creates: /etc/sap_deployment_automation/dgscripts/dgconfig.txt
-#     chdir: /etc/sap_deployment_automation/dgscripts
-#     executable: /bin/csh
-#   when: current_host == ora_primary

-# - name: Create dgconfig.txt
-#   become: true
-#   become_user: "oracle"
-#   ansible.builtin.file:
-#     path: /etc/sap_deployment_automation/dgscripts/dgconfig.txt
-#     state: touch
-#     mode: 0755
-#   when: current_host == ora_primary

 ...
 # /*---------------------------------------------------------------------------8
diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml
index 89caa15f70..b9593de4c1 100644
--- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml
+++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml
@@ -40,9 +40,137 @@
     mode: "{{ '0777' | int - (custom_umask | default('022') | int) }}"
   when: current_host == ora_secondary

-# Restart the Listener on Secondary node.
+# Add additional parameters for Oracle ASM so the secondary matches the file locations of the Primary.
+# This has to be done to prevent RMAN from scattering restored files across the disk groups.
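+# Illustrative example (hypothetical file names, SID "ORA"): a parameter such as
+#   *.control_files='+DATA/ORA/cntrlORA.dbf'
+# in init<SID>.ora is rewritten by the replace task below to
+#   *.control_files='+DATA/ORA_STDBY/cntrlORA.dbf'
+# so the restored standby files land under the _STDBY directory tree.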
+
+- name: "Update the initSID.ora for changing the control file location"
+  ansible.builtin.replace:
+    path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora
+    regexp: '/{{ db_sid | upper }}/c'
+    replace: '/{{ db_sid | upper }}_STDBY/c'
+    backup: true
+  when:
+    - node_tier == "oracle-asm"
+
+- name: "File update wait for 15 sec to avoid multiple locks"
+  ansible.builtin.wait_for:
+    timeout: 15
+
+- name: "Update the initSID.ora for adopting the oraarch location"
+  ansible.builtin.replace:
+    path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora
+    regexp: '{{ db_sid | upper }}/oraarch'
+    replace: '{{ db_sid | upper }}_STDBY/oraarch'
+  when:
+    - node_tier == "oracle-asm"
+# The same replacement could also be done with sed: sed -i 's|/DBSID/c|/DBSID_STDBY/c|g'
+
+- name: "File update wait for 15 sec to avoid multiple locks"
+  ansible.builtin.wait_for:
+    timeout: 15
+
+- name: "Update the initSID.ora to delete the old local_listener value"
+  become: true
+  become_user: "root"
+  ansible.builtin.shell: |
+    sed -i '/local_listener=/d' /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora
+  register: local_listener_results
+  failed_when: local_listener_results.rc > 1
+
+# - name: "debug file update"
+#   fail:
+#     msg: "fail here for checking if the file is updated"
+
+- name: "File update wait for 15 sec to avoid multiple locks"
+  ansible.builtin.wait_for:
+    timeout: 15
+
+- name: "Replace the local listener entries in initSID.ora"
+  ansible.builtin.blockinfile:
+    path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora
+    marker_begin: "-- BEGIN"
+    marker_end: "-- END"
+    insertafter: ".db_name='{{ db_sid | upper }}'"
+    block: |
+      *.db_create_file_dest='+DATA'
+      *.db_unique_name ='{{ db_sid | upper }}_STDBY'
+      *.log_file_name_convert='{{ db_sid | upper }}','{{ db_sid | upper }}'
+      *.db_create_online_log_dest_1='+DATA'
+      *.db_create_online_log_dest_2='+DATA'
+
+  when:
+    - node_tier == "oracle-asm"
+
+# Replace the archive log destination in the initSID.ora file.
+# - name: "Update the initSID.ora for changing the control file location" +# ansible.builtin.replace: +# path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora +# regexp: "'log_archive_dest_1='LOCATION='" +# replace: "log_archive_dest_1='LOCATION=/oracle/{{ db_sid | upper }}/oraarch'" +# backup: true +# when: +# - node_tier == "oracle" + + +- name: "Replace the local listener entires in initSID.ora" + ansible.builtin.blockinfile: + path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora + marker_begin: "-- BEGIN" + marker_end: "-- END" + insertafter: ".db_name='{{ db_sid | upper }}'" + block: | + *.db_unique_name ='{{ db_sid | upper }}_STDBY' + *.log_file_name_convert='{{ db_sid | upper }}','{{ db_sid | upper }}' + *.local_listener='(Address=(Protocol=TCP)(Host={{ ora_secondary }})(Port=1521))' + + when: + - node_tier == "oracle" + +- name: "Oracle Data Guard - Setup Secondary: start lsnrctl on Secondary" + become: true + become_user: "oracle" + ansible.builtin.shell: lsnrctl start + register: lsnrctl_start_secondary_results + failed_when: lsnrctl_start_secondary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - current_host == ora_secondary + - node_tier == "oracle" + +- name: "Oracle Data Guard - Setup Secondary: started lsnrctl on Secondary (Debug)" + ansible.builtin.debug: + var: lsnrctl_start_secondary_results.stdout_lines + verbosity: 2 + +- name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (save output)" + ansible.builtin.copy: + dest: /etc/sap_deployment_automation/lsnrctl_start_primary.log + content: "{{ lsnrctl_start_secondary_results.stdout }}" + mode: '0777' + when: lsnrctl_start_secondary_results.stdout is defined + +- name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_started_sec.txt" + become: true + become_user: "oracle" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt + state: touch + mode: '0755' + when: + - node_tier == "oracle" + - current_host == ora_secondary + - lsnrctl_start_secondary_results.rc == 0 + + -- name: "Oracle Data Guard - Setup Secondary: stop lsnrctl on Secondary" +# Restart the Listener on Secondary node when the node_tier is Oracle-ASM. 
+ +- name: "ASM Oracle Data Guard - Setup Secondary: Stop lsnrctl on Secondary" become: true become_user: "oracle" ansible.builtin.shell: lsnrctl stop @@ -52,11 +180,12 @@ creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: - - platform == 'ORACLE-ASM' + when: + - node_tier == "oracle-asm" - current_host == ora_secondary + -- name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" +- name: "ASM Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" become: true become_user: "oracle" ansible.builtin.file: @@ -64,50 +193,54 @@ state: touch mode: '0755' when: - - platform == 'ORACLE-ASM' + - node_tier == "oracle-asm" - current_host == ora_secondary - lsnrctl_stop_secondary_results.rc == 0 + -- name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary" +- name: "Oracle Data Guard - Setup Secondary: Start lsnrctl on Secondary" become: true become_user: "oracle" ansible.builtin.shell: lsnrctl start - register: lsnrctl_start_secondary_results - failed_when: lsnrctl_start_secondary_results.rc > 0 + register: lsnrctl_asm_start_secondary_results + failed_when: lsnrctl_asm_start_secondary_results.rc > 0 args: - creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt + creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_asm_started_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: - - platform == 'ORACLE-ASM' + when: + - node_tier == "oracle-asm" - current_host == ora_secondary + - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (Debug)" ansible.builtin.debug: - var: lsnrctl_start_secondary_results.stdout_lines + var: lsnrctl_asm_start_secondary_results.stdout_lines verbosity: 2 - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (save output)" ansible.builtin.copy: dest: /etc/sap_deployment_automation/lsnrctl_start_primary.log - content: "{{ lsnrctl_start_secondary_results.stdout }}" + content: "{{ lsnrctl_asm_start_secondary_results.stdout }}" mode: '0777' - when: - - platform == 'ORACLE-ASM' - - current_host == ora_secondary - - lsnrctl_start_secondary_results.stdout is defined + when: lsnrctl_asm_start_secondary_results.stdout is defined - name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_started_sec.txt" become: true become_user: "oracle" ansible.builtin.file: - path: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt + path: /etc/sap_deployment_automation/dgscripts/lsnrctl_asm_started_sec.txt state: touch mode: '0755' when: - - platform == 'ORACLE-ASM' + - node_tier == "oracle-asm" - current_host == ora_secondary - - lsnrctl_start_secondary_results.rc == 0 + - lsnrctl_asm_start_secondary_results.rc == 0 + + +- name: "ASM Listener Starting: Sleep for 40 seconds and continue with play" + ansible.builtin.wait_for: + timeout: 40 - name: "Oracle Data Guard - Setup Secondary: Startup secondary DB using pfile" become: true @@ -147,36 +280,6 @@ - current_host == ora_secondary - secondary_startup_results.rc == 0 -# Create the oraarch and spfile parameter folder on ASM for recovery - -- name: "Oracle Data Guard - Create oraarch and parameter folders for oracle-asm" - become: true - become_user: "oracle" - ansible.builtin.shell: | - /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +RECO/{{ db_sid | upper }} - /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }} - 
/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}/oraarch
-    /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +DATA/{{ db_sid | upper }}/
-  register: asm_oraarch_created_results
-  failed_when: asm_oraarch_created_results.rc > 0
-  args:
-    executable: /bin/csh
-    creates: /etc/sap_deployment_automation/dgscripts/asm_oraarch_created.txt
-  when:
-    - node_tier == 'oracle-asm'
-    - current_host == ora_secondary
-
-- name: "Oracle Data Guard - Setup Secondary: Create asm_oraarch_created.txt"
-  become: true
-  become_user: "oracle"
-  ansible.builtin.file:
-    path: /etc/sap_deployment_automation/dgscripts/asm_oraarch_created.txt
-    state: touch
-    mode: '0755'
-  when:
-    - node_tier == 'oracle-asm'
-    - current_host == ora_secondary
-    - asm_oraarch_created_results.rc == 0

 - name: "Oracle Data Guard - Setup Secondary: Duplicate Secondary DB from Primary DB using RMAN"
   block:
@@ -285,6 +388,52 @@
       group: oinstall
     when: rman_results.rc == 0

+# Workaround for an Oracle bug: Duplicate for Standby fails with RMAN-05535 even when LOG_FILE_NAME_CONVERT is set (Doc ID 2756315.1).
+# Collect the required redo log files, renaming and creating them in the same directories as on the Primary.
+# Rename the broken redo files to the correct names and create the required directories.
+# Run the redo log clear to physically create the files.
+
+- name: "Oracle Secondary Redo log rename template"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.template:
+    backup: true
+    src: standbyredolog.j2
+    dest: "/etc/sap_deployment_automation/dgscripts/standbyredolog.sql"
+    mode: '0644'
+    force: true
+  when: node_tier == "oracle"
+
+
+- name: "Oracle Data Guard - Setup Secondary: Rename the redo log files after RMAN Restore"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.shell: |
+    set -o pipefail
+    sqlplus / as sysdba @standbyredolog.sql | tee /etc/sap_deployment_automation/dgscripts/standbyredolog.log
+  register: redo_rename_results
+  failed_when: redo_rename_results.rc > 0
+  args:
+    creates: /etc/sap_deployment_automation/dgscripts/redo_rename.txt
+    chdir: /etc/sap_deployment_automation/dgscripts
+    executable: /bin/csh
+  when:
+    - node_tier == "oracle"
+    - current_host == ora_secondary
+
+- name: "Oracle Data Guard - Setup Secondary: Create redo_rename.txt"
+  become: true
+  become_user: "root"
+  ansible.builtin.file:
+    path: /etc/sap_deployment_automation/dgscripts/redo_rename.txt
+    state: touch
+    mode: '0755'
+    owner: oracle
+    group: oinstall
+  when:
+    - node_tier == "oracle"
+    - redo_rename_results.rc == 0
+
 - name: "Oracle Data Guard - Setup Secondary: Create restore_completed.txt"
   become: true
   become_user: "root"
@@ -296,6 +445,364 @@
       group: oinstall
     when: rman_results.rc == 0

+# Create the oraarch and spfile parameter folders on ASM for recovery
+
+- name: "Create oraarch and parameter folders for oracle-asm"
+  become: true
+  become_user: "oracle"
+  ansible.builtin.shell: |
+    /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}_STDBY/oraarch
+    /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +DATA/{{ db_sid | upper }}_STDBY/PARAMETERFILE
+
+  register: asm_oraarch_created_results
+  failed_when: asm_oraarch_created_results.rc > 0
+  args:
+    executable: /bin/csh
+    creates: /etc/sap_deployment_automation/dgscripts/asm_oraarch_created.txt
+  when:
+    - node_tier == "oracle-asm"
+    - current_host == ora_secondary
+
+
+- name: "Oracle Data Guard - Setup Secondary: Create asm_oraarch_created.txt"
+  become: true
become_user: "oracle" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/asm_oraarch_created.txt + state: touch + mode: '0755' + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + - asm_oraarch_created_results.rc == 0 +#--------------------------------------------------------------------------------------------------------------- + +# Create the SPFILE from PFILE on Standby and Start the Standby Database using PFILE for non-ASM. +- name: "Oracle Data Guard - Setup Secondary: Create spfile on non-ASM" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @createspfile.sql | tee /etc/sap_deployment_automation/dgscripts/create_spfile.log + register: create_spfile_results + failed_when: create_spfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/spfile_created.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create spfile_created.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/spfile_created.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle" + - create_spfile_results.rc == 0 + - current_host == ora_secondary + +# Backup the PFILE and Create new PFILE refering to SPFILE. +- name: "Oracle Data Guard - Setup Secondary: Backup and remove pfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + mv init{{ db_sid | upper}}.ora init{{ db_sid | upper}}.ora_backup_after_restore + register: backup_pfile_results + failed_when: backup_pfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt + chdir: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs + executable: /bin/csh + when: + - node_tier == "oracle" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create pfile_backup_created.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle" + - backup_pfile_results.rc == 0 + - current_host == ora_secondary + +- name: "Update the Local_Listener Value in secondary" + become: true + become_user: "oracle" + ansible.builtin.shell: sqlplus / as sysdba @listenerupdate.sql + register: local_listener_on_secondary_results + failed_when: local_listener_on_secondary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - current_host == ora_secondary + +# Stop the Secondary Database. 
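+# Note: the stop/start cycle below is what actually moves the instance from the
+# pfile to the spfile created above; an instance only picks up a new server
+# parameter file on the next startup.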
+- name: "Oracle Data Guard - Setup Secondary: Stop secondary DB for spfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @shutdownsecondary.sql | tee /etc/sap_deployment_automation/dgscripts/ora_secondary_shutdown.log + register: secondary_shutdown_results + failed_when: secondary_shutdown_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/ora_secondary_shutdown.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create ora_secondary_shutdown.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/ora_secondary_shutdown.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle" + - secondary_shutdown_results.rc == 0 + - current_host == ora_secondary + +# # Create new PFILE with SPFILE Details for NON-ASM. +- name: "Oracle Data Guard - Preparation: create createspfile" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs/init{{ db_sid | upper }}.ora + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + spfile='/oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs/spfile{{ db_sid | upper }}.ora' + mode: '0755' + when: + - node_tier == "oracle" + - current_host == ora_secondary + + + +# Start the Secondary Database with the new SPFILE; +- name: "Oracle Data Guard - Setup Secondary: Start secondary DB with spfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @orasecondarystartup.sql | tee /etc/sap_deployment_automation/dgscripts/ora_secondary_startup_spfile.log + register: ora_secondary_startup_spfile_results + failed_when: ora_secondary_startup_spfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/ora_secondary_startup_spfile.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create ora_secondary_startup_spfile.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/ora_secondary_startup_spfile.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle" + - ora_secondary_startup_spfile_results.rc == 0 + - current_host == ora_secondary + + +#--------------------------------------------------------------------------------------------------------------- +# Create the SPFILE from PFILE on Standby and Start the Standby Database using PFILE. 
+- name: "Oracle Data Guard - Setup Secondary: Create spfile on ASM" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @createspfilesecondary.sql | tee /etc/sap_deployment_automation/dgscripts/create_spfile.log + register: create_spfile_results + failed_when: create_spfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/spfile_created.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create spfile_created.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/spfile_created.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle-asm" + - create_spfile_results.rc == 0 + - current_host == ora_secondary + +# Backup the PFILE and Create new PFILE refering to SPFILE. +- name: "Oracle Data Guard - Setup Secondary: Backup and remove pfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + mv init{{ db_sid | upper}}.ora init{{ db_sid | upper}}.ora_backup_after_restore + register: backup_pfile_results + failed_when: backup_pfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt + chdir: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create pfile_backup_created.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle-asm" + - backup_pfile_results.rc == 0 + - current_host == ora_secondary + + +# Stop the Secondary Database. +- name: "Oracle Data Guard - Setup Secondary: Stop secondary DB for spfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @shutdownsecondary.sql | tee /etc/sap_deployment_automation/dgscripts/asm_secondary_shutdown.log + register: secondary_shutdown_results + failed_when: secondary_shutdown_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/asm_secondary_shutdown.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create asm_secondary_shutdown.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/asm_secondary_shutdown.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle-asm" + - secondary_shutdown_results.rc == 0 + - current_host == ora_secondary + +# Create new PFILE with SPFILE Details for ASM. 
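+# The stub init<SID>.ora written below contains only a pointer line, e.g. for an
+# illustrative SID "ORA":
+#   spfile='+DATA/ORA_STDBY/PARAMETERFILE/spfileORA.ora'
+# so every subsequent startup reads the server parameter file from ASM.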
+- name: "Oracle Data Guard for ASM - Preparation: create createspfile" + become: true + become_user: "oracle" + ansible.builtin.blockinfile: + create: true + path: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs/init{{ db_sid | upper }}.ora + marker_begin: "-- BEGIN" + marker_end: "-- END" + block: | + spfile='+DATA/{{ db_sid | upper }}_STDBY/PARAMETERFILE/spfile{{ db_sid | upper }}.ora' + mode: '0755' + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + + +# Start the Secondary Database with the new SPFILE; +- name: "Oracle Data Guard - Setup Secondary: Start secondary DB with spfile" + become: true + become_user: "oracle" + ansible.builtin.shell: | + set -o pipefail + sqlplus / as sysdba @asmsecondarystartup.sql | tee /etc/sap_deployment_automation/dgscripts/asm_secondary_startup_spfile.log + register: asm_secondary_startup_spfile_results + failed_when: asm_secondary_startup_spfile_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/asm_secondary_startup_spfile.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: Create asm_secondary_startup_spfile.txt" + become: true + become_user: "root" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/asm_secondary_startup_spfile.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle-asm" + - asm_secondary_startup_spfile_results.rc == 0 + - current_host == ora_secondary + + + +- name: "Create local_listener_on_secondary on secondary" + become: true + become_user: root + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall + when: + - node_tier == "oracle-asm" + - local_listener_on_secondary_results.rc == 0 + - current_host == ora_secondary + + +# Enable the DataGaurd Broker - name: "Oracle Data Guard - Setup Secondary: startup DG Broker on Secondary" become: true become_user: "oracle" diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/asmfilesystems-secondary.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/asmfilesystems-secondary.j2 new file mode 100644 index 0000000000..22ca7f4473 --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/asmfilesystems-secondary.j2 @@ -0,0 +1,10 @@ +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +DATA/{{ db_sid | upper }} +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }} +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}/oraarch +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}/ARCHIVELOG +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}/CONTROLFILE +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}/ONLINELOG +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +RECO/{{ db_sid | upper }} +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +RECO/{{ db_sid | upper }}/AUTOBACKUP +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +RECO/{{ db_sid | upper }}/CONTROLFILE +/oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +RECO/{{ db_sid | upper }}/FLASHBACK \ No newline at end of file diff --git 
a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbparametersfordg.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbparametersfordg.j2 new file mode 100644 index 0000000000..aa23e2cace --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbparametersfordg.j2 @@ -0,0 +1,6 @@ +ALTER SYSTEM SET log_archive_dest_state_1='ENABLE' SCOPE=BOTH; +ALTER SYSTEM SET log_archive_max_processes=2 SCOPE=BOTH; +ALTER SYSTEM SET log_archive_min_succeed_dest=1 SCOPE=BOTH; +ALTER SYSTEM SET log_archive_trace=0 SCOPE=BOTH; +ALTER SYSTEM SET db_create_online_log_dest_2='+ARCH' SCOPE=BOTH; +exit; \ No newline at end of file diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbtrigger.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbtrigger.j2 new file mode 100644 index 0000000000..ca22464d73 --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/dbtrigger.j2 @@ -0,0 +1,13 @@ +exec dbms_service.create_service(service_name=>'{{ db_sid | upper }}_HA',network_name=>'{{ db_sid | upper }}_HA'); +exec dbms_service.start_service('{{ db_sid | upper }}_HA'); +CREATE OR REPLACE TRIGGER {{ db_sid | lower }}_HA_SERVICE +after startup on database +DECLARE +role VARCHAR(30); +BEGIN +SELECT DATABASE_ROLE INTO role FROM V$DATABASE; IF role = 'PRIMARY' THEN +DBMS_SERVICE.START_SERVICE('{{ db_sid | upper }}_HA'); +END IF; +END; +/ +exit; diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/listener_primary.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/listener_primary.j2 index c3289aa766..688d2497d5 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/listener_primary.j2 +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/listener_primary.j2 @@ -31,7 +31,7 @@ SID_LIST_LISTENER = (SID_DESC = (SID_NAME = {{ db_sid }}) (GLOBAL_DBNAME = {{ db_sid }}) - (ORACLE_HOME = /oracle/{{ db_sid }}/{{ ora_release }}) + (ORACLE_HOME = /oracle/{{ db_sid }}/{{ ora_release }}) ) (SID_DESC = (SID_NAME = {{ db_sid }}) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore-asm.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore-asm.j2 new file mode 100644 index 0000000000..709154db6a --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore-asm.j2 @@ -0,0 +1 @@ +DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE NOFILENAMECHECK; diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore.j2 index 88a1382554..2b8ad7fc1e 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore.j2 +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/rman-restore.j2 @@ -1 +1,3 @@ -DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE DORECOVER SPFILE SET db_unique_name='{{ db_sid | upper }}_STDBY' COMMENT 'Is standby' NOFILENAMECHECK; +DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE DORECOVER NOFILENAMECHECK; +#NOFILENAMECHECK; +#DUPLICATE TARGET DATABASE FOR STANDBY FROM ACTIVE DATABASE DORECOVER SPFILE SET db_unique_name='{{ db_sid | upper }}_STDBY' COMMENT 'Is standby' NOFILENAMECHECK; \ No newline at end of file diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_off.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_off.j2 new file mode 100644 index 0000000000..f5d53c977e --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_off.j2 @@ -0,0 +1 @@ +EDIT DATABASE {{ db_sid | upper }}_STDBY set state ='LOG-APPLY-OFF'; \ No newline at end of file diff --git 
a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_on.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_on.j2 new file mode 100644 index 0000000000..c04caa5ea1 --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/secondary_log_apply_on.j2 @@ -0,0 +1 @@ +EDIT DATABASE {{ db_sid | upper }}_STDBY set state ='ONLINE'; \ No newline at end of file diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/standbyredolog.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/standbyredolog.j2 new file mode 100644 index 0000000000..7b1e1fa84d --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/standbyredolog.j2 @@ -0,0 +1,21 @@ +alter database rename file 'broken0' to '/oracle/{{ db_sid | upper }}/mirrlogA/log_g11m2.dbf'; +alter database rename file 'broken1' to '/oracle/{{ db_sid | upper }}/origlogA/log_g11m1.dbf'; +alter database rename file 'broken2' to '/oracle/{{ db_sid | upper }}/mirrlogB/log_g12m2.dbf'; +alter database rename file 'broken3' to '/oracle/{{ db_sid | upper }}/origlogB/log_g12m1.dbf'; +alter database rename file 'broken4' to '/oracle/{{ db_sid | upper }}/mirrlogA/log_g13m2.dbf'; +alter database rename file 'broken5' to '/oracle/{{ db_sid | upper }}/origlogA/log_g13m1.dbf'; +alter database rename file 'broken6' to '/oracle/{{ db_sid | upper }}/mirrlogB/log_g14m2.dbf'; +alter database rename file 'broken7' to '/oracle/{{ db_sid | upper }}/origlogB/log_g14m1.dbf'; +alter database rename file 'broken8' to '/oracle/{{ db_sid | upper }}/oraarch/standbylog/srl1.dbf'; +alter database rename file 'broken9' to '/oracle/{{ db_sid | upper }}/oraarch/standbylog/srl2.dbf'; +alter database rename file 'broken10' to '/oracle/{{ db_sid | upper }}/oraarch/standbylog/srl3.dbf'; +alter database rename file 'broken11' to '/oracle/{{ db_sid | upper }}/oraarch/standbylog/srl4.dbf'; +alter database clear logfile group 1; +alter database clear logfile group 2; +alter database clear logfile group 3; +alter database clear logfile group 4; +alter database clear logfile group 5; +alter database clear logfile group 6; +alter database clear logfile group 7; +alter database clear logfile group 8; +exit; \ No newline at end of file diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnames_primary.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnames_primary.j2 index 55002748dc..8671f700cb 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnames_primary.j2 +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnames_primary.j2 @@ -17,6 +17,7 @@ ) (CONNECT_DATA = (SID = {{ db_sid }}) + (GLOBAL_NAME = {{ db_sid }}.WORLD) (SERVICE_NAME = {{ db_sid }}) ) ) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnamesforsap.j2 b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnamesforsap.j2 new file mode 100644 index 0000000000..ed9628cdc2 --- /dev/null +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/templates/tnsnamesforsap.j2 @@ -0,0 +1,29 @@ +################ +# Filename......: tnsnames.ora +# Created.......: created by SAP AG, R/3 Rel. 
>= 6.10 +# Name..........: +# Date..........: +# @(#) $Id: //inst/inst_scripts/lmts_004_REL/tpls/ora/TNSNAMES.ORA#4 $ +################ +{{ db_sid | upper}}.WORLD= + (DESCRIPTION = + (ADDRESS_LIST = + (ADDRESS = + (PROTOCOL = TCP) + (HOST = {{ sap_dbp_hostname }}) + (PORT = 1521) + ) + ) + (ADDRESS_LIST = + (ADDRESS = + (PROTOCOL = TCP) + (HOST = {{ sap_dbs_hostname }}) + (PORT = 1521) + ) + ) + (CONNECT_DATA = + (SID = {{ db_sid | upper}}) + (GLOBAL_NAME = {{ db_sid | upper }}.WORLD) + (SERVICE_NAME = {{ db_sid | upper }}_HA) + ) + ) \ No newline at end of file diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml index 9af9c44d91..a1a5abe502 100644 --- a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml +++ b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml @@ -39,4 +39,4 @@ changed_when: false when: - distribution_id in ['redhat9'] - - node_tier == 'ha' + - node_tier == 'ha' \ No newline at end of file diff --git a/deploy/ansible/roles-os/1.9-kernelparameters/tasks/main.yaml b/deploy/ansible/roles-os/1.9-kernelparameters/tasks/main.yaml index 0ec0aedee0..57cf3f12b4 100644 --- a/deploy/ansible/roles-os/1.9-kernelparameters/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.9-kernelparameters/tasks/main.yaml @@ -89,12 +89,12 @@ block: - name: "1.9 Kernel parameters - Calculate the Huge Pages when RAM < 4TB" ansible.builtin.set_fact: - huge_pages: "{{ ((ansible_memory_mb.real.total * 0.95 * 1024 * 1024) / (2 * 1024 * 1024)) | round | int }}" + huge_pages: "{{ ((ansible_memory_mb.real.total * 0.68 * 1024 * 1024) / (2 * 1024 * 1024)) | round | int }}" when: ansible_memory_mb.real.total < 4194304 - name: "1.9 Kernel parameters - Calculate the Huge Pages when RAM > 4TB " ansible.builtin.set_fact: - huge_pages: "{{ ((ansible_memory_mb.real.total * 0.95 * 1024 * 1024) / (2 * 1024 * 1024)) | round | int }}" + huge_pages: "{{ ((ansible_memory_mb.real.total * 0.75 * 1024 * 1024) / (2 * 1024 * 1024)) | round | int }}" when: ansible_memory_mb.real.total > 4194304 # print only when -vv, otherwise you will have terminal nausea @@ -119,8 +119,8 @@ insertafter: '#@student - maxlogins 4' state: present block: | - oracle soft memlock unlimited - oracle hard memlock unlimited + oracle soft memlock unlimited + oracle hard memlock unlimited @sapsys hard nproc unlimited @sapsys soft nproc unlimited @dba hard nproc unlimited diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.6-oracle-nfs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.6-oracle-nfs-mounts.yaml index b063d47278..72318c610b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.6-oracle-nfs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.6-oracle-nfs-mounts.yaml @@ -22,9 +22,10 @@ group: "{{ item.group }}" recurse: true loop: - - { path: '{{ target_media_location }}/SBP', group: 'oinstall', owner: 'oracle'} - - { path: '{{ target_media_location }}/oraclient', group: 'oinstall', owner: 'oracle'} - - { path: '{{ target_media_location }}/oraserver', group: 'oinstall', owner: 'oracle'} + - { path: '{{ target_media_location }}/SBP', group: 'oinstall', owner: 'oracle'} + - { path: '{{ target_media_location }}/oraclient', group: 'oinstall', owner: 'oracle'} + - { path: '{{ target_media_location }}/oraserver', group: 'oinstall', owner: 'oracle'} + - { path: '{{ target_media_location }}/SBP/GSBP', group: 'oinstall', owner: 
'oracle'}
   when:
     - node_tier != "observer"
diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml
index 130b085356..3c484f5a75 100644
--- a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml
+++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml
@@ -1,13 +1,15 @@
 - name: "ORACLE Post Processing: Set Variables"
   ansible.builtin.set_fact:
     PAS: 'pas'
-    pga_temp_l4tb_std: "{{ ((ansible_memory_mb.real.total * 0.7) * 0.2) | round | int }}"
-    pga_temp_l4tb_dis: "{{ ((ansible_memory_mb.real.total * 0.85) * 0.2) | round | int }}"
+    pga_temp_l4tb_std: "{{ ((ansible_memory_mb.real.total*0.60)*0.2) |round |int }}"
+    pga_temp_l4tb_dis: "{{ ((ansible_memory_mb.real.total*0.80)*0.2) |round |int }}"
     # pga_temp_g4tb: "{{ ((ansible_memory_mb.real.total*0.85)*0.2) |round |int }}"
+
+# PGA & SGA for RAM < 4TB
 - name: "ORACLE Post Processing: Set the SGA and PGA Sizes for RAM < 4TB"
   ansible.builtin.set_fact:
     main_mem1: "{{ ansible_memory_mb.real.total }}"
-    ora_sga: "{{ (ansible_memory_mb.real.total * 0.85) | round | int }}"
+    ora_sga: "{{ (ansible_memory_mb.real.total * 0.60) | round | int }}"
     ora_pga: "{{ [(pga_temp_l4tb_std | int), 4194304] | min }}"
   when:
@@ -16,7 +18,7 @@
 - name: "ORACLE Post Processing: Set the SGA and PGA Sizes for RAM < 4TB Single Node Deployments"
   ansible.builtin.set_fact:
     main_mem1: "{{ ansible_memory_mb.real.total }}"
-    ora_sga: "{{ (ansible_memory_mb.real.total * 0.7) | round | int }}"
+    ora_sga: "{{ (ansible_memory_mb.real.total * 0.75) | round | int }}"
     ora_pga: "{{ [(pga_temp_l4tb_std | int), 4194304] | min }}"
   when:
     - ansible_memory_mb.real.total < 4194304
@@ -31,13 +33,18 @@
 - name: "Set the SGA and PGA Sizes for RAM > 4TB"
   ansible.builtin.set_fact:
-    ora_sga: "{{ (ansible_memory_mb.real.total * 0.95) | round | int }}"
-    ora_pga: "{{ ((ansible_memory_mb.real.total * 0.95) * 0.2) | round | int }}"
+    ora_sga: "{{ (ansible_memory_mb.real.total * 0.65) | round | int }}"
+    ora_pga: "{{ ((ansible_memory_mb.real.total*0.65)*0.2) |round |int }}"
   when:
     - ansible_memory_mb.real.total > 4194304
     - supported_tiers != "pas"
     - supported_tiers == "scs"
+# Block to check if a reboot has been performed already after updating SGA & PGA
+- name: "DBLoad: - check if DBLoad is performed for {{ sid_to_be_deployed.sid | upper }}"
+  ansible.builtin.stat:
+    path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/ora_sga_updated.txt"
+  register: sga_update_status

 - name: "ORACLE Post Processing: Oracle SGA & PGA: create updatesga.sql"
   become: true
@@ -50,10 +57,12 @@
     block: |
       ALTER SYSTEM SET sga_max_size={{ ora_sga }}M SCOPE=spfile;
       ALTER SYSTEM SET pga_aggregate_target={{ ora_pga }}M SCOPE=spfile;
+      ALTER SYSTEM SET use_large_pages=only SCOPE=spfile;
+      alter system set LOCAL_LISTENER="(ADDRESS=(PROTOCOL=TCP)(HOST={{ ansible_hostname }})(PORT=1521))" scope=both;
       SHUTDOWN IMMEDIATE;
-      exit
+      exit;
     mode: '0755'
-
+
 - name: "ORACLE Post Processing: Oracle Startup: create startup.sql"
   become: true
   become_user: "oracle"
@@ -67,6 +76,7 @@
       exit
     mode: '0755'
+
 - name: "ORACLE Post Processing: Oracle SGA Change Execution"
   become: true
   become_user: "oracle"
@@ -88,6 +98,24 @@
     mode: '0755'
   when: updatesga_results.rc == 0

+# Wait for creation of HugePages
+# Reboot the VM to avoid the error "ORA-27102: out of memory"
+
+- name: "DB VM reboot"
+  block:
+
+    - name: "Oracle post-processing: Reboot after the Enabling HugePages"
+      ansible.builtin.reboot:
+
reboot_timeout: 300 + + # Wait for Connection after reboot + - name: "Wait for Connection after reboot" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 + when: + - not sga_update_status.stat.exists + - name: "ORACLE Post Processing: Start Oracle after SGA Change" become: true become_user: "oracle" @@ -101,6 +129,28 @@ chdir: /etc/sap_deployment_automation/{{ db_sid | upper }} executable: /bin/csh +# Create a block for starting the oracle listener on primary node +- name: "Oracle Post-Processing - start lsnrctl on Primary" + become: true + become_user: "oracle" + ansible.builtin.shell: lsnrctl start + register: lsnrctl_start_primary_results + failed_when: lsnrctl_start_primary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/{{ db_sid | upper }}/lsnrctl_started.txt + chdir: /etc/sap_deployment_automation/{{ db_sid | upper }} + executable: /bin/csh + +- name: "Oracle Post-Processing: Create lsnrctl_started.txt" + become: true + become_user: "oracle" + ansible.builtin.file: + path: /etc/sap_deployment_automation/{{ db_sid | upper }}/lsnrctl_started.txt + state: touch + mode: '0755' + when: + - lsnrctl_start_primary_results.rc == 0 + - name: "ORACLE Post Processing: Create db_startup_completed.txt" ansible.builtin.file: path: /etc/sap_deployment_automation/{{ db_sid | upper }}/db_startup_completed.txt diff --git a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml index e71d141b3b..b4700b3d4b 100644 --- a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml @@ -334,4 +334,4 @@ ... # /*---------------------------------------------------------------------------8 # | END | -# +------------------------------------4--------------------------------------*/ +# +------------------------------------4--------------------------------------*/ \ No newline at end of file diff --git a/deploy/ansible/vars/disks_config.yml b/deploy/ansible/vars/disks_config.yml index 35a3c9d37c..9362c37733 100644 --- a/deploy/ansible/vars/disks_config.yml +++ b/deploy/ansible/vars/disks_config.yml @@ -27,6 +27,7 @@ disk_type_to_name_map: mirrloga: '{{ node_tier | lower }}_mirrlogA' mirrlogb: '{{ node_tier | lower }}_mirrlogB' oraarch: '{{ node_tier | lower }}_oraarch' + oraflash: '{{ node_tier | lower }}_oraflash' # orabackup: '{{ node_tier | lower }}_orabackup' # ------------------- Begin - disktypes required for DB2 -------------------8 sapdata: '{{ node_tier | lower }}_sapdata' @@ -215,6 +216,16 @@ logical_volumes: size: '100%FREE' fstype: 'xfs' +# Add disk for Oracle Flash Recovery Area + - tier: 'sapos' + node_tier: 'oracle' + vg: 'vg_oracle_oraflash' + lv: 'lv_oracle_oraflash' + size: '100%FREE' + stripesize: "{{ oracle_log_stripe_size }}" + fstype: 'xfs' + + - tier: 'sapos' node_tier: 'oracle' vg: 'vg_orabackup' From e1e14a35e1266720d6ab93d59fc6e0bea281adc1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 16 Mar 2024 12:38:48 +0200 Subject: [PATCH 462/607] Remove whitespace --- .../ansible/playbook_03_bom_processing.yaml | 1 - .../ora-dg-postprocessing-secondary.yaml | 36 ++++--- .../tasks/ora-dg-preparation.yaml | 12 +-- .../tasks/ora-dg-setup-primary.yaml | 10 +- .../tasks/ora-dg-setup-secondary.yaml | 96 +++++++++---------- 5 files changed, 67 insertions(+), 88 deletions(-) diff --git a/deploy/ansible/playbook_03_bom_processing.yaml b/deploy/ansible/playbook_03_bom_processing.yaml index 928a3c53b8..c89ebaaff5 100644 --- 
a/deploy/ansible/playbook_03_bom_processing.yaml +++ b/deploy/ansible/playbook_03_bom_processing.yaml @@ -68,7 +68,6 @@ sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" when: hostvars.localhost.sapbits_sas_token is defined - - name: 3.3-bom-processing role for Linux become: true when: ansible_os_family != "Windows" diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml index 4839c3b26e..4d7f335691 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-secondary.yaml @@ -30,8 +30,6 @@ mode: '0644' force: true - - - name: "Execute Block only if secondary DB restore is completed" block: @@ -46,7 +44,7 @@ creates: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_off.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - + - name: "Oracle Data Guard - Post Processing: Set Log apply off on Standby (Debug)" ansible.builtin.debug: @@ -69,7 +67,7 @@ mode: '0755' when: secondary_log_apply_off_results.rc == 0 -# Enable Flashback on Secondary +# Enable Flashback on Secondary - name: "Oracle Data Guard - Enable Flashback on Oracle Secondary DB" become: true become_user: "oracle" @@ -91,7 +89,7 @@ content: "{{ turn_on_flashback_secondary_results.stdout }}" mode: '0755' when: turn_on_flashback_secondary_results.stdout is defined - + - name: "Oracle Data Guard - Setup Primary: Create turn_on_flashback_secondary.txt" become: true become_user: "oracle" @@ -101,7 +99,7 @@ mode: '0755' when: - turn_on_flashback_secondary_results.rc == 0 - + # Enable the log apply on secondary DB after enabling Flashback - name: "Oracle Data Guard - Post Processing: Set Log apply on on Standby" become: true @@ -113,7 +111,7 @@ creates: /etc/sap_deployment_automation/dgscripts/secondary_log_apply_on.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - + - name: "Oracle Data Guard - Post Processing: Set Log apply off on Standby (Debug)" ansible.builtin.debug: @@ -149,10 +147,10 @@ creates: /etc/sap_deployment_automation/dgscripts/secondary_shutdown_cfg.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - current_host == ora_secondary - - + + - name: "Oracle Data Guard - Setup Secondary: Create secondary_shutdown.txt" become: true become_user: "root" @@ -162,7 +160,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - secondary_shutdown_cfg_results.rc == 0 - current_host == ora_secondary @@ -180,7 +178,7 @@ creates: /etc/sap_deployment_automation/dgscripts/secondary_startup_cfg.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary @@ -194,12 +192,12 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - secondary_startup_cfg_results.rc == 0 - current_host == ora_secondary - + - name: "Oracle CLusterware Restart Configuration" become: true become_user: "oracle" @@ -225,7 +223,7 @@ owner: oracle group: oinstall when: - - node_tier == "oracle-asm" + - node_tier == "oracle-asm" - oracle_clusterware_register.rc == 0 - name: "Create post processing completed on secondary" @@ -237,7 +235,7 @@ mode: '0755' owner: oracle group: oinstall - + # Create User for SIDADM on Secondary Node. 
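The task below relies on the append semantics of ansible.builtin.user: with append: true, the groups listed are added to the account's existing secondary groups instead of replacing them. A hedged sketch of the same call shape, with hypothetical account and group names (the real values come from the role's variables):

- name: "Add the <sid>adm account to the Oracle groups"
  ansible.builtin.user:
    name: qasadm                # hypothetical <sid>adm account
    groups: oinstall,asmoper    # hypothetical Oracle groups
    append: true                # keep the account's existing groups
    shell: /bin/csh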
- name: "2.5.1 SAP Users: - Create Oracle ASM Users Assignment" @@ -249,9 +247,7 @@ append: true shell: /bin/csh # when: node_tier == "oracle-asm" - - - + # Update the tnsnames.ora for SAP application servers - name: "Oracle Data Guard - Update tnsnames.ora for SAP application servers" @@ -264,7 +260,7 @@ owner: '{{sap_sid |lower }}adm' group: sapsys mode: "{{ '0777' | int - (custom_umask | default('022') | int) }}" - + when: - primary_post_processing_completed.stat.exists diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml index 140cb4ebaa..e90b5d78b0 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-preparation.yaml @@ -84,8 +84,6 @@ mode: '0755' when: node_tier == "oracle-asm" - - - name: "Oracle Data Guard for oracle asm - Preparation: create fralogs.sql" become: true become_user: "oracle" @@ -234,8 +232,6 @@ exit mode: '0755' - - - name: "Oracle Data Guard - Preparation: create dgstatus.sql" become: true become_user: "oracle" @@ -462,7 +458,7 @@ dest: "/etc/sap_deployment_automation/dgscripts/rman-restore.rman" mode: '0644' force: true - when: + when: - current_host == ora_secondary - node_tier == 'oracle' @@ -475,7 +471,7 @@ dest: "/etc/sap_deployment_automation/dgscripts/rman-restore.rman" mode: '0644' force: true - when: + when: - current_host == ora_secondary - node_tier == 'oracle-asm' @@ -543,8 +539,6 @@ mode: '0755' when: current_host == ora_secondary - - # Create Directories for RESTORE on Secondary - name: "ORACLE: Create oracle SIDarch directory" @@ -677,7 +671,7 @@ dest: "/etc/sap_deployment_automation/dgscripts/asmfilesystems-secondary.sh" mode: '0755' force: true - when: + when: - current_host == ora_secondary - node_tier == "oracle-asm" diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml index d7ba6435b4..64a0f33118 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-primary.yaml @@ -194,7 +194,7 @@ # creates: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt # chdir: /etc/sap_deployment_automation/dgscripts # executable: /bin/csh -# when: +# when: # - current_host == ora_primary @@ -211,7 +211,7 @@ creates: /etc/sap_deployment_automation/dgscripts/dbparametersfordg.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - current_host == ora_primary - node_tier == "oracle-asm" @@ -219,7 +219,7 @@ ansible.builtin.debug: var: dbparametersfordg_results.stdout_lines verbosity: 2 - when: + when: - node_tier == "oracle-asm" - name: "Oracle Data Guard - Setup Primary: Enable Flashback on Oracle Primary DB (save output)" @@ -227,7 +227,7 @@ dest: /etc/sap_deployment_automation/dgscripts/dbparametersfordg.log content: "{{ turn_on_flashback_results.stdout }}" mode: '0777' - when: + when: - node_tier == "oracle-asm" - dbparametersfordg_results.stdout is defined @@ -243,8 +243,6 @@ - node_tier == "oracle-asm" - dbparametersfordg_results.rc == 0 - - ... 
# /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index b9593de4c1..4825c562e8 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -76,10 +76,10 @@ sed -i '/local_listener=/d' /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora register: local_listener_results failed_when: local_listener_results.rc > 1 - + # - name: "debug file update" -# fail: +# fail: # msg: "fail here for checking if the file is updated" - name: "File update wait for 15 sec to avoid multiple locks" @@ -99,7 +99,7 @@ *.log_file_name_convert='{{ db_sid | upper }}','{{ db_sid | upper }}' *.db_create_online_log_dest_1='+DATA' *.db_create_online_log_dest_2='+DATA' - + when: - node_tier == "oracle-asm" @@ -123,11 +123,11 @@ block: | *.db_unique_name ='{{ db_sid | upper }}_STDBY' *.log_file_name_convert='{{ db_sid | upper }}','{{ db_sid | upper }}' - *.local_listener='(Address=(Protocol=TCP)(Host={{ ora_secondary }})(Port=1521))' + *.local_listener='(Address=(Protocol=TCP)(Host={{ ora_secondary }})(Port=1521))' when: - node_tier == "oracle" - + - name: "Oracle Data Guard - Setup Secondary: start lsnrctl on Secondary" become: true become_user: "oracle" @@ -138,7 +138,7 @@ creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_started_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - current_host == ora_secondary - node_tier == "oracle" @@ -165,8 +165,6 @@ - node_tier == "oracle" - current_host == ora_secondary - lsnrctl_start_secondary_results.rc == 0 - - # Restart the Listener on Secondary node when the node_tier is Oracle-ASM. 
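The blockinfile edits above only lose their trailing whitespace in this commit; for clarity, once Jinja2 substitutes the facts they resolve to plain init<SID>.ora parameters. Assuming a hypothetical SID of QAS and a secondary host named qasdb01 (neither value is from this repository), the non-ASM block renders roughly as:

*.db_unique_name ='QAS_STDBY'
*.log_file_name_convert='QAS','QAS'
*.local_listener='(Address=(Protocol=TCP)(Host=qasdb01)(Port=1521))'

db_unique_name is the name that the broker and restore scripts elsewhere in this patch address as <SID>_STDBY.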
@@ -180,10 +178,10 @@ creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "ASM Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" become: true @@ -196,7 +194,7 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - lsnrctl_stop_secondary_results.rc == 0 - + - name: "Oracle Data Guard - Setup Secondary: Start lsnrctl on Secondary" become: true @@ -208,10 +206,10 @@ creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_asm_started_sec.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (Debug)" ansible.builtin.debug: @@ -236,7 +234,7 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - lsnrctl_asm_start_secondary_results.rc == 0 - + - name: "ASM Listener Starting: Sleep for 40 seconds and continue with play" ansible.builtin.wait_for: @@ -417,7 +415,7 @@ creates: /etc/sap_deployment_automation/dgscripts/redo_rename.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary @@ -430,7 +428,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - redo_rename_results.rc == 0 @@ -453,7 +451,7 @@ ansible.builtin.shell: | /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +ARCH/{{ db_sid | upper }}_STDBY/oraarch /oracle/GRID/{{ ora_version }}/bin/asmcmd --privilege sysdba mkdir +DATA/{{ db_sid | upper }}_STDBY/PARAMETERFILE - + register: asm_oraarch_created_results failed_when: asm_oraarch_created_results.rc > 0 args: @@ -462,7 +460,7 @@ when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create asm_oraarch_created.txt" become: true @@ -475,7 +473,7 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - asm_oraarch_created_results.rc == 0 -#--------------------------------------------------------------------------------------------------------------- +#--------------------------------------------------------------------------------------------------------------- # Create the SPFILE from PFILE on Standby and Start the Standby Database using PFILE for non-ASM. 
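The createspfilesecondary.sql script executed by the non-ASM task below is not part of this diff. As an assumption for illustration only, a minimal non-ASM variant reduces to a single SQL*Plus command:

CREATE SPFILE FROM PFILE;
exit;

With no paths given, SQL*Plus resolves both files under $ORACLE_HOME/dbs, which is why the surrounding tasks stage and back up init{{ db_sid | upper }}.ora in that directory. The ASM variant differs only in where the SPFILE is written; see the sketch in the ASM section further down.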
- name: "Oracle Data Guard - Setup Secondary: Create spfile on non-ASM" @@ -490,10 +488,10 @@ creates: /etc/sap_deployment_automation/dgscripts/spfile_created.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create spfile_created.txt" become: true @@ -504,7 +502,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - create_spfile_results.rc == 0 - current_host == ora_secondary @@ -522,10 +520,10 @@ creates: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt chdir: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create pfile_backup_created.txt" become: true @@ -536,7 +534,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - backup_pfile_results.rc == 0 - current_host == ora_secondary @@ -551,8 +549,8 @@ creates: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: - - current_host == ora_secondary + when: + - current_host == ora_secondary # Stop the Secondary Database. - name: "Oracle Data Guard - Setup Secondary: Stop secondary DB for spfile" @@ -567,10 +565,10 @@ creates: /etc/sap_deployment_automation/dgscripts/ora_secondary_shutdown.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create ora_secondary_shutdown.txt" become: true @@ -581,7 +579,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - secondary_shutdown_results.rc == 0 - current_host == ora_secondary @@ -602,8 +600,6 @@ - node_tier == "oracle" - current_host == ora_secondary - - # Start the Secondary Database with the new SPFILE; - name: "Oracle Data Guard - Setup Secondary: Start secondary DB with spfile" become: true @@ -617,10 +613,10 @@ creates: /etc/sap_deployment_automation/dgscripts/ora_secondary_startup_spfile.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create ora_secondary_startup_spfile.txt" become: true @@ -631,7 +627,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle" - ora_secondary_startup_spfile_results.rc == 0 - current_host == ora_secondary @@ -651,10 +647,10 @@ creates: /etc/sap_deployment_automation/dgscripts/spfile_created.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create spfile_created.txt" become: true @@ -665,7 +661,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle-asm" - create_spfile_results.rc == 0 - current_host == ora_secondary @@ -683,10 +679,10 @@ creates: /etc/sap_deployment_automation/dgscripts/pfile_backup_created.txt chdir: /oracle/{{ db_sid |upper }}/{{ ora_release}}/dbs executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create pfile_backup_created.txt" become: true @@ -697,7 
+693,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle-asm" - backup_pfile_results.rc == 0 - current_host == ora_secondary @@ -716,10 +712,10 @@ creates: /etc/sap_deployment_automation/dgscripts/asm_secondary_shutdown.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create asm_secondary_shutdown.txt" become: true @@ -730,7 +726,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle-asm" - secondary_shutdown_results.rc == 0 - current_host == ora_secondary @@ -751,8 +747,6 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - - # Start the Secondary Database with the new SPFILE; - name: "Oracle Data Guard - Setup Secondary: Start secondary DB with spfile" become: true @@ -766,10 +760,10 @@ creates: /etc/sap_deployment_automation/dgscripts/asm_secondary_startup_spfile.txt chdir: /etc/sap_deployment_automation/dgscripts executable: /bin/csh - when: + when: - node_tier == "oracle-asm" - current_host == ora_secondary - + - name: "Oracle Data Guard - Setup Secondary: Create asm_secondary_startup_spfile.txt" become: true @@ -780,13 +774,11 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle-asm" - asm_secondary_startup_spfile_results.rc == 0 - current_host == ora_secondary - - - name: "Create local_listener_on_secondary on secondary" become: true become_user: root @@ -796,7 +788,7 @@ mode: '0755' owner: oracle group: oinstall - when: + when: - node_tier == "oracle-asm" - local_listener_on_secondary_results.rc == 0 - current_host == ora_secondary From 368aceb40da425dbf8b0c9df44293e6c00b45522 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 16 Mar 2024 12:42:21 +0200 Subject: [PATCH 463/607] Linting --- .../ansible/playbook_04_00_00_db_install.yaml | 2 +- .../4.1.2-ora-asm-db-install/tasks/main.yaml | 21 +++++++++---------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 7a606ba769..d0b18a5847 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -401,7 +401,7 @@ main_password: "{{ hostvars.localhost.sap_password }}" tags: - always - + - name: "Configure accounts for oracle" ansible.builtin.include_role: name: roles-os/1.11-accounts diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index 35fdb2c0d8..4f2b56682c 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -68,7 +68,6 @@ register: oracle_installed - # /*---------------------------------------------------------------------------8 # | Start of Oracle software installation using SAP RUNINSTALLER wrapper. 
| # | Before running Installer set DB_SID and CV_ASSUME_DISTID according to | @@ -238,16 +237,16 @@ cp -rp /oracle/GRID/{{ ora_version }} /oracle/GRID/{{ ora_version }}.bck register: gridbackup args: - creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt + creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt executable: /bin/csh - + - name: "Oracle ASM: Create flag gridbackedup.txt " ansible.builtin.file: - path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt + path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt state: touch mode: '0755' - when: gridbackup.rc == 0 + when: gridbackup.rc == 0 - name: "Oracle ASM: Check if 'OPatch.bck' exists" ansible.builtin.stat: @@ -398,7 +397,7 @@ # - name: "Preventing orachk Issue" # become: true # become_user: "root" - # ansible.builtin.shell: | + # ansible.builtin.shell: | # set -o errexit # set -o pipefail # chown -R oracle:oinstall $OHGRID/suptools/orachk @@ -424,7 +423,7 @@ # when: orachkdeletion.rc == 0 # # STEP 4.2.2 SBP Patching for Oracle GRID. - + # # Copy the GSBP patch to local folder to avoid failures. # # - name: "Copy the GRID SBP Patches to local folders" @@ -448,7 +447,7 @@ # - name: "debug 2311 Patching" - # fail: + # fail: # msg: "fail here for manual GRID SBP installation" # - name: "File update wait for 15 sec to avoid multiple locks" @@ -483,7 +482,7 @@ # args: # chdir: /etc/sap_deployment_automation/{{ db_sid | upper }} # executable: /bin/csh - + # - name: "Remove the Link Files created in the previous run" # become: true # become_user: "oracle" @@ -493,7 +492,7 @@ # args: # chdir: "{{ target_media_location }}/SBP" # executable: /bin/csh - + # - name: "Oracle ASM: Prepare for GRID SBP Patching" # become: true # become_user: "root" @@ -522,7 +521,7 @@ # creates: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/gridsbp_installed.txt" # chdir: "{{ target_media_location }}/SBP/GSBP" # executable: /bin/csh - + # - name: "Oracle ASM: Pre Processing reset permissions GRID" # ansible.builtin.file: From 50d96c2f295ab23d1c7d5539d90844fab6721fc1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 16 Mar 2024 12:53:02 +0200 Subject: [PATCH 464/607] More linting --- .../roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml | 6 +----- .../tasks/ora-dg-postprocessing-primary.yaml | 2 +- .../4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml | 4 ++-- .../tasks/1.3.1-repositories-RedHat.yaml | 2 +- .../5.1-dbload/tasks/oracle-postprocessing.yaml | 10 +++++----- .../roles-sap/7.0.0-post-install/tasks/main.yaml | 2 +- 6 files changed, 11 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index 4f2b56682c..992e55e290 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -67,7 +67,7 @@ path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/grid_sbp_installed.txt" register: oracle_installed - +oles-db # /*---------------------------------------------------------------------------8 # | Start of Oracle software installation using SAP RUNINSTALLER wrapper. 
| # | Before running Installer set DB_SID and CV_ASSUME_DISTID according to | @@ -370,8 +370,6 @@ # ansible.builtin.wait_for: # timeout: 30 - - # # Debug for testing # - name: "Oracle ASM: Prepare for GRID SBP Patching print output" # ansible.builtin.debug: @@ -607,8 +605,6 @@ mopatch_path: "{{ mopatch_directory.files[0].path }}" when: mopatch_directory.matched == 1 - - - name: "Oracle ASM: Check if 'OPatch.bck' exists" ansible.builtin.stat: path: /oracle/{{ db_sid | upper }}/{{ ora_version }}/OPatch.bck diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml index 69edb8861a..b0300923f5 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml @@ -154,7 +154,7 @@ state: touch mode: '0755' when: enable_dgconfig_results.rc == 0 - + # Enable the DB trigger for SAP HA - name: "Oracle Data Guard - Post Processing: Enable DB Trigger" become: true diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index 4825c562e8..542a909c2e 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -473,7 +473,7 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - asm_oraarch_created_results.rc == 0 -#--------------------------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------- # Create the SPFILE from PFILE on Standby and Start the Standby Database using PFILE for non-ASM. - name: "Oracle Data Guard - Setup Secondary: Create spfile on non-ASM" @@ -633,7 +633,7 @@ - current_host == ora_secondary -#--------------------------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------- # Create the SPFILE from PFILE on Standby and Start the Standby Database using PFILE. 
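As in the non-ASM case, the SQL behind the ASM task below is not shown in this patch. A hedged sketch of an ASM-flavoured createspfilesecondary.sql, assuming the +DATA layout that this series prepares with asmcmd mkdir and points the generated init file at:

CREATE SPFILE='+DATA/<DBSID>_STDBY/PARAMETERFILE/spfile<DBSID>.ora' FROM PFILE;
exit;

The target matches the spfile='+DATA/..._STDBY/PARAMETERFILE/...' pointer written by the "create createspfile" blockinfile, so an instance started once from the plain PFILE picks up the ASM-resident SPFILE on subsequent startups.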
- name: "Oracle Data Guard - Setup Secondary: Create spfile on ASM" become: true diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml index a1a5abe502..9af9c44d91 100644 --- a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml +++ b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-RedHat.yaml @@ -39,4 +39,4 @@ changed_when: false when: - distribution_id in ['redhat9'] - - node_tier == 'ha' \ No newline at end of file + - node_tier == 'ha' diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml index 3c484f5a75..6630786128 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml @@ -1,8 +1,8 @@ - name: "ORACLE Post Processing: Set Variables" ansible.builtin.set_fact: PAS: 'pas' - pga_temp_l4tb_std: "{{ ((ansible_memory_mb.real.total*0.60)*0.2) |round |int }}" - pga_temp_l4tb_dis: "{{ ((ansible_memory_mb.real.total*0.80)*0.2) |round |int }}" + pga_temp_l4tb_std: "{{ ((ansible_memory_mb.real.total * 0.60 ) * 0.2) | round | int }}" + pga_temp_l4tb_dis: "{{ ((ansible_memory_mb.real.total * 0.80 ) * 0.2) | round | int }}" # pga_temp_g4tb: "{{ ((ansible_memory_mb.real.total*0.85)*0.2) |round |int }}" # PGA & SGA for RAM < 4TB @@ -34,7 +34,7 @@ - name: "Set the SGA and PGA Sizes for RAM > 4TB" ansible.builtin.set_fact: ora_sga: "{{ (ansible_memory_mb.real.total * 0.65) | round | int }}" - ora_pga: "{{ ((ansible_memory_mb.real.total*0.65)*0.2) |round |int }}" + ora_pga: "{{ ((ansible_memory_mb.real.total*0.65)*0.2) | round | int }}" when: - ansible_memory_mb.real.total > 4194304 - supported_tiers != "pas" @@ -62,7 +62,7 @@ SHUTDOWN IMMEDIATE; exit; mode: '0755' - + - name: "ORACLE Post Processing: Oracle Startup: create startup.sql" become: true become_user: "oracle" @@ -107,7 +107,7 @@ - name: "Oracle post-processing: Reboot after the Enabling HugePages" ansible.builtin.reboot: reboot_timeout: 300 - + # Wait for Connection after reboot - name: "Wait for Connection after reboot" ansible.builtin.wait_for_connection: diff --git a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml index b4700b3d4b..e71d141b3b 100644 --- a/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/7.0.0-post-install/tasks/main.yaml @@ -334,4 +334,4 @@ ... 
# /*---------------------------------------------------------------------------8 # | END | -# +------------------------------------4--------------------------------------*/ \ No newline at end of file +# +------------------------------------4--------------------------------------*/ From ce6a6ef48e8b125288c71585f2a0e1300cf57842 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 16 Mar 2024 12:55:58 +0200 Subject: [PATCH 465/607] Fixing typo --- deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index 992e55e290..d286e5164a 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -67,7 +67,6 @@ path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/grid_sbp_installed.txt" register: oracle_installed -oles-db # /*---------------------------------------------------------------------------8 # | Start of Oracle software installation using SAP RUNINSTALLER wrapper. | # | Before running Installer set DB_SID and CV_ASSUME_DISTID according to | From 1fff16088d7860d9f4a6a22f39d8cc0841083a17 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 19 Mar 2024 11:48:15 +0200 Subject: [PATCH 466/607] Support zonal shared disks (#567) * Make SCS Shared disk Premium_ZRS * Add support for Premium_ZRS zonal disks * Add Cluster disk info to the Web App * Don't specify a zone for Premium_ZRS disks * Ensure that the --username parameter is correct * Fix error message in variables_global.tf * Remove unused variables in variables_global.tf * Refactor error message in variables_global.tf * Added database_cluster_disk_type * Update scs_cluster_disk_lun and database_cluster_disk_lun properties in SystemModel.cs * Remove trailing spaces in parameter names in SystemDetails.json * Fix ARM_CLIENT_ID variable interpolation in configure_deployer.sh.tmpl * Update proximity_placement_group_id in vm-app.tf * Fix zone assignment for scs_cluster_disk_type in vm-scs.tf * Update disk zone configuration * Update proximity_placement_group_id in vm-app.tf * Update proximity placement group ID in app tier VM configuration * Merge branch 'experimental' --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/SystemModel.cs | 14 ++++ .../SDAF/ParameterDetails/SystemDetails.json | 72 +++++++++++++++++++ .../SDAF/ParameterDetails/SystemTemplate.txt | 18 +++++ deploy/terraform/run/sap_system/module.tf | 6 -- .../run/sap_system/tfvar_variables.tf | 11 +++ deploy/terraform/run/sap_system/transform.tf | 8 +++ .../sap_system/anydb_node/variables_global.tf | 2 - .../modules/sap_system/anydb_node/vm-anydb.tf | 8 +-- .../sap_system/app_tier/variables_global.tf | 2 - .../modules/sap_system/app_tier/vm-app.tf | 5 +- .../modules/sap_system/app_tier/vm-scs.tf | 8 +-- .../common_infrastructure/variables_global.tf | 4 +- .../sap_system/hdb_node/variables_global.tf | 2 - .../modules/sap_system/hdb_node/vm-hdb.tf | 8 +-- 14 files changed, 140 insertions(+), 28 deletions(-) diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 41df392bca..13bad1e6f1 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -213,8 +213,22 @@ public bool IsValid() public bool? 
use_simple_mount { get; set; } = false; public string database_cluster_type { get; set; } = "AFA"; + public string scs_cluster_type { get; set; } = "AFA"; + + public int? scs_cluster_disk_lun { get; set; } = 5; + + public int? scs_cluster_disk_size { get; set; } = 128; + + public string scs_cluster_disk_type { get; set; } = "Premium_ZRS"; + + public int? database_cluster_disk_lun { get; set; } = 8; + + public int? database_cluster_disk_size { get; set; } = 128; + + public string database_cluster_disk_type { get; set; } = "Premium_ZRS"; + /*---------------------------------------------------------------------------8 | | | PPG information | diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index eca523df8e..91e01cebd9 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -1363,6 +1363,78 @@ "Overrules": "", "Display": 2 }, + { + "Name": "scs_cluster_disk_lun", + "Required": false, + "Description": "The LUN of the shared disk for the SAP Central Services cluster", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "scs_cluster_disk_size", + "Required": false, + "Description": "The size of the shared disk for the SAP Central Services cluster", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "scs_cluster_disk_type", + "Required": false, + "Description": "The storage_account_type of the shared disk for the SAP Central Services cluster", + "Type": "lookup", + "Options": [ + { + "Text": "Premium LRS", + "Value": "Premium_LRS" + }, + { + "Text": "Premium ZRS", + "Value": "Premium_ZRS" + } + ], + "Overrules": "", + "Display": 3 + }, + { + "Name": "database_cluster_disk_lun", + "Required": false, + "Description": "The LUN of the shared disk for the Database cluster", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "database_cluster_disk_size", + "Required": false, + "Description": "The size of the shared disk for the Database cluster", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "database_cluster_disk_type", + "Required": false, + "Description": "The storage_account_type of the shared disk for the Database cluster", + "Type": "lookup", + "Options": [ + { + "Text": "Premium LRS", + "Value": "Premium_LRS" + }, + { + "Text": "Premium ZRS", + "Value": "Premium_ZRS" + } + ], + "Overrules": "", + "Display": 3 + }, { "Name": "use_msi_for_clusters", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 260c61807e..6cf46448bf 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -118,9 +118,27 @@ $$use_private_endpoint$$ # scs_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI $$scs_cluster_type$$ +#scs_cluster_disk_lun defines the LUN number for the SAP Central Services cluster disk +$$scs_cluster_disk_lun$$ + +#scs_cluster_disk_size defines the size for the SAP Central Services cluster disk +$$scs_cluster_disk_size$$ + +#scs_cluster_disk_type defines the storage_account_type of the shared disk for the SAP Central Services cluster +$$scs_cluster_disk_type$$ + # database_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI $$database_cluster_type$$ +#database_cluster_disk_lun defines the 
LUN number for the database cluster disk +$$database_cluster_disk_lun$$ + +#database_cluster_disk_size defines the size for the database cluster disk +$$database_cluster_disk_size$$ + +#database_cluster_disk_type defines the storage_account_type of the shared disk for the Database cluster +$$database_cluster_disk_type$$ + # use_msi_for_clusters if defined will use managed service identity for the Pacemaker cluster fencing $$use_msi_for_clusters$$ diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index f64290fd10..75779bce3e 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -116,8 +116,6 @@ module "hdb_node" { cloudinit_growpart_config = null # This needs more consideration module.common_infrastructure.cloudinit_growpart_config custom_disk_sizes_filename = try(coalesce(var.custom_disk_sizes_filename, var.db_disk_sizes_filename), "") database = local.database - database_cluster_disk_lun = var.database_cluster_disk_lun - database_cluster_disk_size = var.database_cluster_disk_size database_dual_nics = try(module.common_infrastructure.admin_subnet, null) == null ? false : var.database_dual_nics database_server_count = upper(try(local.database.platform, "HANA")) == "HANA" ? ( local.database.high_availability ? ( @@ -206,8 +204,6 @@ module "app_tier" { route_table_id = module.common_infrastructure.route_table_id sap_sid = local.sap_sid scale_set_id = try(module.common_infrastructure.scale_set_id, null) - scs_cluster_disk_lun = var.scs_cluster_disk_lun - scs_cluster_disk_size = var.scs_cluster_disk_size sdu_public_key = module.common_infrastructure.sdu_public_key sid_keyvault_user_id = module.common_infrastructure.sid_keyvault_user_id sid_password = module.common_infrastructure.sid_password @@ -251,8 +247,6 @@ module "anydb_node" { 0) : ( local.database.high_availability ? 
2 * var.database_server_count : var.database_server_count ) - database_cluster_disk_lun = var.database_cluster_disk_lun - database_cluster_disk_size = var.database_cluster_disk_size db_asg_id = module.common_infrastructure.db_asg_id db_subnet = module.common_infrastructure.db_subnet deploy_application_security_groups = var.deploy_application_security_groups diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 4619e4f99f..5bf9e150e2 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -380,6 +380,12 @@ variable "database_cluster_disk_size" { default = 128 } +variable "database_cluster_disk_type" { + description = "The storage_account_type of the shared disk for the Database cluster" + default = "Premium_ZRS" + } + + variable "database_platform" { description = "Database platform, supported values are HANA, DB2, ORACLE, ORACLE-ASM, ASE, SQLSERVER or NONE (in this case no database tier is deployed)" default = "" @@ -709,6 +715,11 @@ variable "scs_cluster_disk_size" { default = 128 } +variable "scs_cluster_disk_type" { + description = "The storage_account_type of the shared disk for the SAP Central Services cluster" + default = "Premium_ZRS" + } + ######################################################################################### # # # Application Server variables # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index e0d8b2ee99..da6b6ae890 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -82,6 +82,10 @@ locals { deploy_v1_monitoring_extension = var.deploy_v1_monitoring_extension dual_nics = var.database_dual_nics || try(var.databases[0].dual_nics, false) high_availability = var.database_high_availability || try(var.databases[0].high_availability, false) + database_cluster_disk_lun = var.database_cluster_disk_lun + database_cluster_disk_size = var.database_cluster_disk_size + database_cluster_disk_type = var.database_cluster_disk_type + platform = var.database_platform use_ANF = var.database_HANA_use_ANF_scaleout_scenario || try(var.databases[0].use_ANF, false) use_avset = var.database_server_count == 0 || var.use_scalesets_for_deployment || length(var.database_vm_zones) > 0 || var.database_platform == "NONE" ? ( @@ -206,6 +210,10 @@ locals { false) : ( var.scs_server_use_avset ) + scs_cluster_disk_lun = var.scs_cluster_disk_lun + scs_cluster_disk_size = var.scs_cluster_disk_size + scs_cluster_disk_type = var.scs_cluster_disk_type + webdispatcher_count = local.enable_app_tier_deployment ? 
( max(var.webdispatcher_server_count, try(var.application_tier.webdispatcher_count, 0)) ) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf index 212e68460f..499f675977 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf @@ -12,8 +12,6 @@ variable "database_server_count" { description = "The number of database servers" default = 1 } -variable "database_cluster_disk_size" { description = "The size of the shared disk for the Database cluster" } -variable "database_cluster_disk_lun" { description = "The LUN of the shared disk for the Database cluster" } variable "database_vm_admin_nic_ips" { description = "If provided, the database tier will be configured with the specified IPs (admin subnet)" } variable "database_vm_db_nic_ips" { description = "If provided, the database tier will be configured with the specified IPs" } variable "database_vm_db_nic_secondary_ips" { description = "If provided, the database tier will be configured with the specified IPs as secondary IPs" } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index e5e01c7c07..55ca1bdc5c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -545,13 +545,13 @@ resource "azurerm_managed_disk" "cluster" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name create_option = "Empty" - storage_account_type = "Premium_LRS" - disk_size_gb = var.database_cluster_disk_size + storage_account_type = var.database.database_cluster_disk_type + disk_size_gb = var.database.database_cluster_disk_size disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) max_shares = var.database_server_count tags = var.tags - zone = local.zonal_deployment && !var.database.use_avset ? ( + zone = var.database.database_cluster_disk_type == "Premium_LRS" && local.zonal_deployment && !var.database.use_avset ? ( upper(local.anydb_ostype) == "LINUX" ? 
( azurerm_linux_virtual_machine.dbserver[local.anydb_disks[count.index].vm_index].zone) : ( azurerm_windows_virtual_machine.dbserver[local.anydb_disks[count.index].vm_index].zone @@ -595,7 +595,7 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { ) ) caching = "None" - lun = var.database_cluster_disk_lun + lun = var.database.database_cluster_disk_lun } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf index 0410afd401..4da842be12 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf @@ -71,8 +71,6 @@ variable "management_dns_resourcegroup_name" { type = string } -variable "scs_cluster_disk_lun" { description = "The LUN of the shared disk for the SAP Central Services cluster" } -variable "scs_cluster_disk_size" { description = "The size of the shared disk for the SAP Central Services cluster" } ######################################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 85dc755f96..725c5ef3cd 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -123,7 +123,7 @@ resource "azurerm_linux_virtual_machine" "app" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - proximity_placement_group_id = length(var.scale_set_id) > 0 ? ( + proximity_placement_group_id = var.application_tier.app_use_avset || length(var.scale_set_id) > 0 ? ( null) : ( var.application_tier.app_use_ppg ? ( var.ppg[count.index % max(length(var.ppg), 1)]) : ( @@ -240,6 +240,7 @@ resource "azurerm_linux_virtual_machine" "app" { lifecycle { ignore_changes = [ source_image_id, + proximity_placement_group_id, zone ] } @@ -266,7 +267,7 @@ resource "azurerm_windows_virtual_machine" "app" { source_image_id = var.application_tier.app_os.type == "custom" ? var.application_tier.app_os.source_image_id : null - proximity_placement_group_id = length(var.scale_set_id) > 0 ? ( + proximity_placement_group_id = var.application_tier.app_use_avset || length(var.scale_set_id) > 0 ? ( null) : ( var.application_tier.app_use_ppg ? ( var.ppg[count.index % max(length(var.ppg), 1)]) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 7aac8915aa..27321851c5 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -574,12 +574,12 @@ resource "azurerm_managed_disk" "cluster" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name create_option = "Empty" - storage_account_type = "Premium_LRS" - disk_size_gb = var.scs_cluster_disk_size + storage_account_type = var.application_tier.scs_cluster_disk_type + disk_size_gb = var.application_tier.scs_cluster_disk_size disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) max_shares = local.scs_server_count - zone = !local.use_scs_avset ? ( + zone = (var.application_tier.scs_cluster_disk_type == "Premium_LRS") && !local.use_scs_avset ? 
( upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( azurerm_linux_virtual_machine.scs[local.scs_data_disks[count.index].vm_index].zone) : ( azurerm_windows_virtual_machine.scs[local.scs_data_disks[count.index].vm_index].zone @@ -623,7 +623,7 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { ) ) caching = "None" - lun = var.scs_cluster_disk_lun + lun = var.application_tier.scs_cluster_disk_lun } ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index ceaf33efcb..eee75211c5 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -11,13 +11,13 @@ variable "application_tier" { var.application_tier.scs_high_availability ? ( var.application_tier.scs_cluster_type != "ASD" ? ( true) : ( - length(try(var.application_tier.scs_zones, [])) <= 1 + length(try(var.application_tier.scs_zones, [])) <= (var.application_tier.scs_cluster_disk_type == "Premium_ZRS" ? 2 : 1) )) : ( true ) ) - error_message = "Cluster type 'ASD' does not support cross zonal deployments." + error_message = format("Cluster type 'ASD' with disk type %s does not support deployments across %d zones.", var.application_tier.scs_cluster_disk_type, length(try(var.application_tier.scs_zones, []))) } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf index bc8f58a9e4..2166f156ef 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf @@ -10,8 +10,6 @@ variable "custom_disk_sizes_filename" { type = string } variable "database" {} -variable "database_cluster_disk_size" { description = "The size of the shared disk for the Database cluster" } -variable "database_cluster_disk_lun" { description = "The LUN of the shared disk for the Database cluster" } variable "database_dual_nics" { description = "Defines if the HANA DB uses dual network interfaces" diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index a8fb333a63..c0ab1e6107 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -430,13 +430,13 @@ resource "azurerm_managed_disk" "cluster" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name create_option = "Empty" - storage_account_type = "Premium_LRS" - disk_size_gb = var.database_cluster_disk_size + storage_account_type = var.database.database_cluster_disk_type + disk_size_gb = var.database.database_cluster_disk_size disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) max_shares = var.database_server_count tags = var.tags - zone = !local.use_avset ? ( + zone = var.database.database_cluster_disk_type == "Premium_LRS" && !local.use_avset ? 
( azurerm_linux_virtual_machine.vm_dbnode[local.data_disk_list[count.index].vm_index].zone) : ( null ) @@ -478,7 +478,7 @@ resource "azurerm_virtual_machine_data_disk_attachment" "cluster" { ) ) caching = "None" - lun = var.database_cluster_disk_lun + lun = var.database.database_cluster_disk_lun } ######################################################################################### From 98fed1d9176a0e6073b76dd4ba91d5215cba6fd4 Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Mon, 25 Mar 2024 22:55:56 +0530 Subject: [PATCH 467/607] Hotfix/scaleout anf multi-node standby (optional ) (#568) * adding variable for deploying HANA scale out - ANF without a standby node. * Add disclaimer --- .../4.0.3-hdb-install-scaleout/tasks/main.yaml | 12 +++++++++++- deploy/ansible/vars/ansible-input-api.yaml | 1 + deploy/terraform/run/sap_system/module.tf | 1 + deploy/terraform/run/sap_system/tfvar_variables.tf | 5 +++++ .../modules/sap_system/output_files/inventory.tf | 1 + .../sap_system/output_files/sap-parameters.tmpl | 2 +- .../sap_system/output_files/variables_global.tf | 1 + 7 files changed, 21 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 8a890e59ff..885b49260d 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -7,7 +7,13 @@ # | deploy hdblcm password file | # | | # +------------------------------------4--------------------------------------*/ - +# /*---------------------------------------------------------------------------8 +# | | +# | This code contains references to terms that Microsoft no longer uses. | +# | When these terms are removed from the SAP software and documentation, | +# | we’ll remove them from this codebase. | +# | | +# +------------------------------------4--------------------------------------*/ --- # +------------------------------------4--------------------------------------*/ @@ -109,7 +115,11 @@ # This is the way !!! _rsp_additional_hosts: "{% for item in db_hosts[1:] %} {% if loop.index == db_hosts | length -1 %} + {% if db_no_standby %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} {{ item }}:role=standby:group=default:workergroup=default + {% endif %} {% else %} {{ item }}:role=worker:group=default:workergroup=default, {% endif %} diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index a9f98425b2..63dae8c70f 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -215,6 +215,7 @@ use_simple_mount: false # database_high_availability: false db_scale_out: false database_cluster_type: "AFA" +db_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. 
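# A minimal sketch of how this flag is meant to be combined (illustrative
# overrides, not the shipped defaults) for an ANF scale-out without a standby
# node:
#   db_scale_out: true
#   db_no_standby: true
# With db_no_standby left at false, the hdblcm response file above still
# assigns role=standby to the last host in db_hosts.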
# scs_high_availability: false scs_cluster_type: "AFA" # Configure pacemaker for Azure scheduled events diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 75779bce3e..8fd2d81727 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -434,6 +434,7 @@ module "output_files" { use_simple_mount = local.validated_use_simple_mount upgrade_packages = var.upgrade_packages scale_out = var.database_HANA_use_ANF_scaleout_scenario + scale_out_no_standby_role = var.database_HANA_no_standby_role ######################################################################################### # iSCSI # diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 5bf9e150e2..01960fe4d8 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1347,6 +1347,11 @@ variable "database_HANA_use_ANF_scaleout_scenario" { default = false } +variable "database_HANA_no_standby_role" { + description = "If true, the database scale out tier will not have a standby role" + default = false + } + variable "stand_by_node_count" { description = "The number of standby nodes" default = 0 diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 811a8b3093..f2bc876c87 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -222,6 +222,7 @@ resource "local_file" "sap-parameters_yml" { ) asd_disks = concat(var.scs_shared_disks, var.database_shared_disks) scale_out = var.scale_out + scale_out_no_standby_role = var.scale_out_no_standby_role scs_cluster_loadbalancer_ip = try(format("%s/%s", var.scs_cluster_loadbalancer_ip, var.app_subnet_netmask), "") scs_cluster_type = var.scs_cluster_type scs_high_availability = var.scs_high_availability diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 013c9e88ed..7ac2d8a324 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -78,7 +78,7 @@ platform: ${platform} # Scale out defines if the database is to be deployed in a scale out configuration db_scale_out: ${scale_out} - +db_no_standby: ${scale_out_no_standby_role} # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability db_high_availability: ${database_high_availability} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index d3c91e94e9..7291e8586e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -141,6 +141,7 @@ variable "save_naming_information" { default = false } variable "scale_out" { description = "If true, the SAP System will be scale out" } +variable "scale_out_no_standby_role" { description = "If true, the SAP Scale out system will not have a standby-node. 
Only applicable for shared storage based deployment" } variable "scs_shared_disks" { description = "SCS Azure Shared Disk" } From 44aee32a66b54857f93be9245aa15883106e57a1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 25 Mar 2024 21:49:24 +0200 Subject: [PATCH 468/607] Bring in Scale out improvements (#569) * Make SCS Shared disk Premium_ZRS * Add support for Premium_ZRS zonal disks * Add Cluster disk info to the Web App * Don't specify a zone for Premium_ZRS disks * Ensure that the --username parameter is correct * Fix error message in variables_global.tf * Remove unused variables in variables_global.tf * Refactor error message in variables_global.tf * Added database_cluster_disk_type * Update scs_cluster_disk_lun and database_cluster_disk_lun properties in SystemModel.cs * Remove trailing spaces in parameter names in SystemDetails.json * Fix ARM_CLIENT_ID variable interpolation in configure_deployer.sh.tmpl * Update proximity_placement_group_id in vm-app.tf * Fix zone assignment for scs_cluster_disk_type in vm-scs.tf * Update disk zone configuration * Update proximity_placement_group_id in vm-app.tf * Update proximity placement group ID in app tier VM configuration * Merge branch 'experimental' * Add storage subnet for SAP VNET * Add storage_subnet_id and storage_nsg_id outputs to sap_landscape module * Add SAP storage subnet NSG and associate it with the storage subnet. Add SSH network security rule for connectivity to SAP application subnet from Control Plane. * Update NSG rule protocol to allow all traffic * Refactor proximityPlacementGroup assignment in avg.tf * Update NSG protocol to allow all traffic * Refactor subnet_storage configuration in transform.tf * Add storage subnet to Web App * Get the latest * Add scaleout variables * Update proximityPlacementGroup in avg.tf * Add storage subnet variables for scale-out configuration * Update storage subnet condition for ANF support * Refactor storage subnet count logic * Fix conditional expression in subnets.tf * Refactor network_interface_ids in vm-hdb.tf * Custom Mount fix * Add "Custom" sizing to UX * Create the root folder if it does not exist * Add ANF & Storage subnet prefixes to sap-parameters.yaml * Update storage subnet netmask variable name * Fix storage subnet ID in variables_local.tf * Fix ANF_subnet_prefix value in outputs.tf * Refactor sap-parameters.tmpl file * Update subnet prefixes for storage and ANF * Update principal_id in azurerm_role_assignment * Fix src path in custom mount task * Update IP address retrieval in Ansible playbooks * Update variable name in main.yaml * Update debug message to use correct variable name * Update debug message to print ipadd variable * Update networking tasks and hosts file template * Add client subnet * Add subnet_prefix_client to sap-parameters_yml resource * Update IMDS URL in networking tasks * Refactor host entries in hosts.j2 template * Fix formatting and typo in sap-parameters.tmpl * Refactor host entries generation in hosts.j2 template * Add routes and restart VM for HANA scaleout * Add app subnet * Add use_msi_for_clusters variable to sap-parameters.tmpl * Fix conditional value assignment in outputs.tf * Update network configuration details in main.yaml * Update sap-parameters.tmpl file * Update enable_storage_subnet condition in variables_local.tf * Debug * Web App updates * Refactor virtual host name assignment in hosts.j2 template * Add description property * Add the download ability * Add the MSI to the extension object * Fix virtual host names duplication issue * 
Fix virtual host name iteration in hosts.j2 template * Add support for secondary IP addresses in azure_interfaces.j2 template * Add dependencies for storage network interface creation * Refactor networking configuration and route creation. * Add DB subnet to sap-parameters.yaml * Update network interface conditions in main.yaml * Update VM-Images.json with new SKUs * Update internal network configuration in HDB installation playbook * Update database host roles in main.yaml * Update internal network configuration in HDB install playbook * Fix missing quotation mark in hdblcm command * Update internal network configuration in HDB install playbook * Refactor HDB installation command in main.yaml * Add additional destination port ranges to NSG rule * Task naming and Linting * Add database_HANA_no_standby_role variable --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla --- Webapp/SDAF.sln | 12 -- .../SDAF/Controllers/LandscapeController.cs | 4 +- Webapp/SDAF/Controllers/SystemController.cs | 27 +++- Webapp/SDAF/Models/LandscapeModel.cs | 17 +++ Webapp/SDAF/Models/SystemModel.cs | 5 + .../ParameterDetails/LandscapeDetails.json | 70 +++++++++ .../ParameterDetails/LandscapeTemplate.txt | 27 ++++ .../SDAF/ParameterDetails/SystemDetails.json | 18 +++ .../SDAF/ParameterDetails/SystemTemplate.txt | 7 + Webapp/SDAF/ParameterDetails/VM-Images.json | 16 +-- Webapp/SDAF/SDAFWebApp.csproj | 6 +- Webapp/SDAF/Views/Landscape/Edit.cshtml | 30 ++-- Webapp/SDAF/Views/Shared/_FormPartial.cshtml | 9 +- Webapp/SDAF/Views/System/Edit.cshtml | 38 ++++- Webapp/SDAF/wwwroot/js/site.js | 6 +- .../playbook_02_os_sap_specific_config.yaml | 10 +- .../tasks/main.yaml | 2 +- .../tasks/ora-dg-setup-secondary.yaml | 125 +++++++--------- .../roles-os/1.10-networking/tasks/main.yaml | 136 ++++++++++++++++++ .../templates/azure_interfaces.j2 | 18 +++ .../2.4-hosts-file/templates/hosts.j2 | 81 +++++++++-- .../tasks/2.6.9-custom-mounts.yaml | 34 ++++- deploy/scripts/install_workloadzone.sh | 2 + deploy/terraform/run/sap_landscape/output.tf | 11 +- .../run/sap_landscape/tfvar_variables.tf | 31 ++++ .../terraform/run/sap_landscape/transform.tf | 54 +++++++ deploy/terraform/run/sap_system/module.tf | 7 + .../run/sap_system/tfvar_variables.tf | 39 ++++- deploy/terraform/run/sap_system/transform.tf | 58 +++++++- .../sap_landscape/key_vault_sap_landscape.tf | 2 +- .../modules/sap_landscape/nsg.tf | 54 ++++++- .../modules/sap_landscape/outputs.tf | 19 +++ .../modules/sap_landscape/subnets.tf | 18 +++ .../modules/sap_landscape/variables_local.tf | 65 ++++++++- .../sap_system/anydb_node/variables_local.tf | 5 + .../modules/sap_system/anydb_node/vm-anydb.tf | 6 +- .../modules/sap_system/app_tier/outputs.tf | 13 ++ .../sap_system/app_tier/variables_local.tf | 6 + .../modules/sap_system/app_tier/vm-app.tf | 6 +- .../modules/sap_system/app_tier/vm-scs.tf | 6 +- .../modules/sap_system/app_tier/vm-webdisp.tf | 6 +- .../common_infrastructure/outputs.tf | 38 ++++- .../common_infrastructure/subnets.tf | 12 +- .../common_infrastructure/variables_local.tf | 10 +- .../modules/sap_system/hdb_node/anf.tf | 10 ++ .../modules/sap_system/hdb_node/avg.tf | 13 +- .../modules/sap_system/hdb_node/outputs.tf | 10 ++ .../sap_system/hdb_node/variables_local.tf | 6 +- .../modules/sap_system/hdb_node/vm-hdb.tf | 24 ++-- .../sap_system/output_files/inventory.tf | 5 + .../output_files/sap-parameters.tmpl | 29 +++- .../output_files/variables_global.tf | 5 + 52 files changed, 1085 insertions(+), 183 deletions(-) create mode 100644 
deploy/ansible/roles-os/1.10-networking/templates/azure_interfaces.j2 diff --git a/Webapp/SDAF.sln b/Webapp/SDAF.sln index 034798d1e1..93c29b5927 100644 --- a/Webapp/SDAF.sln +++ b/Webapp/SDAF.sln @@ -3,10 +3,6 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 17 VisualStudioVersion = 17.3.32825.248 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutomationForm.UnitTests", "AutomationForm.UnitTests\AutomationForm.UnitTests.csproj", "{10584C61-C4D5-4CA9-B171-0FE75A60CACE}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutomationForm.IntegrationTests", "AutomationForm.IntegrationTests\AutomationForm.IntegrationTests.csproj", "{ECDBC3FC-22DF-4E16-97B4-E64B7AEDE632}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SDAFWebApp", "SDAF\SDAFWebApp.csproj", "{BB09D45E-2643-4548-9B09-F8718E98DAB2}" EndProject Global @@ -15,14 +11,6 @@ Global Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {10584C61-C4D5-4CA9-B171-0FE75A60CACE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {10584C61-C4D5-4CA9-B171-0FE75A60CACE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {10584C61-C4D5-4CA9-B171-0FE75A60CACE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {10584C61-C4D5-4CA9-B171-0FE75A60CACE}.Release|Any CPU.Build.0 = Release|Any CPU - {ECDBC3FC-22DF-4E16-97B4-E64B7AEDE632}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {ECDBC3FC-22DF-4E16-97B4-E64B7AEDE632}.Debug|Any CPU.Build.0 = Debug|Any CPU - {ECDBC3FC-22DF-4E16-97B4-E64B7AEDE632}.Release|Any CPU.ActiveCfg = Release|Any CPU - {ECDBC3FC-22DF-4E16-97B4-E64B7AEDE632}.Release|Any CPU.Build.0 = Release|Any CPU {BB09D45E-2643-4548-9B09-F8718E98DAB2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {BB09D45E-2643-4548-9B09-F8718E98DAB2}.Debug|Any CPU.Build.0 = Debug|Any CPU {BB09D45E-2643-4548-9B09-F8718E98DAB2}.Release|Any CPU.ActiveCfg = Release|Any CPU diff --git a/Webapp/SDAF/Controllers/LandscapeController.cs b/Webapp/SDAF/Controllers/LandscapeController.cs index 9b6f205a54..d532df87a5 100644 --- a/Webapp/SDAF/Controllers/LandscapeController.cs +++ b/Webapp/SDAF/Controllers/LandscapeController.cs @@ -351,7 +351,7 @@ public async Task EditAsync(LandscapeModel landscape) await _landscapeService.CreateTFVarsAsync(file); - return RedirectToAction("Index"); + return RedirectToAction("Edit", "Landscape", new { @id = landscape.Id, @partitionKey = landscape.environment }); //RedirectToAction("Index"); } else { @@ -378,7 +378,7 @@ public async Task EditAsync(LandscapeModel landscape) await _landscapeService.CreateTFVarsAsync(file); - return RedirectToAction("Index"); + return RedirectToAction("Edit", "Landscape", new { @id = landscape.Id, @partitionKey = landscape.environment }); //RedirectToAction("Index"); } } catch (Exception e) diff --git a/Webapp/SDAF/Controllers/SystemController.cs b/Webapp/SDAF/Controllers/SystemController.cs index 2a6cc925bf..4ad0ff6134 100644 --- a/Webapp/SDAF/Controllers/SystemController.cs +++ b/Webapp/SDAF/Controllers/SystemController.cs @@ -3,6 +3,7 @@ using Microsoft.AspNetCore.Mvc; using Microsoft.AspNetCore.Mvc.Rendering; using Microsoft.Extensions.Configuration; +using Microsoft.IdentityModel.Tokens; using Microsoft.Net.Http.Headers; using Newtonsoft.Json; using System; @@ -430,6 +431,17 @@ public async Task EditAsync(SystemModel system) if (system.Id == null) system.Id = newId; if (newId != system.Id) { + if (String.IsNullOrEmpty(system.Description)) + { + if 
((bool)system.database_high_availability || (bool)system.scs_high_availability) + { + system.Description = system.database_platform + " high availability system on " + system.scs_server_image.publisher + " " + system.scs_server_image.offer + " " + system.scs_server_image.sku; + } + else + { + system.Description = system.database_platform + " distributed system on " + system.scs_server_image.publisher + " " + system.scs_server_image.offer + " " + system.scs_server_image.sku; + } + } await SubmitNewAsync(system); string id = system.Id; string path = $"/SYSTEM/{id}/{id}.tfvars"; @@ -446,7 +458,7 @@ public async Task EditAsync(SystemModel system) }; await _systemService.CreateTFVarsAsync(file); - return RedirectToAction("Index"); + return RedirectToAction("Edit", "System", new { @id = system.Id, @partitionKey = system.environment }); //RedirectToAction("Index"); } @@ -456,6 +468,17 @@ public async Task EditAsync(SystemModel system) { await UnsetDefault(system.Id); } + if (String.IsNullOrEmpty(system.Description)) + { + if ((bool)system.database_high_availability || (bool)system.scs_high_availability) + { + system.Description = system.database_platform + " high availability system on " + system.scs_server_image.publisher + " " + system.scs_server_image.offer + " " + system.scs_server_image.sku; + } + else + { + system.Description = system.database_platform + " distributed system on " + system.scs_server_image.publisher + " " + system.scs_server_image.offer + " " + system.scs_server_image.sku; + } + } await _systemService.UpdateAsync(new SystemEntity(system)); TempData["success"] = "Successfully updated system " + system.Id; string id = system.Id; @@ -473,7 +496,7 @@ public async Task EditAsync(SystemModel system) }; await _systemService.CreateTFVarsAsync(file); - return RedirectToAction("Index"); + return RedirectToAction("Edit", "System", new { @id = system.Id, @partitionKey = system.environment }); //RedirectToAction("Index"); } } catch (Exception e) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 8e3031a612..fc2dd05072 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -38,6 +38,8 @@ public bool IsValid() [LocationValidator(ErrorMessage = "Location is not a valid Azure region")] public string location { get; set; } + public string Description { get; set; } + public string name_override_file { get; set; } public bool? 
save_naming_information { get; set; } @@ -171,6 +173,21 @@ public bool IsValid() public string ams_subnet_nsg_name { get; set; } + [SubnetArmIdValidator(ErrorMessage = "Invalid Storage subnet arm id")] + public string storage_subnet_arm_id { get; set; } + + //[Required] + [AddressPrefixValidator(ErrorMessage = "Storage subnet address space must be a valid RFC 1918 address")] + public string storage_subnet_address_prefix { get; set; } + + public string storage_subnet_name { get; set; } + + [NsgArmIdValidator(ErrorMessage = "Invalid storage subnet nsg arm id")] + public string storage_subnet_nsg_arm_id { get; set; } + + public string storage_subnet_nsg_name { get; set; } + + /*---------------------------------------------------------------------------8 diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 13bad1e6f1..6f491a9af9 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -42,6 +42,9 @@ public bool IsValid() [LocationValidator(ErrorMessage = "Location is not a valid Azure region")] public string location { get; set; } + public string Description { get; set; } + + [RequiredIfNotDefault] [DisplayName("Network name")] [RegularExpression(@"^\w{0,7}$", ErrorMessage = "Logical network name cannot exceed seven characters")] @@ -624,6 +627,8 @@ public bool IsValid() public bool? database_HANA_use_ANF_scaleout_scenario { get; set; } = false; + public bool? database_HANA_no_standby_role { get; set; } = false; + public int? stand_by_node_count { get; set; } = 0; diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 25bc191154..26d700c64f 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -85,6 +85,15 @@ "Overrules": "", "Display": 1 }, + { + "Name": "Description", + "Required": false, + "Description": "Workload zone description.", + "Type": "textbox", + "Options": [], + "Overrules": "", + "Display": 1 + }, { "Name": "save_naming_information", "Required": false, @@ -722,6 +731,67 @@ } ] }, + { + "Section": "Storage subnet", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone", + "Parameters": [ + { + "Name": "storage_subnet_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing subnet for the storage subnet", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "storage_subnet_address_prefix", + "Display": 2 + }, + { + "Name": "storage_subnet_address_prefix", + "Required": false, + "Description": "Defines the subnet address range for the storage subnet.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "storage_subnet_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "storage_subnet_nsg_arm_id", + "Required": false, + "Description": "Specifies Azure resource identifier for the existing network security group to use.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + } + ], + "Overrules": "storage_subnet_nsg_name", + "Display": 2 + }, + { + "Name": "storage_subnet_nsg_name", + "Required": false, + "Description": "Should only be used if the default naming is not acceptable for the network security group name", + "Type": "field", + "Options": [], + 
"Overrules": "", + "Display": 2 + } + ] + }, { "Section": "Azure keyvault support", "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#key-vault-parameters", diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index b21a8d1e22..b02ce2e2a4 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -30,6 +30,9 @@ $$environment$$ # The location value is a mandatory field, it is used to control where the resources are deployed $$location$$ +# Description of the Workload zone. +$$Description$$ + #If you want to provide a custom naming json use the following parameter. $$name_override_file$$ @@ -258,6 +261,30 @@ $$ams_subnet_nsg_arm_id$$ $$ams_subnet_nsg_name$$ +########################################################################### +# # +# Storage Subnet # +# # +########################################################################### + +/* storage subnet information */ +# If defined these parameters control the subnet name and the subnet prefix +# storage_subnet_name is an optional parameter and should only be used if the default naming is not acceptable +$$storage_subnet_name$$ + +# storage_subnet_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing subnet +$$storage_subnet_arm_id$$ + +# storage_subnet_address_prefix is a mandatory parameter if the subnets are not defined in the workload or if existing subnets are not used +$$storage_subnet_address_prefix$$ + +# storage_subnet_nsg_arm_id is an optional parameter that if provided specifies Azure resource identifier for the existing nsg +$$storage_subnet_nsg_arm_id$$ + +# storage_subnet_nsg_name is an optional parameter and should only be used if the default naming is not acceptable for the network security group name +$$storage_subnet_nsg_name$$ + + ######################################################################################### # # # DNS Settings # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 91e01cebd9..2ca3633b01 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -105,6 +105,15 @@ "Overrules": "", "Display": 1 }, + { + "Name": "Description", + "Required": false, + "Description": "System description.", + "Type": "textbox", + "Options": [], + "Overrules": "", + "Display": 1 + }, { "Name": "network_logical_name", "Required": true, @@ -1946,6 +1955,15 @@ "Overrules": "", "Display": 3 }, + { + "Name": "database_HANA_no_standby_role", + "Required": false, + "Description": "If true, the database scale out tier will not have a standby role.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, { "Name": "stand_by_node_count", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 6cf46448bf..6f01f60f77 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -39,6 +39,10 @@ $$environment$$ # The location value is a mandatory field, it is used to control where the resources are deployed $$location$$ +# Description of the SAP system. +$$Description$$ + + #If you want to customize the disk sizes for VMs use the following parameter to specify the custom sizing file. 
$$custom_disk_sizes_filename$$ @@ -890,6 +894,9 @@ $$tags$$ #If true, the database tier will be configured for scaleout scenario $$database_HANA_use_ANF_scaleout_scenario$$ +#If true, the database scale out tier will not have a standby role +$$database_HANA_no_standby_role$$ + # Defined the standbynode count in a scaleout scenario $$stand_by_node_count$$ diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json index ed9e6ff8f8..7c2ec5bb52 100644 --- a/Webapp/SDAF/ParameterDetails/VM-Images.json +++ b/Webapp/SDAF/ParameterDetails/VM-Images.json @@ -54,7 +54,7 @@ "source_image_id": "", "publisher": "RedHat", "offer": "RHEL-SAP-APPS", - "sku": "8_4", + "sku": "84sapapps-gen2", "version": "latest", "type": "marketplace" } @@ -78,7 +78,7 @@ "source_image_id": "", "publisher": "RedHat", "offer": "RHEL-SAP-APPS", - "sku": "8_6", + "sku": "86sapapps-gen2", "version": "latest", "type": "marketplace" } @@ -102,7 +102,7 @@ "source_image_id": "", "publisher": "RedHat", "offer": "RHEL-SAP-APPS", - "sku": "8_8", + "sku": "88sapapps-gen2", "version": "latest", "type": "marketplace" } @@ -126,7 +126,7 @@ "source_image_id": "", "publisher": "RedHat", "offer": "RHEL-SAP-APPS", - "sku": "9_0", + "sku": "90sapapps-gen2", "version": "latest", "type": "marketplace" } @@ -150,7 +150,7 @@ "source_image_id": "", "publisher": "RedHat", "offer": "RHEL-SAP-APPS", - "sku": "9_2", + "sku": "92sapapps-gen2", "version": "latest", "type": "marketplace" } @@ -300,7 +300,7 @@ } }, { - "name": "Windows Server 2019-GS", + "name": "Windows Server 2019-G2", "data": { "os_type": "WINDOWS", "source_image_id": "", @@ -324,7 +324,7 @@ } }, { - "name": "Windows Server 2022-GS", + "name": "Windows Server 2022-G2", "data": { "os_type": "WINDOWS", "source_image_id": "", @@ -336,7 +336,7 @@ } }, { - "name": "Windows Server 2022-GS", + "name": "Windows Server 2022-G2", "data": { "os_type": "WINDOWS", "source_image_id": "", diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 7961079e0c..52c45d9318 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -17,7 +17,7 @@ - + @@ -26,8 +26,8 @@ - - + + diff --git a/Webapp/SDAF/Views/Landscape/Edit.cshtml b/Webapp/SDAF/Views/Landscape/Edit.cshtml index 1c14f0f68a..014dd2785b 100644 --- a/Webapp/SDAF/Views/Landscape/Edit.cshtml +++ b/Webapp/SDAF/Views/Landscape/Edit.cshtml @@ -1,4 +1,4 @@ -@model AutomationForm.Models.FormViewModel; +@model AutomationForm.Models.FormViewModel; @using AutomationForm.Models @using System.Text.Json @@ -92,13 +92,27 @@ @await Html.PartialAsync("_FormPartial", Model) - - Update - - - Cancel - + + + + + Update + + + + + + Close + + + + + + Deploy + + + - \ No newline at end of file + diff --git a/Webapp/SDAF/Views/Shared/_FormPartial.cshtml b/Webapp/SDAF/Views/Shared/_FormPartial.cshtml index bc33efdf6d..654169c98b 100644 --- a/Webapp/SDAF/Views/Shared/_FormPartial.cshtml +++ b/Webapp/SDAF/Views/Shared/_FormPartial.cshtml @@ -26,10 +26,15 @@
+ @if (@p.Type == "field") { @Html.TextBox(p.Name, (string) ("" + value), new { @class = "ms-TextField-field", @onchange = $"overrulesHandler({p.Name}, {p.Overrules})" }) } + else if (@p.Type == "textbox") + { + @Html.TextArea(p.Name, (string)("" + value), new { @class = "ms-TextField-field", @style = "width: 100%; height: auto", @onchange = $"overrulesHandler({p.Name}, {p.Overrules})" }) + } else if (@p.Type == "lookup") { @Html.DropDownList(p.Name, p.Options, new { @class = "js-example-placeholder-single", @style = "width: 100%;", @onchange = $"overrulesHandler({p.Name}, {p.Overrules})" }) @@ -121,7 +126,7 @@ } else { -
Invalid parameter
+ Invalid parameter type @Html.Raw(p.Type)
} @Html.ValidationMessage(p.Name) @@ -132,4 +137,4 @@
} - \ No newline at end of file + diff --git a/Webapp/SDAF/Views/System/Edit.cshtml b/Webapp/SDAF/Views/System/Edit.cshtml index 66230b43c1..70feb26ba9 100644 --- a/Webapp/SDAF/Views/System/Edit.cshtml +++ b/Webapp/SDAF/Views/System/Edit.cshtml @@ -115,13 +115,37 @@ - - Save - - - - Cancel - + + +   + + Save + + + + +   + + Close + + + + +   + + Deploy + + + + +   + + Download + + + + + diff --git a/Webapp/SDAF/wwwroot/js/site.js b/Webapp/SDAF/wwwroot/js/site.js index 1a497b1c21..9686190ef6 100644 --- a/Webapp/SDAF/wwwroot/js/site.js +++ b/Webapp/SDAF/wwwroot/js/site.js @@ -42,7 +42,11 @@ var hanadb_sizes = [ { "text": "Default", "value": "Default" - }, + }, + { + "text": "Custom", + "value": "Custom" + }, { "text": "S4Demo", "value": "S4Demo" diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index d50f751699..4735538d5d 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -119,19 +119,19 @@ - name: "SAP OS configuration playbook: - Get the IP information from instance meta data service" ansible.builtin.uri: - url: http://169.254.169.254/metadata/instance/network/interface/0?api-version=2021-02-01 + url: http://169.254.169.254/metadata/instance/network?api-version=2021-02-01 use_proxy: false headers: Metadata: true register: azure_network_metadata - - name: "SAP OS configuration playbook: - Filter out the values for IPAddresses in json format" - ansible.builtin.set_fact: - private_ips_info: "{{ azure_network_metadata.json.ipv4.ipAddress }}" + # - name: "SAP OS configuration playbook: - Filter out the values for IPAddresses in json format" + # ansible.builtin.set_fact: + # private_ips_info: "{{ azure_network_metadata.json.ipv4.ipAddress }}" - name: "SAP OS configuration playbook: - Convert ips to list" ansible.builtin.set_fact: - ipadd: "{{ private_ips_info | map(attribute='privateIpAddress') | list }}" + ipadd: "{{ azure_network_metadata.json | json_query('interface[*].ipv4.ipAddress[*].privateIpAddress') | flatten(levels=1) | default([]) | list }}" # add assertion to validate if ipadd is not empty and has at least one ip address - name: "SAP OS configuration playbook: - Assert if IP Address is not empty" diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 885b49260d..6f0aadd39b 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -108,7 +108,7 @@ _rsp_system_usage: "custom" use_master_password: "{{ hana_use_master_password }}" password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_internal_network: "{{ subnet_prefix_db }}" # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. 
_rsp_root_password: "{{ root_password }}" # Note: Last node in the DB list is marked as standby, while everything else except first node is marked as worker node diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index 542a909c2e..6282c552c5 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -1,9 +1,8 @@ --- # /*---------------------------------------------------------------------------8 -# |Execute the SQL scripts for data guard initial configuration. | -# | | # | | +# | Execute the SQL scripts for Oracle Data Guard configuration. | # | | # +------------------------------------4--------------------------------------*/ @@ -43,51 +42,46 @@ # Add additional parameters for Oracle ASM to match the file locations of Primary in secondary. # This has to be done to prevent rman shooting file systems all over the disk groups. -- name: "Update the initSID.ora for changing the control file location" +- name: "Oracle Data Guard - Update the initSID.ora for changing the control file location" ansible.builtin.replace: - path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora - regexp: '/{{ db_sid | upper }}/c' - replace: '/{{ db_sid | upper }}_STDBY/c' - backup: true + path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora + regexp: '/{{ db_sid | upper }}/c' + replace: '/{{ db_sid | upper }}_STDBY/c' + backup: true when: - node_tier == "oracle-asm" -- name: "File update wait for 15 sec to avoid multiple locks" +- name: "Oracle Data Guard - File update wait for 15 sec to avoid multiple locks" ansible.builtin.wait_for: - timeout: 15 + timeout: 15 -- name: "Update the initSID.ora for adopting oraarch location" +- name: "Oracle Data Guard - Update the initSID.ora for adopting oraarch location" ansible.builtin.replace: - path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora - regexp: '{{ db_sid | upper }}/oraarch' - replace: '{{ db_sid | upper }}_STDBY/oraarch' + path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora + regexp: '{{ db_sid | upper }}/oraarch' + replace: '{{ db_sid | upper }}_STDBY/oraarch' when: - node_tier == "oracle-asm" # You can also use "sed" to replace the string. 
sed -i 's|/DBSID/c|/DBSID_STDBY/c|g' -- name: "File update wait for 15 sec to avoid multiple locks" +- name: "Oracle Data Guard - File update wait for 15 sec to avoid multiple locks" ansible.builtin.wait_for: - timeout: 15 - -- name: "Update the initSID.ora to delete the old local_listener value" - become: true - become_user: "root" - ansible.builtin.shell: | - sed -i '/local_listener=/d' /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora - register: local_listener_results - failed_when: local_listener_results.rc > 1 - + timeout: 15 -# - name: "debug file update" -# fail: -# msg: "fail here for checking if the file is updated" +- name: "Oracle Data Guard - Update the initSID.ora to delete the old local_listener value" + become: true + become_user: "root" + ansible.builtin.lineinfile: + path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora + regexp: '^local_listener=' + state: absent -- name: "File update wait for 15 sec to avoid multiple locks" +- name: "Oracle Data Guard - File update wait for 15 sec to avoid multiple locks" ansible.builtin.wait_for: - timeout: 15 + timeout: 15 -- name: "Replace the local listener entires in initSID.ora" +- name: "Oracle Data Guard - Replace the local listener entires in initSID.ora" ansible.builtin.blockinfile: path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora marker_begin: "-- BEGIN" @@ -103,18 +97,7 @@ when: - node_tier == "oracle-asm" -# Replace the archive log destination in the initSID.ora file. -# - name: "Update the initSID.ora for changing the control file location" -# ansible.builtin.replace: -# path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora -# regexp: "'log_archive_dest_1='LOCATION='" -# replace: "log_archive_dest_1='LOCATION=/oracle/{{ db_sid | upper }}/oraarch'" -# backup: true -# when: -# - node_tier == "oracle" - - -- name: "Replace the local listener entires in initSID.ora" +- name: "Oracle Data Guard - Replace the local listener entires in initSID.ora" ansible.builtin.blockinfile: path: /oracle/{{ db_sid | upper }}/{{ ora_release }}/dbs/init{{ db_sid | upper }}.ora marker_begin: "-- BEGIN" @@ -168,7 +151,7 @@ # Restart the Listener on Secondary node when the node_tier is Oracle-ASM. 
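# (Restarting the listener lets it pick up the local_listener block written
# above; a manual equivalent on the secondary, run as the oracle user, would
# be `lsnrctl stop` followed by `lsnrctl start`, which is what the next two
# marker-guarded tasks wrap.)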
-- name: "ASM Oracle Data Guard - Setup Secondary: Stop lsnrctl on Secondary" +- name: "Oracle Data Guard - ASM - Setup Secondary: Stop lsnrctl on Secondary" become: true become_user: "oracle" ansible.builtin.shell: lsnrctl stop @@ -182,8 +165,7 @@ - node_tier == "oracle-asm" - current_host == ora_secondary - -- name: "ASM Oracle Data Guard - Setup Secondary: Create lsnrctl_stopped_sec.txt" +- name: "Oracle Data Guard - ASM - Setup Secondary: Create lsnrctl_stopped_sec.txt" become: true become_user: "oracle" ansible.builtin.file: @@ -196,7 +178,7 @@ - lsnrctl_stop_secondary_results.rc == 0 -- name: "Oracle Data Guard - Setup Secondary: Start lsnrctl on Secondary" +- name: "Oracle Data Guard - ASM - Setup Secondary: Start lsnrctl on Secondary" become: true become_user: "oracle" ansible.builtin.shell: lsnrctl start @@ -236,9 +218,9 @@ - lsnrctl_asm_start_secondary_results.rc == 0 -- name: "ASM Listener Starting: Sleep for 40 seconds and continue with play" +- name: "Oracle Data Guard - ASM Listener Starting: Sleep for 40 seconds and continue with play" ansible.builtin.wait_for: - timeout: 40 + timeout: 40 - name: "Oracle Data Guard - Setup Secondary: Startup secondary DB using pfile" become: true @@ -316,7 +298,10 @@ - name: "Oracle Data Guard - Setup Secondary: Remove files" become: true become_user: "oracle" - ansible.builtin.command: rm -rf {{ item.directory_to_empty }}/* + ansible.builtin.file: + path: "{{ item.directory_to_empty }}" + state: absent + recurse: true loop: - { directory_to_empty: '/oracle/{{ db_sid | upper }}/sapdata1' } - { directory_to_empty: '/oracle/{{ db_sid | upper }}/sapdata2' } @@ -391,16 +376,16 @@ # Renaming the Brokenredo files to the correct names and creating the required directories. # RUn the redolog clear to physical creation of files. 
-- name: "Oracle Secondary Redo log rename template" - become: true - become_user: "oracle" +- name: "Oracle Data Guard - Oracle Secondary Redo log rename using template" + become: true + become_user: "oracle" ansible.builtin.template: - backup: true - src: standbyredolog.j2 - dest: "/etc/sap_deployment_automation/dgscripts/standbyredolog.sql" - mode: '0644' - force: true - when: node_tier == "oracle" + backup: true + src: standbyredolog.j2 + dest: "/etc/sap_deployment_automation/dgscripts/standbyredolog.sql" + mode: '0644' + force: true + when: node_tier == "oracle" - name: "Oracle Data Guard - Setup Secondary: Rename the redolog files afer RMAN Restore" @@ -445,7 +430,7 @@ # Create the oraarch and spfile parameter folder on ASM for recovery -- name: "Create oraarch and parameter folders for oracle-asm" +- name: "Oracle Data Guard - ASM - Create oraarch and parameter folders" become: true become_user: "oracle" ansible.builtin.shell: | @@ -462,7 +447,7 @@ - current_host == ora_secondary -- name: "Oracle Data Guard - Setup Secondary: Create asm_oraarch_created.txt" +- name: "Oracle Data Guard - ASM - Setup Secondary: Create asm_oraarch_created.txt" become: true become_user: "oracle" ansible.builtin.file: @@ -539,7 +524,7 @@ - backup_pfile_results.rc == 0 - current_host == ora_secondary -- name: "Update the Local_Listener Value in secondary" +- name: "Oracle Data Guard - Update the Local_Listener Value on secondary" become: true become_user: "oracle" ansible.builtin.shell: sqlplus / as sysdba @listenerupdate.sql @@ -765,7 +750,7 @@ - current_host == ora_secondary -- name: "Oracle Data Guard - Setup Secondary: Create asm_secondary_startup_spfile.txt" +- name: "Oracle Data Guard - ASM - Setup Secondary: Create asm_secondary_startup_spfile.txt" become: true become_user: "root" ansible.builtin.file: @@ -779,15 +764,15 @@ - asm_secondary_startup_spfile_results.rc == 0 - current_host == ora_secondary -- name: "Create local_listener_on_secondary on secondary" - become: true - become_user: root +- name: "Oracle Data Guard - ASM - Create local_listener_on_secondary on secondary" + become: true + become_user: root ansible.builtin.file: - path: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt - state: touch - mode: '0755' - owner: oracle - group: oinstall + path: /etc/sap_deployment_automation/dgscripts/local_listener_on_secondary.txt + state: touch + mode: '0755' + owner: oracle + group: oinstall when: - node_tier == "oracle-asm" - local_listener_on_secondary_results.rc == 0 diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index 09330df687..c60a6b9fdf 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -4,6 +4,7 @@ # | | # +------------------------------------4--------------------------------------*/ +# Process the first secondary IP configuration available on the primary NIC - name: "1.10 Networking - Get the IP information from instance meta data service" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance/network/interface/0?api-version=2021-02-01 @@ -94,6 +95,57 @@ # name: NetworkManager # state: restarted +# Get interface data and process network information for all interfaces except the first one + +- name: "1.10 Networking - Get IMDS data for all interfaces" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance/network/interface?api-version=2021-02-01&format=json + use_proxy: false 
+ headers: + Metadata: true + register: azure_network_metadata + +# build a list of all the network interfaces +- name: "1.10 Networking - Get the network interfaces" + ansible.builtin.set_fact: + az_network_interfaces: "{{ lookup('template', 'azure_interfaces.j2', wantlist=True) | default([]) | flatten(levels=1) }}" + +# display the list of azure network interfaces and the expected ethX interface names +- name: "1.10 Networking - Print the network interfaces" + ansible.builtin.debug: + msg: + - "ipAddress: {{ item.ipAddress }} " + - "Subnet: {{ item.subnet }}" + - "interfaceName: {{ item.interfaceName }}" + verbosity: 2 + loop: "{{ az_network_interfaces | list }}" + +# for each of the additional ip addresses, add the configuration to the network interface +# and create a route to the subnet - this is for the storage and application subnets +# As the IMDS does not return the IPs in the correct order always, we might run into issues +# this is an old way of doing things, we should be using the nmcli commands anyways. +# - name: "1.10 Networking - Create the file with secondary ip" +# ansible.builtin.blockinfile: +# create: true +# path: /etc/sysconfig/network-scripts/ifcfg-{{ azif.interfaceName }} +# marker_begin: "-- BEGIN" +# marker_end: "-- END" +# block: | +# DEVICE={{ azif.interfaceName }} +# BOOTPROTO=static +# ONBOOT=yes +# IPADDR={{ azif.ipAddress }} +# NETMASK={{ azif.subnet | ipaddr('netmask') }} +# mode: 644 +# loop: "{{ az_network_interfaces | list }}" +# loop_control: +# loop_var: azif +# register: definition_made +# when: +# - az_network_interfaces is defined +# - az_network_interfaces | length >= 1 +# - not azif.interfaceName in ['eth0', 'eth0:0'] + # Restart Network service (ifup eth0) - name: "1.10 Networking - Restart Network service" ansible.builtin.systemd: @@ -120,6 +172,90 @@ - definition_made.changed - distribution_id == 'redhat7' +- name: "1.10 Networking - Add routes and restart VM for HANA scaleout" + when: + - db_scale_out + - definition_made.changed + block: + # display the list of azure network interfaces and the expected ethX interface names + - name: "1.10 Networking - Print the network configuration details for storage route" + ansible.builtin.debug: + msg: + - "IpAddress: {{ azif.ipAddress }}" + - "Subnet: {{ azif.subnet }}" + - "InterfaceName: {{ azif.interfaceName }}" + - "ANF Subnet: {{ subnet_prefix_anf }}" + - "Gateway: {{ subnet_prefix_storage | ansible.utils.ipmath(1) }}" + verbosity: 2 + loop: "{{ az_network_interfaces | list }}" + loop_control: + loop_var: azif + when: + - az_network_interfaces is defined + - subnet_prefix_storage is defined + - az_network_interfaces | length >= 1 + - not azif.interfaceName in ['eth0', 'eth0:0'] + - subnet_prefix_storage | ansible.utils.network_in_usable( azif.ipAddress ) + + # since the storage nic is the 3rd added to the VM we will assume that the device is eth2 + # and the connection is 'Wired connection 2' + - name: "1.10 Networking - Add route to the ANF subnet via storage gateway" + become: true + become_user: root + ansible.builtin.shell: nmcli connection modify "Wired connection 2" +ipv4.routes "{{ subnet_prefix_anf }} {{ azif.subnet | ansible.utils.ipmath(1) }}" + loop: "{{ az_network_interfaces | list }}" + loop_control: + loop_var: azif + when: + - az_network_interfaces is defined + - subnet_prefix_storage is defined + - az_network_interfaces | length >= 2 + - not azif.interfaceName in ['eth0', 'eth0:0'] + - subnet_prefix_storage | ansible.utils.network_in_usable( azif.ipAddress ) + + - name: "1.10 
Networking - Print the network configuration details for client route" + ansible.builtin.debug: + msg: + - "IpAddress: {{ azif.ipAddress }}" + - "Subnet: {{ azif.subnet }}" + - "InterfaceName: {{ azif.interfaceName }}" + - "App Subnet: {{ subnet_prefix_app }}" + - "Gateway: {{ subnet_prefix_client | ansible.utils.ipmath(1) }}" + verbosity: 2 + loop: "{{ az_network_interfaces | list }}" + loop_control: + loop_var: azif + when: + - az_network_interfaces is defined + - subnet_prefix_client is defined + - az_network_interfaces | length >= 2 + - not azif.interfaceName in ['eth0', 'eth0:0'] + - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) + + - name: "1.10 Networking - Add route to the application subnet via client gateway" + ansible.builtin.command: nmcli connection modify "Wired connection 1" +ipv4.routes "{{ subnet_prefix_app }} {{ azif.subnet | ansible.utils.ipmath(1) }}" + loop: "{{ az_network_interfaces | list }}" + loop_control: + loop_var: azif + when: + - az_network_interfaces is defined + - subnet_prefix_client is defined + - az_network_interfaces | length >= 1 + - not azif.interfaceName in ['eth0', 'eth0:0'] + - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) + +# reboot VM after the new interface definitions are changed. +- name: "1.10 Networking - Reboot VM and wait for 5 minutes" + ansible.builtin.reboot: + reboot_timeout: 300 + when: + - definition_made.changed + +- name: "1.10 Networking - Wait for connection to be established after reboot" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 + ... # /*----------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.10-networking/templates/azure_interfaces.j2 b/deploy/ansible/roles-os/1.10-networking/templates/azure_interfaces.j2 new file mode 100644 index 0000000000..a04ae85f8b --- /dev/null +++ b/deploy/ansible/roles-os/1.10-networking/templates/azure_interfaces.j2 @@ -0,0 +1,18 @@ +{%- set _interfaces = [] -%} +{%- for interface in azure_network_metadata.json -%} +{%- set _interface_name = "eth" + loop.index0|string -%} +{%- set _primary_ip = interface.ipv4.ipAddress[0].privateIpAddress -%} +{%- set _subnet_address = interface.ipv4.subnet[0].address -%} +{%- set _subnet_prefix = interface.ipv4.subnet[0].prefix -%} +{%- set _interface_info = {"ipAddress": _primary_ip, "interfaceName": _interface_name, "subnet": _subnet_address+"/"+_subnet_prefix} -%} +{%- set _ = _interfaces.append(_interface_info) -%} +{%- if interface.ipv4.ipAddress | length > 1 -%} +{%- for secondary_ip in interface.ipv4.ipAddress[1:] -%} +{%- set _interface_name_secondary = _interface_name+":"+loop.index0|string -%} +{%- set _secondary_ip_address = secondary_ip.privateIpAddress -%} +{%- set _interface_info_secondary = {"ipAddress": _secondary_ip_address, "interfaceName": _interface_name_secondary, "subnet": _subnet_address+"/"+_subnet_prefix} -%} +{%- set _ = _interfaces.append(_interface_info_secondary) -%} +{%- endfor -%} +{%- endif -%} +{%- endfor -%} +{{ _interfaces }} diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index cc8c2d2cc3..253747d020 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -73,24 +73,75 @@ ansible_facts. 
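{# For orientation, a hedged sketch of what the reworked template below is
   expected to render for a scale-out DB host that has extra storage and
   client NICs (host name, FQDN and addresses are illustrative):
     10.0.1.10          x01dhdb00l0.mydomain.example          x01dhdb00l0
     10.0.2.10          x01dhdb00l0-storage.mydomain.example  x01dhdb00l0-storage
     10.0.3.10          x01dhdb00l0-client.mydomain.example   x01dhdb00l0-client
#}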
{# This template generates host entries based on the provided data #} {% for host in ansible_play_hosts|sort %} {# Set variables for the current host #} -{% set virtual_host_name = hostvars[host]['virtual_host'] %} -{# Determine the IPs for the current host from ipaddr variable. Do not sort it, the order is already correct. #} -{% set host_ips = hostvars[host]['ipadd'] %} +{% set supported_tiers = hostvars[host]['supported_tiers'] if 'supported_tiers' in hostvars[host] else [] %} +{% set virtual_host_names = [] %} +{# Assign virtual host names based on supported tiers #} +{% for tier in supported_tiers %} +{% if tier == 'scs' %} +{% set scs_virtual_host = hostvars[host]['custom_scs_virtual_hostname'] if 'custom_scs_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if scs_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(scs_virtual_host) %} +{% endif %} +{% elif tier == 'ers' %} +{% set ers_virtual_host = hostvars[host]['custom_ers_virtual_hostname'] if 'custom_ers_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if ers_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(ers_virtual_host) %} +{% endif %} +{% elif tier == 'pas' %} +{% set pas_virtual_host = hostvars[host]['custom_pas_virtual_hostname'] if 'custom_pas_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if pas_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(pas_virtual_host) %} +{% endif %} +{% elif tier == 'app' %} +{% set app_virtual_host = hostvars[host]['custom_app_virtual_hostname'] if 'custom_app_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if app_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(app_virtual_host) %} +{% endif %} +{% elif tier == 'web' %} +{% set web_virtual_host = hostvars[host]['custom_web_virtual_hostname'] if 'custom_web_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if web_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(web_virtual_host) %} +{% endif %} +{% elif tier in ['hana', 'oracle', 'oracle-asm', 'db2', 'sybase'] %} +{% set db_virtual_host = hostvars[host]['custom_db_virtual_hostname'] if 'custom_db_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} +{% if db_virtual_host not in virtual_host_names %} +{% set _ = virtual_host_names.append(db_virtual_host) %} +{% endif %} +{% endif %} +{% endfor %} +{# Keep only unique values in the virtual_host_names #} +{% set virtual_host_names = virtual_host_names | unique %} +{# Determine the IPs for the current host from ipaddr variable. Do not sort it, the order is already correct. #} +{% set host_ips = hostvars[host]['ipadd'] %} {# Check if there are IPs available for the current host #} -{% if host_ips %} -{# Print the primary host entry #} +{% if host_ips %} + {{ '%-19s' | format(host_ips[0]) }}{{ '%-80s ' | format(host + '.' + sap_fqdn) }}{{ '%-21s' | format(host) }} -{# If there's only one IP, also use it for the virtual_host #} -{% if host_ips|length == 1 %} -{{ '%-19s' | format(host_ips[0]) }}{{ '%-80s ' | format(virtual_host_name + '.' 
+ sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
-{% else %}
+{# If there's only one IP, replicate the entry with different virtual_host_names #}
+{% if host_ips|length == 1 %}
+{% for vh_name in virtual_host_names if virtual_host_names | length >= 1 %}
+
+{{ '%-19s' | format(host_ips[0]) }}{{ '%-80s ' | format(vh_name + '.' + sap_fqdn) }}{{ '%-21s' | format(vh_name) }}
+
+{% endfor %}
+{% else %}
 {# Loop through remaining IPs for the virtual host #}
-{% for ip in host_ips[1:] %}
-{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(virtual_host_name + '.' + sap_fqdn) }}{{ '%-21s' | format(virtual_host_name) }}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endfor %}
+{% for ip in host_ips[1:] %}
+{% if ((db_scale_out) and ((subnet_prefix_storage is defined) and (subnet_prefix_storage | ansible.utils.network_in_usable(ip)))) %}
+{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }}
+
+{% elif ((db_scale_out) and ((subnet_prefix_client is defined) and (subnet_prefix_client | ansible.utils.network_in_usable(ip)))) %}
+{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-client.' + sap_fqdn) }}{{ '%-21s' | format(host + '-client') }}
+{% else %}
+{% for vh_name in virtual_host_names if virtual_host_names | length >= 1 %}
+{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(vh_name + '.' + sap_fqdn) }}{{ '%-21s' | format(vh_name) }}
+
+{% endfor %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+{% endfor %}
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.9-custom-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.9-custom-mounts.yaml
index 47223ee27a..3c9cb9e6df 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.9-custom-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.9-custom-mounts.yaml
@@ -17,6 +17,36 @@
   when:
     - node_tier in custom_mount.target_nodes or custom_mount.target_nodes == ['all']

+- name: "Custom Mount: Create Directory on NFS ({{ custom_mount.path }})"
+  when:
+    - custom_mount.create_temp_folders
+    - ansible_hostname == ansible_play_hosts[0]
+  block:
+    - name: "Custom Mount: {{ custom_mount.mount }}/{{ custom_mount.folder }}"
+      ansible.posix.mount:
+        src: "{{ custom_mount.mount }}"
+        path: "{{ custom_mount.path }}"
+        fstype: "nfs4"
+        opts: "{{ custom_mount.opts }}"
+        state: mounted
+
+    - name: "Custom Mount: Create Directory ({{ custom_mount.folder }})"
+      ansible.builtin.file:
+        owner: '{{ sidadm_uid }}'
+        group: sapsys
+        mode: 0755
+        path: "{{ custom_mount.path }}/{{ custom_mount.folder }}"
+        state: directory
+
+    - name: "Custom Mount: Unmount "
+      ansible.posix.mount:
+        src: "{{ custom_mount.mount }}"
+        path: "{{ custom_mount.path }}"
+        fstype: "nfs4"
+        opts: "{{ custom_mount.opts }}"
+        state: unmounted
+
+
 - name: "Custom Mount: Change attribute only when we create SAP Directories ({{ custom_mount.temppath }})"
   become: true
   become_user: "root"
@@ -28,7 +58,7 @@
   when:
     - node_tier in custom_mount.target_nodes or custom_mount.target_nodes == "all"
     - is_created_now.changed
-    - custom_mount.set_chattr_on_dir
+    - custom_mount.set_chattr_on_dir | default(false)

 - name: "Custom Mount"
   when:
@@ -36,7 +66,7 @@
   block:
     - name: "Custom Mount: ({{ custom_mount.path }} on {% if custom_mount.create_temp_folders %}{{ custom_mount.mount }}/{{ custom_mount.folder }}{% else %}{{ custom_mount.mount }}{% endif %})"
       ansible.posix.mount:
-        src: "{{ custom_mount.mount }}"
+        src: "{% if custom_mount.create_temp_folders %}{{ custom_mount.mount }}/{{ custom_mount.folder }}{% else %}{{ custom_mount.mount }}{% endif %}"
         path: "{{ custom_mount.path }}"
         fstype: "nfs4"
         opts: "{{ custom_mount.opts }}"
diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh
index 0df8e3455c..527b20c7a4 100755
--- a/deploy/scripts/install_workloadzone.sh
+++ b/deploy/scripts/install_workloadzone.sh
@@ -942,6 +942,8 @@ then
     return_value=0
     errors_occurred=$(jq 'select(."@level" == "error") | length' apply_output.json)

+    cat apply_output.json
+
     if [[ -n $errors_occurred ]]
     then
       echo ""
diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf
index c81f767bdc..4942ba1b70 100644
--- a/deploy/terraform/run/sap_landscape/output.tf
+++ b/deploy/terraform/run/sap_landscape/output.tf
@@ -95,6 +95,15 @@ output "web_nsg_id" {
   description = "Azure resource identifier for the web subnet network security group"
   value       = module.sap_landscape.web_nsg_id
 }
+output "storage_subnet_id" {
+  description = "Azure resource identifier for the storage subnet"
+  value       = length(var.storage_subnet_arm_id) > 0 ? var.storage_subnet_arm_id : module.sap_landscape.storage_subnet_id
+  }
+
+output "storage_nsg_id" {
+  description = "Azure resource identifier for the storage subnet network security group"
+  value       = module.sap_landscape.storage_nsg_id
+  }

 ###############################################################################
 #                                                                             #
@@ -326,4 +335,4 @@ output "iSCSI_servers" {
 output ams_resource_id {
   description = "AMS resource ID"
   value       = module.sap_landscape.ams_resource_id
-  }
\ No newline at end of file
+  }
diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
index 7a61ae2de0..57ebac56cc 100644
--- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
@@ -239,6 +239,37 @@ variable "web_subnet_nsg_arm_id" {
     default = ""
   }

+#########################################################################################
+#                                                                                       #
+#  Storage Subnet variables - Needed only during HANA Scaleout deployments              #
+#                                                                                       #
+#########################################################################################
+
+variable "storage_subnet_name" {
+    description = "If provided, the name of the storage subnet"
+    default     = ""
+  }
+
+variable "storage_subnet_arm_id" {
+    description = "If provided, Azure resource id for the storage subnet"
+    default     = ""
+  }
+
+variable "storage_subnet_address_prefix" {
+    description = "The address prefix for the storage subnet"
+    default     = ""
+  }
+
+variable "storage_subnet_nsg_name" {
+    description = "If provided, the name of the storage subnet NSG"
+    default     = ""
+  }
+
+variable "storage_subnet_nsg_arm_id" {
+    description = "If provided, Azure resource id for the storage subnet NSG"
+    default     = ""
+  }
+
 #########################################################################################
 #                                                                                       #
diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf
index 24d6200b05..c61942e14c 100644
--- a/deploy/terraform/run/sap_landscape/transform.tf
+++ b/deploy/terraform/run/sap_landscape/transform.tf
@@ -76,6 +76,25 @@ locals {
     length(try(var.infrastructure.vnets.sap.subnet_web.nsg.arm_id, ""))
   ) > 0

+  subnet_storage_defined = (
+    length(var.storage_subnet_address_prefix) +
+    length(try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) +
+    length(var.storage_subnet_arm_id) +
+    length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, ""))
+  ) > 0
+
+  subnet_storage_arm_id_defined = (
+    length(var.storage_subnet_arm_id) +
+    length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, ""))
+  ) > 0
+
+  subnet_storage_nsg_defined = (
+    length(var.storage_subnet_nsg_name) +
+    length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, "")) +
+    length(var.storage_subnet_nsg_arm_id) +
+    length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, ""))
+  ) > 0
+
   subnet_iscsi_defined = (
     length(var.iscsi_subnet_address_prefix) +
     length(try(var.infrastructure.vnets.sap.subnet_iscsi.prefix, "")) +
@@ -328,6 +347,34 @@ locals {
     )
   )

+  subnet_storage = merge(
+    (
+      {
+        "name" = try(var.infrastructure.vnets.sap.subnet_storage.name, var.storage_subnet_name)
+      }
+    ), (
+      local.subnet_storage_arm_id_defined ? (
+        {
+          "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.arm_id, var.storage_subnet_arm_id)
+        }
+      ) : (
+        null
+      )), (
+      {
+        "prefix" = try(var.infrastructure.vnets.sap.subnet_storage.prefix, var.storage_subnet_address_prefix)
+      }
+    ), (
+      local.subnet_storage_nsg_defined ? (
+        {
+          "nsg" = {
+            "name"   = try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, var.storage_subnet_nsg_name)
+            "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, var.storage_subnet_nsg_arm_id)
+          }
+        }
+      ) : null
+    )
+  )
+
   subnet_anf = merge(
     (
       {
@@ -447,6 +494,13 @@ locals {
     ) : (
       null
     )), (
+      local.subnet_storage_defined ? (
+        {
+          "subnet_storage" = local.subnet_storage
+        }
+      ) : (
+        null
+      )), (
       local.subnet_anf_defined ? (
         {
           "subnet_anf" = local.subnet_anf
diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf
index 8fd2d81727..d4dd6e2e51 100644
--- a/deploy/terraform/run/sap_system/module.tf
+++ b/deploy/terraform/run/sap_system/module.tf
@@ -353,6 +353,13 @@ module "output_files" {
   )))
   loadbalancers = module.hdb_node.loadbalancers

+  subnet_prefix_anf     = module.hdb_node.ANF_subnet_prefix
+  subnet_prefix_app     = module.app_tier.subnet_prefix_app
+  subnet_prefix_client  = module.common_infrastructure.subnet_prefix_client
+  subnet_prefix_db      = module.common_infrastructure.subnet_prefix_db
+  subnet_prefix_storage = module.common_infrastructure.subnet_prefix_storage
+
+
   #########################################################################################
   #  SAP Application information                                                          #
   #########################################################################################
diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf
index 01960fe4d8..e6e7975c16 100644
--- a/deploy/terraform/run/sap_system/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_system/tfvar_variables.tf
@@ -12,7 +12,13 @@ variable "environment" {
   }

 variable "codename" {
-    description = "This is the code name name for the deployment"
+    description = "This is the code name for the deployment"
     type        = string
     default     = ""
   }

+variable "Description" {
+    description = "This is the description for the deployment"
+    type        = string
+    default     = ""
+  }
+
@@ -263,6 +269,37 @@ variable "web_subnet_nsg_arm_id" {
   }

+#########################################################################################
+#                                                                                       #
+#  Storage Subnet variables - Only valid for scale-out configuration                    #
+#                                                                                       #
+#########################################################################################
+
+variable "storage_subnet_name" {
+    description = "If provided, the name of the storage subnet"
+    default     = ""
+  }
+
+variable
"storage_subnet_arm_id" { + description = "If provided, Azure resource id for the storage subnet" + default = "" + } + +variable "storage_subnet_address_prefix" { + description = "The address prefix for the storage subnet" + default = "" + } + +variable "storage_subnet_nsg_name" { + description = "If provided, the name of the storage subnet NSG" + default = "" + } + +variable "storage_subnet_nsg_arm_id" { + description = "If provided, Azure resource id for the storage subnet NSG" + default = "" + } + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index da6b6ae890..1b03ae2d28 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -98,8 +98,9 @@ locals { var.database_use_ppg ) user_assigned_identity_id = var.user_assigned_identity_id - zones = var.database_vm_zones + scale_out = var.database_HANA_use_ANF_scaleout_scenario stand_by_node_count = var.stand_by_node_count + zones = var.database_vm_zones } db_os = { @@ -389,6 +390,25 @@ locals { length(try(var.infrastructure.vnets.sap.subnet_web.nsg.arm_id, "")) ) > 0 + subnet_storage_defined = ( + length(var.storage_subnet_address_prefix) + + length(try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) + + length(var.storage_subnet_arm_id) + + length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) + ) > 0 + + subnet_storage_arm_id_defined = ( + length(var.storage_subnet_arm_id) + + length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) + ) > 0 + + subnet_storage_nsg_defined = ( + length(var.storage_subnet_nsg_name) + + length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, "")) + + length(var.storage_subnet_nsg_arm_id) + + length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, "")) + ) > 0 + app_nic_ips = distinct(concat(var.application_server_app_nic_ips, try(var.application_tier.app_nic_ips, []))) app_nic_secondary_ips = distinct(var.application_server_app_nic_ips) app_admin_nic_ips = distinct(concat(var.application_server_admin_nic_ips, try(var.application_tier.app_admin_nic_ips, []))) @@ -512,6 +532,34 @@ locals { ) ) + subnet_storage = merge( + ( + { + "name" = try(var.infrastructure.vnets.sap.subnet_storage.name, var.storage_subnet_name) + } + ), ( + local.subnet_storage_arm_id_defined ? ( + { + "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.arm_id, var.storage_subnet_arm_id) + } + ) : ( + null + )), ( + { + "prefix" = try(var.infrastructure.vnets.sap.subnet_storage.prefix, var.storage_subnet_address_prefix) + } + ), ( + local.subnet_storage_nsg_defined ? ( + { + "nsg" = { + "name" = try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, var.storage_subnet_nsg_name) + "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, var.storage_subnet_nsg_arm_id) + } + } + ) : null + ) + ) + all_subnets = merge(local.sap, ( local.subnet_admin_defined ? ( { @@ -541,6 +589,14 @@ locals { ) : ( null ) + ), ( + local.subnet_storage_defined ? 
(
       {
         "subnet_storage" = local.subnet_storage
       }
     ): (
       null
     )
   )
 )
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
index 60da5f9307..5c2bbcabe5 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
@@ -546,7 +546,7 @@ resource "azurerm_role_assignment" "kv_user_additional_users" {
     azurerm_key_vault.kv_user[0].id
   )
   role_definition_name = "Key Vault Secrets Officer"
-  principal_id         = local.service_principal.object_id
+  principal_id         = var.additional_users_to_add_to_keyvault_policies[count.index]
 }

 resource "azurerm_management_lock" "keyvault" {
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf
index f4cc742a22..7799e1bc78 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf
@@ -123,6 +123,34 @@ resource "azurerm_subnet_network_security_group_association" "web" {
 }

+# Creates SAP storage subnet nsg
+resource "azurerm_network_security_group" "storage" {
+  provider   = azurerm.main
+  count      = local.storage_subnet_defined && !local.storage_subnet_nsg_exists ? 1 : 0
+  depends_on = [
+    azurerm_subnet.storage
+  ]
+  name                = local.storage_subnet_nsg_name
+  resource_group_name = local.SAP_virtualnetwork_exists ? (
+    data.azurerm_virtual_network.vnet_sap[0].resource_group_name
+  ) : (
+    azurerm_virtual_network.vnet_sap[0].resource_group_name
+  )
+  location = local.SAP_virtualnetwork_exists ? (
+    data.azurerm_virtual_network.vnet_sap[0].location) : (
+    azurerm_virtual_network.vnet_sap[0].location
+  )
+}
+
+# Associates SAP storage nsg to SAP storage subnet
+resource "azurerm_subnet_network_security_group_association" "storage" {
+  provider                  = azurerm.main
+  count                     = local.storage_subnet_defined && !local.storage_subnet_nsg_exists ? 1 : 0
+  subnet_id                 = local.storage_subnet_existing ? var.infrastructure.vnets.sap.subnet_storage.arm_id : azurerm_subnet.storage[0].id
+  network_security_group_id = azurerm_network_security_group.storage[0].id
+}
+
+
 // Add network security rule
 resource "azurerm_network_security_rule" "nsr_controlplane_app" {
   provider = azurerm.main
@@ -171,6 +199,30 @@ resource "azurerm_network_security_rule" "nsr_controlplane_web" {
   destination_address_prefixes = azurerm_subnet.web[0].address_prefixes
 }

+// Add storage subnet network security rule
+resource "azurerm_network_security_rule" "nsr_controlplane_storage" {
+  provider   = azurerm.main
+  count      = local.storage_subnet_defined ? local.storage_subnet_nsg_exists ? 0 : 1 : 0
+  depends_on = [
+    azurerm_network_security_group.storage
+  ]
+  name                = "ConnectivityToSAPStorageSubnetFromControlPlane-ssh-rdp-winrm-ANF"
+  resource_group_name = local.SAP_virtualnetwork_exists ? (
+    data.azurerm_virtual_network.vnet_sap[0].resource_group_name
+  ) : (
+    azurerm_virtual_network.vnet_sap[0].resource_group_name
+  )
+  network_security_group_name = try(azurerm_network_security_group.storage[0].name, azurerm_network_security_group.app[0].name)
+  priority                     = 100
+  direction                    = "Inbound"
+  access                       = "Allow"
+  protocol                     = "*"
+  source_port_range            = "*"
+  destination_port_ranges      = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049]
+  source_address_prefixes      = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes))
+  destination_address_prefixes = azurerm_subnet.storage[0].address_prefixes
+}
+
 // Add SSH network security rule
 resource "azurerm_network_security_rule" "nsr_controlplane_db" {
   provider = azurerm.main
@@ -190,7 +242,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_db" {
   access                  = "Allow"
   protocol                = "Tcp"
   source_port_range       = "*"
-  destination_port_ranges = [22, 443, 3389, 5985, 5986]
+  destination_port_ranges = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049]
   source_address_prefixes = compact(concat(
     var.deployer_tfstate.subnet_mgmt_address_prefixes,
     var.deployer_tfstate.subnet_bastion_address_prefixes)
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf
index 925f509e7a..aa6bc62311 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf
@@ -103,6 +103,15 @@ output "web_subnet_id" {
   )
 }

+output "storage_subnet_id" {
+  description = "Azure resource identifier for the storage subnet"
+  value = local.storage_subnet_defined ? (
+    local.storage_subnet_existing ? (
+      var.infrastructure.vnets.sap.subnet_storage.arm_id) : (
+      try(azurerm_subnet.storage[0].id, ""))) : (
+    ""
+  )
+  }

 output "anf_subnet_id" {
   description = "Azure resource identifier for the anf subnet"
@@ -164,6 +173,16 @@ output "web_nsg_id" {
   )
 }

+output "storage_nsg_id" {
+  description = "Azure resource identifier for the storage subnet network security group"
+  value = local.storage_subnet_defined ? (
+    local.storage_subnet_nsg_exists ? (
+      var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id) : (
+      try(azurerm_network_security_group.storage[0].id, ""))) : (
+    ""
+  )
+  }
+
 output "subnet_mgmt_id" {
   description = "Azure resource identifier for the management subnet"
   value       = local.deployer_subnet_management_id
diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
index 4d18fc1bdd..9b5f6f33f5 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf
@@ -69,6 +69,24 @@ resource "azurerm_subnet" "web" {
   )
 }

+// Creates storage subnet of SAP VNET
+resource "azurerm_subnet" "storage" {
+  provider             = azurerm.main
+  count                = local.storage_subnet_defined && !local.storage_subnet_existing ? 1 : 0
+  name                 = local.storage_subnet_name
+  resource_group_name  = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].resource_group_name : azurerm_virtual_network.vnet_sap[0].resource_group_name
+  virtual_network_name = local.SAP_virtualnetwork_exists ?
data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name + address_prefixes = [local.subnet_prefix_storage] + + private_endpoint_network_policies_enabled = var.use_private_endpoint + + service_endpoints = var.use_service_endpoint ? ( + ["Microsoft.Storage", "Microsoft.KeyVault"] + ) : ( + null + ) +} + // Creates anf subnet of SAP VNET resource "azurerm_subnet" "anf" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 47893bf5c0..894426ce48 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -436,6 +436,69 @@ locals { ) ) + ############################################################################################## + # + # storage subnet - Check if locally provided + # + ############################################################################################## + + storage_subnet_defined = ( + length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) + + length(try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) + ) > 0 + storage_subnet_arm_id = local.storage_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) : ( + "" + ) + storage_subnet_existing = length(local.storage_subnet_arm_id) > 0 + storage_subnet_name = local.storage_subnet_existing ? ( + try(split("/", local.storage_subnet_arm_id)[10], "")) : ( + length(try(var.infrastructure.vnets.sap.subnet_storage.name, "")) > 0 ? ( + var.infrastructure.vnets.sap.subnet_storage.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.storage_subnet, + length(local.prefix) > 0 ? ( + local.prefix) : ( + var.infrastructure.environment + ), + var.naming.separator, + local.resource_suffixes.storage_subnet + ) + ) + ) + subnet_prefix_storage = local.storage_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) : ( + "" + ) + + ############################################################################################## + # + # storage subnet NSG - Check if locally provided + # + ############################################################################################## + + storage_subnet_nsg_arm_id = local.storage_subnet_defined ? ( + try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, "")) : ( + "" + ) + storage_subnet_nsg_exists = length(local.storage_subnet_nsg_arm_id) > 0 + + storage_subnet_nsg_name = local.storage_subnet_nsg_exists ? ( + try(split("/", local.storage_subnet_nsg_arm_id)[8], "")) : ( + length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, "")) > 0 ? ( + var.infrastructure.vnets.sap.subnet_storage.nsg.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.storage_subnet_nsg, + length(local.prefix) > 0 ? ( + local.prefix) : ( + var.infrastructure.environment + ), + var.naming.separator, + local.resource_suffixes.storage_subnet_nsg + ) + ) + ) + ############################################################################################## # # ANF subnet - Check if locally provided @@ -527,7 +590,7 @@ locals { ams_subnet_prefix = local.ams_subnet_defined ? 
(
     try(var.infrastructure.vnets.sap.subnet_ams.prefix, "")) : (
     ""
-   )
+  )

   # Store the Deployer KV in workload zone KV
   deployer_keyvault_user_name = try(var.deployer_tfstate.deployer_kv_user_name, "")
diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf
index c8aeb6c471..8869ea9e50 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf
@@ -475,5 +475,10 @@ locals {

   frontend_ips = slice(local.standard_ips, 0, local.windows_high_availability ? 2 : 1)

+  extension_settings = length(var.database.user_assigned_identity_id) > 0 ? [{
+    "key"   = "msi_res_id"
+    "value" = var.database.user_assigned_identity_id
+  }] : []
+
 }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf
index 55ca1bdc5c..e0384e26d5 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf
@@ -454,7 +454,8 @@ resource "azurerm_virtual_machine_extension" "anydb_lnx_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
@@ -478,7 +479,8 @@ resource "azurerm_virtual_machine_extension" "anydb_win_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf
index e9a6f311b4..27e959372e 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf
@@ -319,3 +319,16 @@ output "scs_kdump_disks" {
     )
   )
 }
+
+
+
+output "subnet_prefix_app" {
+  description = "App subnet prefix"
+  value = local.enable_deployment ? (
+    local.application_subnet_exists ? (
+      data.azurerm_subnet.subnet_sap_app[0].address_prefixes[0]) : (
+      azurerm_subnet.subnet_sap_app[0].address_prefixes[0]
+    )) : (
+    ""
+  )
+  }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
index 7d33edcace..3f1a392993 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf
@@ -615,4 +615,10 @@ locals {
   ) : (
     [""]
   )

+  extension_settings = length(var.application_tier.user_assigned_identity_id) > 0 ? [{
+    "key"   = "msi_res_id"
+    "value" = var.application_tier.user_assigned_identity_id
+  }] : []
+
 }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
index 725c5ef3cd..e6e143c487 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf
@@ -443,7 +443,8 @@ resource "azurerm_virtual_machine_extension" "app_lnx_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
@@ -463,7 +464,8 @@ resource "azurerm_virtual_machine_extension" "app_win_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
index 27321851c5..3fbb0a4f3e 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
@@ -495,7 +495,8 @@ resource "azurerm_virtual_machine_extension" "scs_lnx_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
@@ -515,7 +516,8 @@ resource "azurerm_virtual_machine_extension" "scs_win_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf
index cfe80110c3..4c675c290d 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf
@@ -443,7 +443,8 @@ resource "azurerm_virtual_machine_extension" "web_lnx_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
@@ -463,7 +464,8 @@ resource "azurerm_virtual_machine_extension" "web_win_aem_extension" {
   type_handler_version = "1.0"
   settings             = jsonencode(
     {
-      "system": "SAP"
+      "system": "SAP",
+      "cfg": local.extension_settings
     }
   )
   tags = var.tags
diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf
index 074f5cc8bb..b3b272a391 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf
@@ -103,6 +103,31 @@ output "admin_subnet" {
   )
 }

+output "subnet_prefix_client" {
+  description = "Client subnet prefix"
+  value = local.enable_db_deployment && local.enable_admin_subnet ? (
+    local.admin_subnet_exists ? (
+      data.azurerm_subnet.admin[0].address_prefixes[0]) : (
+      azurerm_subnet.admin[0].address_prefixes[0]
+    )) : (
+    ""
+  )
+  }
+
+
+output "subnet_prefix_db" {
+  description = "DB subnet prefix"
+  value = local.enable_db_deployment ? (
+    local.database_subnet_exists ?
( + data.azurerm_subnet.db[0].address_prefixes[0]) : ( + azurerm_subnet.db[0].address_prefixes[0] + )) : ( + "" + ) + } + + + output "db_subnet" { description = "Admin subnet object" value = local.database_subnet_exists ? ( @@ -122,7 +147,7 @@ output "db_subnet_netmask" { ) } output "storage_subnet" { - description = "Database subnet netmask" + description = "Storage subnet" value = local.enable_db_deployment && local.enable_storage_subnet ? ( local.sub_storage_exists ? ( data.azurerm_subnet.storage[0]) : ( @@ -132,6 +157,17 @@ output "storage_subnet" { ) } +output "subnet_prefix_storage" { + description = "Storage subnet prefix" + value = local.enable_db_deployment && local.enable_storage_subnet ? ( + local.sub_storage_exists ? ( + data.azurerm_subnet.storage[0].address_prefixes[0]) : ( + azurerm_subnet.storage[0].address_prefixes[0] + )) : ( + "" + ) + } + output "route_table_id" { description = "Azure resource ID of the route table" value = try(var.landscape_tfstate.route_table_id, "") diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/subnets.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/subnets.tf index c407e3fa34..fa859f1ff8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/subnets.tf @@ -80,10 +80,8 @@ resource "azurerm_subnet_route_table_association" "db" { resource "azurerm_subnet" "storage" { provider = azurerm.main - count = local.enable_db_deployment && local.enable_storage_subnet ? ( - local.sub_storage_exists ? 0 : 1) : ( - 0 - ) + count = !local.sub_storage_exists && local.enable_storage_subnet ? 1 : 0 + name = local.sub_storage_name resource_group_name = data.azurerm_virtual_network.vnet_sap.resource_group_name virtual_network_name = data.azurerm_virtual_network.vnet_sap.name @@ -93,10 +91,8 @@ resource "azurerm_subnet" "storage" { // Imports data of existing db subnet data "azurerm_subnet" "storage" { provider = azurerm.main - count = local.enable_db_deployment && local.enable_storage_subnet ? ( - local.sub_storage_exists ? 1 : 0) : ( - 0 - ) + count = local.sub_storage_exists && local.enable_storage_subnet ? 1 : 0 + name = split("/", local.sub_storage_arm_id)[10] resource_group_name = split("/", local.sub_storage_arm_id)[4] virtual_network_name = split("/", local.sub_storage_arm_id)[8] diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf index 3f4fa5c6bb..7d6aaa5145 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf @@ -114,7 +114,6 @@ locals { //ANF support use_ANF = try(var.database.use_ANF, false) //Scalout subnet is needed if ANF is used and there are more than one hana node - enable_storage_subnet = local.use_ANF && local.dbnode_per_site > 1 //Anchor VM deploy_anchor = try(var.infrastructure.anchor_vms.deploy, false) @@ -374,7 +373,11 @@ locals { length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) + length(try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) ) > 0 - sub_storage_arm_id = try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "") + sub_storage_arm_id = local.sub_storage_defined ? 
( + try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) : ( + try(var.landscape_tfstate.storage_subnet_id, "") + ) + sub_storage_exists = length(local.sub_storage_arm_id) > 0 sub_storage_name = local.sub_storage_exists ? ( try(split("/", local.sub_storage_arm_id)[10], "")) : ( @@ -415,6 +418,9 @@ locals { ) ) + enable_storage_subnet = (length(local.sub_storage_prefix) + length(local.sub_storage_arm_id)) > 0 + + // If the user specifies arm id of key vaults in input, // the key vault will be imported instead of using the landscape key vault user_key_vault_id = length(try(var.key_vault.kv_user_id, "")) > 0 ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index 37dda1e7c8..cc5bef6836 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -233,3 +233,13 @@ data "azurerm_netapp_volume" "hanashared" { } + + +data "azurerm_subnet" "ANF" { + provider = azurerm.main + count = length(local.ANF_pool_settings.subnet_id) > 0 ? 1 : 0 + name = split("/", local.ANF_pool_settings.subnet_id)[10] + resource_group_name = split("/", local.ANF_pool_settings.subnet_id)[4] + virtual_network_name = split("/", local.ANF_pool_settings.subnet_id)[8] +} + diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf index d76c99c93a..895bebf180 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf @@ -88,7 +88,7 @@ locals { 1 ) volumeSpecName = "data" - proximityPlacementGroup = length(var.scale_set_id) == 0 ? try(var.ppg[0], null) : null + proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size throughput_in_mibps = var.hana_ANF_volumes.data_volume_throughput zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null @@ -107,7 +107,7 @@ locals { 2 ) volumeSpecName = "data" - proximityPlacementGroup = length(var.scale_set_id) == 0 ? (length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null)) : null + proximityPlacementGroup = length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null) storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size throughput_in_mibps = var.hana_ANF_volumes.data_volume_throughput zone = local.db_zone_count > 1 ? try(local.zones[1], null) : null @@ -127,7 +127,7 @@ locals { 1 ) volumeSpecName = "log" - proximityPlacementGroup = length(var.scale_set_id) == 0 ? try(var.ppg[0], null) : null + proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size throughput_in_mibps = var.hana_ANF_volumes.log_volume_throughput zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null @@ -146,7 +146,7 @@ locals { 2 ) volumeSpecName = "log" - proximityPlacementGroup = length(var.scale_set_id) == 0 ? (length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null)) : null + proximityPlacementGroup = length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null) storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size throughput_in_mibps = var.hana_ANF_volumes.log_volume_throughput zone = local.db_zone_count > 1 ? 
try(local.zones[1], null) : null
@@ -165,7 +165,7 @@ locals {
     1
   )
   volumeSpecName = "shared"
-  proximityPlacementGroup = length(var.scale_set_id) == 0 ? try(var.ppg[0], null) : null
+  proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null
   storage_quota_in_gb = var.hana_ANF_volumes.shared_volume_size
   throughput_in_mibps = var.hana_ANF_volumes.shared_volume_throughput
   zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null
@@ -184,8 +184,7 @@ locals {
     2
   )
   volumeSpecName = "shared"
-
-  proximityPlacementGroup = length(var.scale_set_id) == 0 ? (length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null)) : null
+  proximityPlacementGroup = length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null)
   storage_quota_in_gb = var.hana_ANF_volumes.shared_volume_size
   throughput_in_mibps = var.hana_ANF_volumes.shared_volume_throughput
   zone = local.db_zone_count > 1 ? try(local.zones[1], null) : null
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
index d26682b249..61ee1e145d 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
@@ -217,3 +217,13 @@ output "database_kdump_disks" {
     )
   )
 }
+
+
+output "ANF_subnet_prefix" {
+  description = "ANF subnet prefix"
+  value = (local.enable_deployment && length(local.ANF_pool_settings.subnet_id) > 0) ? (
+    data.azurerm_subnet.ANF[0].address_prefixes[0] ) : (
+    ""
+  )
+
+  }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
index 4f8b3b649f..da5b0ebe03 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
@@ -51,7 +51,7 @@ locals {
   use_ANF = try(var.database.use_ANF, false)
   //Scalout subnet is needed if ANF is used and there are more than one hana node
   dbnode_per_site = length(try(var.database.dbnodes, [{}]))
-  enable_storage_subnet = local.use_ANF && local.dbnode_per_site > 1
+  enable_storage_subnet = var.database.use_ANF && var.database.scale_out && length(try(var.storage_subnet.id, "")) > 0

   // Availability Set
   availabilityset_arm_ids = try(var.database.avset_arm_ids, [])
@@ -391,5 +391,9 @@ locals {
     (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count) : (
     0
   )
+  extension_settings = length(var.database.user_assigned_identity_id) > 0 ?
[{ + "key" = "msi_res_id" + "value" = var.database.user_assigned_identity_id + }] : [] } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index c0ab1e6107..9460ae9340 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -23,6 +23,8 @@ resource "azurerm_network_interface" "nics_dbnodes_admin" { var.database_server_count) : ( 0 ) + depends_on = [ azurerm_network_interface.nics_dbnodes_db ] + name = format("%s%s%s%s%s", var.naming.resource_prefixes.admin_nic, local.prefix, @@ -109,11 +111,16 @@ resource "azurerm_network_interface_application_security_group_association" "db" application_security_group_id = var.db_asg_id } +######################################################################################### +# # +# Storage Network Interface # +# # +######################################################################################### -// Creates the NIC for Hana storage resource "azurerm_network_interface" "nics_dbnodes_storage" { provider = azurerm.main count = local.enable_deployment && local.enable_storage_subnet ? var.database_server_count : 0 + depends_on = [ azurerm_network_interface.nics_dbnodes_db, azurerm_network_interface.nics_dbnodes_admin ] name = format("%s%s%s%s%s", var.naming.resource_prefixes.storage_nic, local.prefix, @@ -198,17 +205,17 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { network_interface_ids = local.enable_storage_subnet ? ( var.options.legacy_nic_order ? ( - [ - azurerm_network_interface.nics_dbnodes_admin[count.index].id, + compact([ + var.database_dual_nics ? azurerm_network_interface.nics_dbnodes_admin[count.index].id : null, azurerm_network_interface.nics_dbnodes_db[count.index].id, azurerm_network_interface.nics_dbnodes_storage[count.index].id - ] + ]) ) : ( - [ + compact([ azurerm_network_interface.nics_dbnodes_db[count.index].id, - azurerm_network_interface.nics_dbnodes_admin[count.index].id, + var.database_dual_nics ? azurerm_network_interface.nics_dbnodes_admin[count.index].id : null, azurerm_network_interface.nics_dbnodes_storage[count.index].id - ] + ]) ) ) : ( var.database_dual_nics ? ( @@ -396,7 +403,8 @@ resource "azurerm_virtual_machine_extension" "hdb_linux_extension" { type_handler_version = "1.0" settings = jsonencode( { - "system": "SAP" + "system": "SAP", + "cfg": local.extension_settings } ) tags = var.tags diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index f2bc876c87..fcf042a51c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -234,6 +234,11 @@ resource "local_file" "sap-parameters_yml" { secret_prefix = local.secret_prefix, settings = local.settings sid = var.sap_sid, + subnet_prefix_anf = var.subnet_prefix_anf, + subnet_prefix_app = var.subnet_prefix_app, + subnet_prefix_client = var.subnet_prefix_client + subnet_prefix_db = var.subnet_prefix_db + subnet_prefix_storage = var.subnet_prefix_storage, upgrade_packages = var.upgrade_packages ? "true" : "false" use_msi_for_clusters = var.use_msi_for_clusters usr_sap = length(var.usr_sap) > 1 ? 
(
diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl
index 7ac2d8a324..cc1fdfc368 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl
+++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl
@@ -45,8 +45,10 @@ ers_instance_number: "${ers_instance_number}"
 # the SAP Central Services virtual machines
 ers_lb_ip: ${ers_server_loadbalancer_ip}

+%{~ if platform == "SQLSERVER" }
 # IP address of CNO in Windows and takes the form IPAddress/CIDR
 scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip}
+%{~ endif }


 # PAS Instance Number
@@ -55,6 +57,10 @@ pas_instance_number: "${pas_instance_number}"
 # APP Instance Number
 app_instance_number: "${app_instance_number}"

+# Set to true to instruct Ansible to configure Pacemaker clusters using a managed identity
+use_msi_for_clusters: ${use_msi_for_clusters}
+
+
 #############################################################################
 #                                                                           #
@@ -76,9 +82,25 @@ db_instance_number: "${db_instance_number}"

 platform: ${platform}

+%{~ if scale_out }
+
+#############################################################################
+#                                                                           #
+#                        Scale Out information                              #
+#                                                                           #
+#############################################################################
+
 # Scale out defines if the database is to be deployed in a scale out configuration
 db_scale_out: ${scale_out}
 db_no_standby: ${scale_out_no_standby_role}
+
+%{~ endif }
+subnet_prefix_anf: ${subnet_prefix_anf}
+subnet_prefix_app: ${subnet_prefix_app}
+subnet_prefix_db: ${subnet_prefix_db}
+subnet_prefix_client: ${subnet_prefix_client}
+subnet_prefix_storage: ${subnet_prefix_storage}
+
 # db_high_availability is a boolean flag indicating if the
 # SAP database servers are deployed using high availability
 db_high_availability: ${database_high_availability}
@@ -87,7 +109,7 @@ database_cluster_type: ${database_cluster_type}
 # database_loadbalancer_ip is the IP address of the load balancer for the database virtual machines
 database_loadbalancer_ip: ${database_loadbalancer_ip}

-# Backwards copmpatibility
+# Backwards compatibility
 db_lb_ip: ${database_loadbalancer_ip}

 # database_cluster_ip is the IP address of the load balancer for the database cluster in Windows
@@ -98,6 +120,8 @@ use_simple_mount: ${is_use_simple_mount}

 # use_fence_kdump defines if optional kdump stonith device needs to be added for RHEL clusters.
use_fence_kdump: ${is_use_fence_kdump} + + ############################################################################# # # # NFS # @@ -128,9 +152,6 @@ secret_prefix: ${secret_prefix} # Set to true to instruct Ansible to update all the packages on the virtual machines upgrade_packages: ${upgrade_packages} -# Set to true to instruct Ansible to configure Pacemaker clusters using an managed identity -use_msi_for_clusters: ${use_msi_for_clusters} - ${settings} ############################################################################# diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 7291e8586e..b4bd6904aa 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -1,3 +1,4 @@ +variable "subnet_prefix_anf" { description = "address prefix for the ANF subnet" } variable "ansible_user" { description = "The ansible remote user account to use" default = "azureadm" @@ -24,6 +25,9 @@ variable "bom_name" { description = "Name of Bill of Materials file" default = "" } +variable "subnet_prefix_app" { description = "address prefix for the app subnet" } +variable "subnet_prefix_db" { description = "address prefix for the db subnet" } +variable "subnet_prefix_client" { description = "address prefix for the client subnet" } variable "configuration_settings" { description = "This is a dictionary that will contain values persisted to the sap-parameters.file" } variable "database_admin_ips" { description = "List of Admin NICs for the DB VMs" } variable "database_cluster_type" { @@ -171,6 +175,7 @@ variable "scs_server_secondary_ips" { description = "List of seconda variable "scs_vm_names" { description = "List of VM names for the SCS Servers" } variable "shared_home" { description = "If defined provides shared-home support" } variable "sid_keyvault_user_id" { description = "Defines the names for the resources" } +variable "subnet_prefix_storage" { description = "address prefix for the storage subnet" } variable "tfstate_resource_id" { description = "Resource ID for tf state file" } variable "upgrade_packages" { description = "Upgrade packages" } variable "use_custom_dns_a_registration" { From 429219f4d7bb23059cd423f8f0278ba639f6bdde Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 26 Mar 2024 09:23:46 +0530 Subject: [PATCH 469/607] Refactor networking tasks for HANA scaleout --- .../roles-os/1.10-networking/tasks/main.yaml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index c60a6b9fdf..8eb365bbd6 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -175,7 +175,7 @@ - name: "1.10 Networking - Add routes and restart VM for HANA scaleout" when: - db_scale_out - - definition_made.changed + - node_tier == 'hana' block: # display the list of azure network interfaces and the expected ethX interface names - name: "1.10 Networking - Print the network configuration details for storage route" @@ -244,17 +244,17 @@ - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) -# reboot VM after the new interface definitions are changed. 
-- name: "1.10 Networking - Reboot VM and wait for 5 minutes" - ansible.builtin.reboot: - reboot_timeout: 300 - when: - - definition_made.changed + # reboot VM after the new routes are added + - name: "1.10 Networking - Reboot VM and wait for 5 minutes" + ansible.builtin.reboot: + reboot_timeout: 300 + when: + - definition_made.changed -- name: "1.10 Networking - Wait for connection to be established after reboot" - ansible.builtin.wait_for_connection: - delay: 10 - timeout: 300 + - name: "1.10 Networking - Wait for connection to be established after reboot" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 ... # /*----------------------------------------------------------------------------8 From 8fe40b8950be3e14d4c2ad1b814b0302ed10e057 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 26 Mar 2024 11:48:32 +0200 Subject: [PATCH 470/607] Update network interface conditions in main.yaml --- deploy/ansible/roles-os/1.10-networking/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index 8eb365bbd6..e77e19480e 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -209,7 +209,7 @@ when: - az_network_interfaces is defined - subnet_prefix_storage is defined - - az_network_interfaces | length >= 2 + - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_prefix_storage | ansible.utils.network_in_usable( azif.ipAddress ) @@ -228,7 +228,7 @@ when: - az_network_interfaces is defined - subnet_prefix_client is defined - - az_network_interfaces | length >= 2 + - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) @@ -240,7 +240,7 @@ when: - az_network_interfaces is defined - subnet_prefix_client is defined - - az_network_interfaces | length >= 1 + - az_network_interfaces | length > 1 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) From 4e99d6a1ea7c10fa0c85eb5ce7c9243f1665d242 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 27 Mar 2024 10:06:32 +0200 Subject: [PATCH 471/607] Add ability to deploy the Monitoring extension (#570) * Add the Monitoring Extension to the Application Tier * Replace Windows virtual machine with Linux virtual machine in monitoring extension * Add monitoring extension to database tier * Update destination address prefixes in NSG rules * Fix destination_address_prefixes in nsg.tf * Update monitoring extension count based on database OS type * Add Monitoring extension for iSCSI servers * Add deploy_monitoring_extension parameter * Add storage subnet data source * Add Download link in Landscape/Edit view and update default value for deploy_monitoring_extension * Update monitoring extension names for utility VMs * Refactor monitoring extension deployment conditions in vm.tf --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/LandscapeModel.cs | 6 +- Webapp/SDAF/Models/SystemModel.cs | 2 + .../ParameterDetails/LandscapeDetails.json | 19 +++++- .../ParameterDetails/LandscapeTemplate.txt | 2 + .../SDAF/ParameterDetails/SystemDetails.json | 9 +++ .../SDAF/ParameterDetails/SystemTemplate.txt | 3 + Webapp/SDAF/Views/Landscape/Edit.cshtml | 6 ++ .../run/sap_landscape/tfvar_variables.tf | 6 ++ 
.../terraform/run/sap_landscape/transform.tf | 10 +-- .../run/sap_system/tfvar_variables.tf | 6 ++ deploy/terraform/run/sap_system/transform.tf | 1 + .../modules/sap_landscape/iscsi.tf | 25 ++++++++ .../modules/sap_landscape/nsg.tf | 14 +++-- .../modules/sap_landscape/subnets.tf | 46 ++++++++++++++ .../modules/sap_landscape/variables_local.tf | 4 ++ .../modules/sap_landscape/vm.tf | 62 +++++++++++++++++++ .../sap_system/anydb_node/variables_local.tf | 1 + .../modules/sap_system/anydb_node/vm-anydb.tf | 50 +++++++++++++++ .../sap_system/app_tier/variables_local.tf | 3 + .../modules/sap_system/app_tier/vm-app.tf | 51 +++++++++++++++ .../modules/sap_system/app_tier/vm-scs.tf | 49 +++++++++++++++ .../modules/sap_system/app_tier/vm-webdisp.tf | 47 ++++++++++++++ .../sap_system/hdb_node/variables_local.tf | 1 + .../modules/sap_system/hdb_node/vm-hdb.tf | 24 +++++++ 24 files changed, 433 insertions(+), 14 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index fc2dd05072..46725e0769 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -47,8 +47,12 @@ public bool IsValid() public bool? place_delete_lock_on_resources { get; set; } = false; public string controlPlaneLocation { get; set; } + public Tag[] tags { get; set; } + + public bool? deploy_monitoring_extension { get; set; } = false; + /*---------------------------------------------------------------------------8 | | | Networking information | @@ -188,8 +192,6 @@ public bool IsValid() public string storage_subnet_nsg_name { get; set; } - - /*---------------------------------------------------------------------------8 | | | Miscallaneous information | diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 6f491a9af9..3474f18532 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -74,6 +74,8 @@ public bool IsValid() public bool? deploy_v1_monitoring_extension { get; set; } = true; + public bool? deploy_monitoring_extension { get; set; } = false; + public bool? use_scalesets_for_deployment { get; set; } = false; public bool? 
database_use_premium_v2_storage { get; set; } = false; diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 26d700c64f..e1509814ae 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -111,11 +111,26 @@ "Options": [], "Overrules": "", "Display": 2 + } + ] + }, + { + "Section": "Infrastructure settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#environment-parameters", + "Parameters": [ + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 }, { "Name": "place_delete_lock_on_resources", "Required": false, - "Description": " If defined, a delete lock will be placed on the key resources (virtual network and key vault)", + "Description": "If defined, a delete lock will be placed on the key resources (virtual network and key vault)", "Type": "checkbox", "Options": [], "Overrules": "", @@ -124,7 +139,7 @@ { "Name": "use_spn", "Required": false, - "Description": " If set, the deployment is performed using the Service Principal defined for the workload zone, otherwise the managed identity of the deployer is used", + "Description": "If set, the deployment is performed using the Service Principal defined for the workload zone, otherwise the managed identity of the deployer is used", "Type": "checkbox", "Options": [], "Overrules": "", diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index b02ce2e2a4..67ab204842 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -36,6 +36,8 @@ $$Description$$ #If you want to provide a custom naming json use the following parameter. 
$$name_override_file$$ +# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines +$$deploy_monitoring_extension$$ ######################################################################################### # # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 2ca3633b01..6778a30007 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -269,6 +269,15 @@ "Overrules": "", "Display": 3 }, + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, { "Name": "vm_disk_encryption_set_id", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 6f01f60f77..25bbdea188 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -486,6 +486,9 @@ $$deploy_application_security_groups$$ # deploy_v1_monitoring_extension Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed $$deploy_v1_monitoring_extension$$ +# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines +$$deploy_monitoring_extension$$ + # dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names $$dns_a_records_for_secondary_names$$ diff --git a/Webapp/SDAF/Views/Landscape/Edit.cshtml b/Webapp/SDAF/Views/Landscape/Edit.cshtml index 014dd2785b..c87ccaf96c 100644 --- a/Webapp/SDAF/Views/Landscape/Edit.cshtml +++ b/Webapp/SDAF/Views/Landscape/Edit.cshtml @@ -112,6 +112,12 @@ Deploy + +   + + Download + + diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 57ebac56cc..a14e131d6f 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -412,6 +412,12 @@ variable "user_assigned_identity_id" { default = "" } +variable "deploy_monitoring_extension" { + description = "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines" + default = false + } + + ######################################################################################### # # # Storage Account variables # diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index c61942e14c..faf5c1c6c8 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -168,10 +168,12 @@ locals { } temp_infrastructure = { - environment = coalesce(var.environment, try(var.infrastructure.environment, "")) - region = lower(coalesce(var.location, try(var.infrastructure.region, ""))) - codename = try(var.infrastructure.codename, var.codename) - tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) + environment = coalesce(var.environment, try(var.infrastructure.environment, "")) + region = lower(coalesce(var.location, try(var.infrastructure.region, ""))) + codename = try(var.infrastructure.codename, var.codename) + tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) + deploy_monitoring_extension = 
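# NOTE (review): PATCH 471/607 wires `deploy_monitoring_extension` from the
# tfvars surface through transform.tf into the `infrastructure` object; each
# module then derives an effective local that additionally requires a
# user-assigned identity, because the agent authenticates with it. Every
# extension added in this patch follows the same shape; a condensed sketch
# (the resource label and the VM reference are illustrative, not actual
# module names):

resource "azurerm_virtual_machine_extension" "monitoring_sketch" {
  count                      = local.deploy_monitoring_extension ? 1 : 0
  virtual_machine_id         = azurerm_linux_virtual_machine.example[count.index].id
  name                       = "AzureMonitorLinuxAgent"
  publisher                  = "Microsoft.Azure.Monitor"
  type                       = "AzureMonitorLinuxAgent"
  type_handler_version       = "1.0"
  auto_upgrade_minor_version = "true"

  settings = jsonencode(
    {
      "authentication" = {
        "managedIdentity" = {
          "identifier-name"  = "mi_res_id"
          "identifier-value" = var.infrastructure.user_assigned_identity_id
        }
      }
    }
  )
}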
var.deploy_monitoring_extension + user_assigned_identity_id = var.user_assigned_identity_id } authentication = { diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index e6e7975c16..2926b8fa06 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1372,6 +1372,12 @@ variable "tags" { default = {} } +variable "deploy_monitoring_extension" { + description = "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines" + default = true + } + + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 1b03ae2d28..514abac51b 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -9,6 +9,7 @@ locals { codename = try(var.codename, try(var.infrastructure.codename, "")) tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) use_app_proximityplacementgroups = var.use_app_proximityplacementgroups + deploy_monitoring_extension = var.deploy_monitoring_extension } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 8fa86ab21f..eb6c60bb65 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -314,3 +314,28 @@ resource "tls_private_key" "iscsi" { rsa_bits = 2048 } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension ? ( + local.iscsi_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.iscsi[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.infrastructure.iscsi.user_assigned_identity_id + } + } + } + ) +} diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index 7799e1bc78..755e6fe52c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -172,7 +172,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_app" { source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986, 5404, 5405, 7630] source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) - destination_address_prefixes = azurerm_subnet.app[0].address_prefixes + destination_address_prefixes = local.application_subnet_existing ? 
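# NOTE (review): two observations on the hunks above. First, the sap_system
# tfvars default for deploy_monitoring_extension is true while the web app's
# SystemModel defaults it to false; if that is not intentional, the two
# surfaces will disagree for configurations generated outside the web app.
# Second, the NSG hunks all apply one pattern: when the subnet pre-exists,
# destination prefixes must be read from the data source, i.e.
#   local.<tier>_subnet_existing ? data.azurerm_subnet.<tier>[0].address_prefixes : azurerm_subnet.<tier>[0].address_prefixes
# presumably because the managed azurerm_subnet resource is not created in
# that case.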
data.azurerm_subnet.app[0].address_prefixes : azurerm_subnet.app[0].address_prefixes } // Add SSH network security rule @@ -196,12 +196,13 @@ resource "azurerm_network_security_rule" "nsr_controlplane_web" { source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986] source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) - destination_address_prefixes = azurerm_subnet.web[0].address_prefixes + destination_address_prefixes = local.web_subnet_existing ? data.azurerm_subnet.web[0].address_prefixes : azurerm_subnet.web[0].address_prefixes } // Add SSH network security rule resource "azurerm_network_security_rule" "nsr_controlplane_storage" { provider = azurerm.main + count = local.storage_subnet_defined ? local.storage_subnet_nsg_exists ? 0 : 1 : 0 depends_on = [ azurerm_network_security_group.storage @@ -220,7 +221,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_storage" { source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049] source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) - destination_address_prefixes = azurerm_subnet.storage[0].address_prefixes + destination_address_prefixes = local.storage_subnet_existing ? data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes } // Add SSH network security rule @@ -247,7 +248,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_db" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes) ) - destination_address_prefixes = azurerm_subnet.db[0].address_prefixes + destination_address_prefixes = local.database_subnet_existing ? data.azurerm_subnet.db[0].address_prefixes : azurerm_subnet.db[0].address_prefixes } // Add network security rule @@ -269,10 +270,11 @@ resource "azurerm_network_security_rule" "nsr_controlplane_admin" { access = "Allow" protocol = "Tcp" source_port_range = "*" - destination_port_ranges = [22, 443, 3389, 5985, 5986] + destination_port_ranges = [22, 443, 3389, 5985, 5986,111, 635, 2049, 4045, 4046, 4049] source_address_prefixes = compact(concat( var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes) ) - destination_address_prefixes = azurerm_subnet.admin[0].address_prefixes + + destination_address_prefixes = local.admin_subnet_existing ? data.azurerm_subnet.admin[0].address_prefixes : azurerm_subnet.admin[0].address_prefixes } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index 9b5f6f33f5..222c4d6f48 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -16,6 +16,15 @@ resource "azurerm_subnet" "admin" { ) } +data "azurerm_subnet" "admin" { + provider = azurerm.main + count = local.admin_subnet_existing ? 
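# NOTE (review): the subnet data sources added in subnets.tf recover the
# subnet name, resource group and virtual network positionally from the ARM
# resource id. A self-contained sketch of the indexing (the id value is
# illustrative only):

locals {
  example_subnet_id = "/subscriptions/0000-0000/resourceGroups/rg-sap/providers/Microsoft.Network/virtualNetworks/vnet-sap/subnets/db-subnet"

  example_resource_group  = split("/", local.example_subnet_id)[4]  # "rg-sap"
  example_virtual_network = split("/", local.example_subnet_id)[8]  # "vnet-sap"
  example_subnet_name     = split("/", local.example_subnet_id)[10] # "db-subnet"
}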
1 : 0 + name = split("/", local.admin_subnet_arm_id)[10] + resource_group_name = split("/", local.admin_subnet_arm_id)[4] + virtual_network_name = split("/", local.admin_subnet_arm_id)[8] +} + + // Creates db subnet of SAP VNET resource "azurerm_subnet" "db" { provider = azurerm.main @@ -33,6 +42,14 @@ resource "azurerm_subnet" "db" { ) } +data "azurerm_subnet" "db" { + provider = azurerm.main + count = local.database_subnet_existing ? 1 : 0 + name = split("/", local.database_subnet_arm_id)[10] + resource_group_name = split("/", local.database_subnet_arm_id)[4] + virtual_network_name = split("/", local.database_subnet_arm_id)[8] +} + // Creates app subnet of SAP VNET resource "azurerm_subnet" "app" { provider = azurerm.main @@ -51,6 +68,15 @@ resource "azurerm_subnet" "app" { ) } +data "azurerm_subnet" "app" { + provider = azurerm.main + count = local.application_subnet_existing ? 1 : 0 + name = split("/", local.application_subnet_arm_id)[10] + resource_group_name = split("/", local.application_subnet_arm_id)[4] + virtual_network_name = split("/", local.application_subnet_arm_id)[8] +} + + // Creates web subnet of SAP VNET resource "azurerm_subnet" "web" { provider = azurerm.main @@ -69,6 +95,16 @@ resource "azurerm_subnet" "web" { ) } +data "azurerm_subnet" "web" { + provider = azurerm.main + count = local.web_subnet_existing ? 1 : 0 + name = split("/", local.web_subnet_arm_id)[10] + resource_group_name = split("/", local.web_subnet_arm_id)[4] + virtual_network_name = split("/", local.web_subnet_arm_id)[8] +} + + + // Creates storage subnet of SAP VNET resource "azurerm_subnet" "storage" { provider = azurerm.main @@ -87,6 +123,16 @@ resource "azurerm_subnet" "storage" { ) } +data "azurerm_subnet" "storage" { + provider = azurerm.main + count = local.storage_subnet_existing ? 1 : 0 + name = split("/", local.storage_subnet_arm_id)[10] + resource_group_name = split("/", local.storage_subnet_arm_id)[4] + virtual_network_name = split("/", local.storage_subnet_arm_id)[8] +} + + + // Creates anf subnet of SAP VNET resource "azurerm_subnet" "anf" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 894426ce48..e8a87053f1 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -788,4 +788,8 @@ locals { use_AFS_for_shared = (var.NFS_provider == "ANF" && var.use_AFS_for_shared_storage) || var.NFS_provider == "AFS" + + deploy_monitoring_extension = var.infrastructure.deploy_monitoring_extension && length(try(var.infrastructure.user_assigned_identity_id,"")) > 0 + } + diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index d16a528c93..2175e176ad 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -86,6 +86,14 @@ resource "azurerm_windows_virtual_machine" "utility_vm" { sku = var.vm_settings.image.sku version = var.vm_settings.image.version } + dynamic "identity" { + for_each = range(length(var.infrastructure.user_assigned_identity_id) > 0 ? 
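# NOTE (review): the dynamic "identity" blocks added to the utility VMs here
# use the range(condition ? 1 : 0) idiom because dynamic blocks iterate a
# collection rather than test a boolean; range() yields an empty sequence
# (block omitted) or a single element (block rendered once), which is the
# conventional HCL way to make a nested block optional.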
1 : 0) + content { + type = "UserAssigned" + identity_ids = [var.infrastructure.user_assigned_identity_id] + } + } + lifecycle { ignore_changes = [ @@ -149,8 +157,62 @@ resource "azurerm_linux_virtual_machine" "utility_vm" { offer = var.vm_settings.image.offer sku = var.vm_settings.image.sku version = var.vm_settings.image.version + } + dynamic "identity" { + for_each = range(length(var.infrastructure.user_assigned_identity_id) > 0 ? 1 : 0) + content { + type = "UserAssigned" + identity_ids = [var.infrastructure.user_assigned_identity_id] + } + } + + +} +resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.vm_settings.image.os_type) == "LINUX" ? var.vm_settings.count : 0 + virtual_machine_id = azurerm_linux_virtual_machine.utility_vm[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.infrastructure.user_assigned_identity_id + } + } + } + ) } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.vm_settings.image.os_type) == "WINDOWS" ? var.vm_settings.count : 0 + + virtual_machine_id = azurerm_windows_virtual_machine.utility_vm[count.index].id + name = "AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.infrastructure.user_assigned_identity_id + } + } + } + ) +} + + diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf index 8869ea9e50..6c0a8dac4b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_local.tf @@ -480,5 +480,6 @@ locals { "value" = var.database.user_assigned_identity_id }] : [] + deploy_monitoring_extension = local.enable_deployment && var.infrastructure.deploy_monitoring_extension && length(var.database.user_assigned_identity_id) > 0 } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index e0384e26d5..2383f24894 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -699,3 +699,53 @@ resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { caching = "None" lun = var.database.fence_kdump_lun_number } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.database.os.os_type) == "LINUX" ? 
( + var.database_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.dbserver[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.database.user_assigned_identity_id + } + } + } + ) +} + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.database.os.os_type) == "WINDOWS" ? ( + var.database_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.dbserver[count.index].id + name = "AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.database.user_assigned_identity_id + } + } + } + ) + +} + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf index 3f1a392993..2e63c89b0b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_local.tf @@ -621,4 +621,7 @@ locals { "value" = var.application_tier.user_assigned_identity_id }] : [] + deploy_monitoring_extension = local.enable_deployment && var.infrastructure.deploy_monitoring_extension && length(var.application_tier.user_assigned_identity_id) > 0 + + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index e6e143c487..fa4762481a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -493,3 +493,54 @@ resource "azurerm_virtual_machine_extension" "configure_ansible_app" { ) tags = var.tags } + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_app_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.app_os.os_type) == "LINUX" ? ( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.app[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) +} + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? 
( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.app[count.index].id + name = "AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) + +} + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 3fbb0a4f3e..a3510555d8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -696,3 +696,52 @@ resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { caching = "None" lun = var.application_tier.fence_kdump_lun_number } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) +} + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id + name = "AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) +} + + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 4c675c290d..c4c68038ac 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -622,3 +622,50 @@ resource "azurerm_availability_set" "web" { tags = var.tags } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_web_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.web_os.os_type) == "LINUX" ? 
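# NOTE (review): the Linux and Windows variants in this file and the following
# ones differ only in the agent name/type (AzureMonitorLinuxAgent vs
# AzureMonitorWindowsAgent) and in the VM resource they attach to; the count
# is the tier's server count, gated by that tier's OS type.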
( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.web[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) +} + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.web_os.os_type) == "WINDOWS" ? ( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.web[count.index].id + name = "AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.application_tier.user_assigned_identity_id + } + } + } + ) +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index da5b0ebe03..eea603147e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -396,4 +396,5 @@ locals { "value" = var.database.user_assigned_identity_id }] : [] + deploy_monitoring_extension = local.enable_deployment && var.infrastructure.deploy_monitoring_extension && length(var.database.user_assigned_identity_id) > 0 } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 9460ae9340..6792baaa78 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -553,3 +553,27 @@ resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { caching = "None" lun = var.database.fence_kdump_lun_number } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension ? 
( + var.database_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.vm_dbnode[count.index].id + name = "AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.database.user_assigned_identity_id + } + } + } + ) +} From e4312e769af806ed4608046c568eef2b73b7362b Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 27 Mar 2024 14:11:26 +0530 Subject: [PATCH 472/607] Update subnet prefixes to use CIDR notation --- .../tasks/main.yaml | 2 +- .../roles-os/1.10-networking/tasks/main.yaml | 28 +++++++++---------- .../2.4-hosts-file/templates/hosts.j2 | 4 +-- deploy/terraform/run/sap_system/module.tf | 10 +++---- .../modules/sap_landscape/subnets.tf | 2 +- .../modules/sap_landscape/variables_local.tf | 2 +- .../modules/sap_system/app_tier/outputs.tf | 2 +- .../common_infrastructure/outputs.tf | 6 ++-- .../sap_system/output_files/inventory.tf | 10 +++---- .../output_files/sap-parameters.tmpl | 10 +++---- .../output_files/variables_global.tf | 10 +++---- 11 files changed, 43 insertions(+), 43 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 6f0aadd39b..8e54514de3 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -108,7 +108,7 @@ _rsp_system_usage: "custom" use_master_password: "{{ hana_use_master_password }}" password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - _rsp_internal_network: "{{ subnet_prefix_db }}" + _rsp_internal_network: "{{ subnet_cidr_db | default((subnet_address + '/' + subnet_prefix), true) }}" # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. 
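# NOTE (review): the two-argument form of the Jinja2 `default` filter is doing
# real work in _rsp_internal_network above: default(value, true) substitutes
# the fallback when subnet_cidr_db is defined but empty, not only when it is
# undefined, so a blank value in sap-parameters.yaml still resolves to the
# IMDS-derived address/prefix pair.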
_rsp_root_password: "{{ root_password }}" # Note: Last node in the DB list is marked as standby, while everything else except first node is marked as worker node diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index e77e19480e..99661b2e35 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -184,34 +184,34 @@ - "IpAddress: {{ azif.ipAddress }}" - "Subnet: {{ azif.subnet }}" - "InterfaceName: {{ azif.interfaceName }}" - - "ANF Subnet: {{ subnet_prefix_anf }}" - - "Gateway: {{ subnet_prefix_storage | ansible.utils.ipmath(1) }}" + - "ANF Subnet: {{ subnet_cidr_anf }}" + - "Gateway: {{ subnet_cidr_storage | ansible.utils.ipmath(1) }}" verbosity: 2 loop: "{{ az_network_interfaces | list }}" loop_control: loop_var: azif when: - az_network_interfaces is defined - - subnet_prefix_storage is defined + - subnet_cidr_storage is defined - az_network_interfaces | length >= 1 - not azif.interfaceName in ['eth0', 'eth0:0'] - - subnet_prefix_storage | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_storage | ansible.utils.network_in_usable( azif.ipAddress ) # since the storage nic is the 3rd added to the VM we will assume that the device is eth2 # and the connection is 'Wired connection 2' - name: "1.10 Networking - Add route to the ANF subnet via storage gateway" become: true become_user: root - ansible.builtin.shell: nmcli connection modify "Wired connection 2" +ipv4.routes "{{ subnet_prefix_anf }} {{ azif.subnet | ansible.utils.ipmath(1) }}" + ansible.builtin.shell: nmcli connection modify "Wired connection 2" +ipv4.routes "{{ subnet_cidr_anf }} {{ azif.subnet | ansible.utils.ipmath(1) }}" loop: "{{ az_network_interfaces | list }}" loop_control: loop_var: azif when: - az_network_interfaces is defined - - subnet_prefix_storage is defined + - subnet_cidr_storage is defined - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - - subnet_prefix_storage | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_storage | ansible.utils.network_in_usable( azif.ipAddress ) - name: "1.10 Networking - Print the network configuration details for client route" ansible.builtin.debug: @@ -219,30 +219,30 @@ - "IpAddress: {{ azif.ipAddress }}" - "Subnet: {{ azif.subnet }}" - "InterfaceName: {{ azif.interfaceName }}" - - "App Subnet: {{ subnet_prefix_app }}" - - "Gateway: {{ subnet_prefix_client | ansible.utils.ipmath(1) }}" + - "App Subnet: {{ subnet_cidr_app }}" + - "Gateway: {{ subnet_cidr_client | ansible.utils.ipmath(1) }}" verbosity: 2 loop: "{{ az_network_interfaces | list }}" loop_control: loop_var: azif when: - az_network_interfaces is defined - - subnet_prefix_client is defined + - subnet_cidr_client is defined - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_client | ansible.utils.network_in_usable( azif.ipAddress ) - name: "1.10 Networking - Add route to the application subnet via client gateway" - ansible.builtin.command: nmcli connection modify "Wired connection 1" +ipv4.routes "{{ subnet_prefix_app }} {{ azif.subnet | ansible.utils.ipmath(1) }}" + ansible.builtin.command: nmcli connection modify "Wired connection 1" +ipv4.routes "{{ subnet_cidr_app }} {{ azif.subnet | ansible.utils.ipmath(1) }}" loop: "{{ az_network_interfaces | list }}" loop_control: loop_var: 
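# NOTE (review): the subnet_prefix_* -> subnet_cidr_* rename above is cosmetic
# but clarifying: the values were full CIDR blocks all along, and both filters
# applied to them depend on that. `| ansible.utils.ipmath(1)` yields the first
# host address of the network, which Azure reserves as the subnet's default
# gateway, and `| ansible.utils.network_in_usable(ip)` tests whether an
# address falls within the block's usable range.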
azif when: - az_network_interfaces is defined - - subnet_prefix_client is defined + - subnet_cidr_client is defined - az_network_interfaces | length > 1 - not azif.interfaceName in ['eth0', 'eth0:0'] - - subnet_prefix_client | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_client | ansible.utils.network_in_usable( azif.ipAddress ) # reboot VM after the new routes are added - name: "1.10 Networking - Reboot VM and wait for 5 minutes" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index 253747d020..105c162774 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -129,10 +129,10 @@ ansible_facts. {% else %} {# Loop through remaining IPs for the virtual host #} {% for ip in host_ips[1:] %} -{% if ((db_scale_out) and ((subnet_prefix_storage is defined) and (subnet_prefix_storage | ansible.utils.network_in_usable(ip)))) %} +{% if ((db_scale_out) and ((subnet_cidr_storage is defined) and (subnet_cidr_storage | ansible.utils.network_in_usable(ip)))) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} -{% elif ((db_scale_out) and ((subnet_prefix_client is defined) and (subnet_prefix_client | ansible.utils.network_in_usable(ip)))) %} +{% elif ((db_scale_out) and ((subnet_cidr_client is defined) and (subnet_cidr_client | ansible.utils.network_in_usable(ip)))) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-client.' + sap_fqdn) }}{{ '%-21s' | format(host + '-client') }} {% else %} diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index d4dd6e2e51..8abe4b7f6b 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -353,11 +353,11 @@ module "output_files" { ))) loadbalancers = module.hdb_node.loadbalancers - subnet_prefix_anf = module.hdb_node.ANF_subnet_prefix - subnet_prefix_app = module.app_tier.subnet_prefix_app - subnet_prefix_client = module.common_infrastructure.subnet_prefix_client - subnet_prefix_db = module.common_infrastructure.subnet_prefix_db - subnet_prefix_storage = module.common_infrastructure.subnet_prefix_storage + subnet_cidr_anf = module.hdb_node.ANF_subnet_prefix + subnet_cidr_app = module.app_tier.subnet_cidr_app + subnet_cidr_client = module.common_infrastructure.subnet_cidr_client + subnet_cidr_db = module.common_infrastructure.subnet_cidr_db + subnet_cidr_storage = module.common_infrastructure.subnet_cidr_storage ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index 222c4d6f48..cf33a1823a 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -112,7 +112,7 @@ resource "azurerm_subnet" "storage" { name = local.storage_subnet_name resource_group_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].resource_group_name : azurerm_virtual_network.vnet_sap[0].resource_group_name virtual_network_name = local.SAP_virtualnetwork_exists ? 
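# NOTE (review): the hosts.j2 change above gives scale-out nodes extra
# records, <host>-storage.<fqdn> and <host>-client.<fqdn>, for whichever of
# the host's secondary IPs falls inside the storage or client CIDR; HANA
# inter-node and client traffic can then be pinned to the right NIC by name.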
data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name - address_prefixes = [local.subnet_prefix_storage] + address_prefixes = [local.subnet_cidr_storage] private_endpoint_network_policies_enabled = var.use_private_endpoint diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index e8a87053f1..bf46e32d7a 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -466,7 +466,7 @@ locals { ) ) ) - subnet_prefix_storage = local.storage_subnet_defined ? ( + subnet_cidr_storage = local.storage_subnet_defined ? ( try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) : ( "" ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf index 27e959372e..88bd803538 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/outputs.tf @@ -322,7 +322,7 @@ output "scs_kdump_disks" { -output "subnet_prefix_app" { +output "subnet_cidr_app" { description = "Storage subnet prefix" value = local.enable_deployment ? ( local.application_subnet_exists ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf index b3b272a391..054e337f8e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf @@ -103,7 +103,7 @@ output "admin_subnet" { ) } -output "subnet_prefix_client" { +output "subnet_cidr_client" { description = "Storage subnet prefix" value = local.enable_db_deployment && local.enable_admin_subnet ? ( local.admin_subnet_exists ? ( @@ -115,7 +115,7 @@ output "subnet_prefix_client" { } -output "subnet_prefix_db" { +output "subnet_cidr_db" { description = "DB subnet prefix" value = local.enable_db_deployment ? ( local.database_subnet_exists ? ( @@ -157,7 +157,7 @@ output "storage_subnet" { ) } -output "subnet_prefix_storage" { +output "subnet_cidr_storage" { description = "Storage subnet prefix" value = local.enable_db_deployment && local.enable_storage_subnet ? ( local.sub_storage_exists ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index fcf042a51c..ecadfbcd64 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -234,11 +234,11 @@ resource "local_file" "sap-parameters_yml" { secret_prefix = local.secret_prefix, settings = local.settings sid = var.sap_sid, - subnet_prefix_anf = var.subnet_prefix_anf, - subnet_prefix_app = var.subnet_prefix_app, - subnet_prefix_client = var.subnet_prefix_client - subnet_prefix_db = var.subnet_prefix_db - subnet_prefix_storage = var.subnet_prefix_storage, + subnet_cidr_anf = var.subnet_cidr_anf, + subnet_cidr_app = var.subnet_cidr_app, + subnet_cidr_client = var.subnet_cidr_client + subnet_cidr_db = var.subnet_cidr_db + subnet_cidr_storage = var.subnet_cidr_storage, upgrade_packages = var.upgrade_packages ? 
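# NOTE (review): the rename is threaded through every layer touched in this
# patch: module outputs (subnet_cidr_*) feed run/sap_system/module.tf, which
# hands them to the output_files module, which renders sap-parameters.yaml,
# which is where the subnet_cidr_* variables consumed by hosts.j2 and
# 1.10-networking ultimately originate. Missing any one layer would leave the
# Ansible side undefined.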
"true" : "false" use_msi_for_clusters = var.use_msi_for_clusters usr_sap = length(var.usr_sap) > 1 ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index cc1fdfc368..c59476eaf1 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -95,11 +95,11 @@ db_scale_out: ${scale_out} db_no_standby: ${scale_out_no_standby_role} %{~ endif } -subnet_prefix_anf: ${subnet_prefix_anf} -subnet_prefix_app: ${subnet_prefix_app} -subnet_prefix_db: ${subnet_prefix_db} -subnet_prefix_client: ${subnet_prefix_client} -subnet_prefix_storage: ${subnet_prefix_storage} +subnet_cidr_anf: ${subnet_cidr_anf} +subnet_cidr_app: ${subnet_cidr_app} +subnet_cidr_db: ${subnet_cidr_db} +subnet_cidr_client: ${subnet_cidr_client} +subnet_cidr_storage: ${subnet_cidr_storage} # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index b4bd6904aa..8ad31ffff5 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -1,4 +1,4 @@ -variable "subnet_prefix_anf" { description = "address prefix for the ANF subnet" } +variable "subnet_cidr_anf" { description = "address prefix for the ANF subnet" } variable "ansible_user" { description = "The ansible remote user account to use" default = "azureadm" @@ -25,9 +25,9 @@ variable "bom_name" { description = "Name of Bill of Materials file" default = "" } -variable "subnet_prefix_app" { description = "address prefix for the app subnet" } -variable "subnet_prefix_db" { description = "address prefix for the db subnet" } -variable "subnet_prefix_client" { description = "address prefix for the client subnet" } +variable "subnet_cidr_app" { description = "address prefix for the app subnet" } +variable "subnet_cidr_db" { description = "address prefix for the db subnet" } +variable "subnet_cidr_client" { description = "address prefix for the client subnet" } variable "configuration_settings" { description = "This is a dictionary that will contain values persisted to the sap-parameters.file" } variable "database_admin_ips" { description = "List of Admin NICs for the DB VMs" } variable "database_cluster_type" { @@ -175,7 +175,7 @@ variable "scs_server_secondary_ips" { description = "List of seconda variable "scs_vm_names" { description = "List of VM names for the SCS Servers" } variable "shared_home" { description = "If defined provides shared-home support" } variable "sid_keyvault_user_id" { description = "Defines the names for the resources" } -variable "subnet_prefix_storage" { description = "address prefix for the storage subnet" } +variable "subnet_cidr_storage" { description = "address prefix for the storage subnet" } variable "tfstate_resource_id" { description = "Resource ID for tf state file" } variable "upgrade_packages" { description = "Upgrade packages" } variable "use_custom_dns_a_registration" { From 641f41554e9c888292eaae91fb210590ca6ea713 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 27 Mar 2024 13:32:55 +0200 Subject: [PATCH 
473/607] Control Private Endpoint DNS registration (#571) * Add the ability to control if Private Endpoints are registered with DNS * Merge branch 'experimental' * Add register_endpoints_with_dns option to common_infrastructure and hdb_node modules * Add register_endpoints_with_dns property to LandscapeModel and SystemModel --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/LandscapeModel.cs | 4 ++++ Webapp/SDAF/Models/SystemModel.cs | 7 +++++-- Webapp/SDAF/ParameterDetails/LandscapeDetails.json | 9 +++++++++ Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt | 4 ++++ Webapp/SDAF/ParameterDetails/SystemDetails.json | 9 +++++++++ Webapp/SDAF/ParameterDetails/SystemTemplate.txt | 3 +++ deploy/terraform/run/sap_landscape/module.tf | 1 + deploy/terraform/run/sap_landscape/tfvar_variables.tf | 7 +++++++ deploy/terraform/run/sap_system/module.tf | 4 ++++ deploy/terraform/run/sap_system/tfvar_variables.tf | 6 ++++++ .../modules/sap_landscape/key_vault_sap_landscape.tf | 2 +- .../modules/sap_landscape/storage_accounts.tf | 8 ++++---- .../modules/sap_landscape/variables_global.tf | 4 ++++ .../modules/sap_system/anydb_node/variables_global.tf | 5 +++++ .../modules/sap_system/app_tier/variables_global.tf | 1 + .../sap_system/common_infrastructure/storage_accounts.tf | 2 +- .../sap_system/common_infrastructure/variables_global.tf | 6 ++++++ .../modules/sap_system/hdb_node/variables_global.tf | 5 +++++ 18 files changed, 79 insertions(+), 8 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index 46725e0769..e38877d067 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -273,6 +273,7 @@ public bool IsValid() public string management_dns_subscription_id { get; set; } public bool? use_custom_dns_a_registration { get; set; } = false; + public string dns_label { get; set; } public string dns_resource_group_name { get; set; } @@ -280,6 +281,9 @@ public bool IsValid() [IpAddressValidator] public string[] dns_server_list { get; set; } + public bool? register_endpoints_with_dns { get; set; } = true; + + /*---------------------------------------------------------------------------8 | | | Key vault information | diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 3474f18532..9359292ce5 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -159,8 +159,6 @@ public bool IsValid() public string web_subnet_nsg_name { get; set; } - public bool? use_service_endpoint { get; set; } - public bool? nsg_asg_with_vnet { get; set; } = false; /*---------------------------------------------------------------------------8 @@ -205,6 +203,11 @@ public bool IsValid() public bool? dns_a_records_for_secondary_names { get; set; } = true; public bool? use_private_endpoint { get; set; } + public bool? use_service_endpoint { get; set; } + + public bool? 
register_endpoints_with_dns { get; set; } = true; + + /*---------------------------------------------------------------------------8 | | | Cluster information | diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index e1509814ae..197de237d8 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -928,6 +928,15 @@ "Overrules": "", "Display": 3 }, + { + "Name": "register_endpoints_with_dns", + "Required": false, + "Description": "Defines if the Private Endpoints are registered with DNS", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, { "Name": "management_dns_subscription_id", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 67ab204842..4ca73735c9 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -306,6 +306,10 @@ $$use_custom_dns_a_registration$$ # This also controls the creation of DNS entries for the load balancers $$register_virtual_network_to_dns$$ +# register_endpoints_with_dns defines if the endpoints should be registered with the DNS +$$register_endpoints_with_dns$$ + + ######################################################################################### # # # Azure Keyvault support # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 6778a30007..d6a044cd4b 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -323,6 +323,15 @@ "Overrules": "", "Display": 2 }, + { + "Name": "register_endpoints_with_dns", + "Required": false, + "Description": "Defines if the Private Endpoints are registered with DNS", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, { "Name": "use_private_endpoint", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 25bbdea188..e3020b0b44 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -492,6 +492,9 @@ $$deploy_monitoring_extension$$ # dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names $$dns_a_records_for_secondary_names$$ +# register_endpoints_with_dns defines if the endpoints should be registered with the DNS +$$register_endpoints_with_dns$$ + ######################################################################################### # # # NFS support # diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index 92fcdeb905..ab1ae59733 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -47,6 +47,7 @@ module "sap_landscape" { peer_with_control_plane_vnet = var.peer_with_control_plane_vnet place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = var.public_network_access_enabled + register_endpoints_with_dns = var.register_endpoints_with_dns register_virtual_network_to_dns = var.register_virtual_network_to_dns service_principal = var.use_spn ? 
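# NOTE (review): register_endpoints_with_dns defaults to true on every surface
# shown here (both C# models and both tfvars files), so existing deployments
# keep registering their endpoints and the flag is purely an opt-out; only
# environments that manage private DNS records externally need to set it to
# false.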
local.service_principal : local.account soft_delete_retention_days = var.soft_delete_retention_days diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index a14e131d6f..feee58cd7d 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -533,6 +533,7 @@ variable "register_virtual_network_to_dns" { default = true type = bool } + variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) @@ -545,6 +546,12 @@ variable "dns_zone_names" { } } +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + default = true + type = bool + } + ######################################################################################### # # # ANF variables # diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 8abe4b7f6b..c6b7aac883 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -94,6 +94,7 @@ module "common_infrastructure" { use_private_endpoint = var.use_private_endpoint use_random_id_for_storageaccounts = var.use_random_id_for_storageaccounts use_scalesets_for_deployment = var.use_scalesets_for_deployment + register_endpoints_with_dns = var.register_endpoints_with_dns } #------------------------------------------------------------------------------- @@ -161,6 +162,7 @@ module "hdb_node" { use_msi_for_clusters = var.use_msi_for_clusters use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips + register_endpoints_with_dns = var.register_endpoints_with_dns } ######################################################################################### @@ -200,6 +202,7 @@ module "app_tier" { order_deployment = null ppg = var.use_app_proximityplacementgroups ? 
module.common_infrastructure.app_ppg : module.common_infrastructure.ppg register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) + register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group route_table_id = module.common_infrastructure.route_table_id sap_sid = local.sap_sid @@ -266,6 +269,7 @@ module "anydb_node" { ) : (null) ppg = module.common_infrastructure.ppg register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) + register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group sap_sid = local.sap_sid scale_set_id = try(module.common_infrastructure.scale_set_id, null) diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 2926b8fa06..ffa33d890e 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1068,6 +1068,12 @@ variable "dns_a_records_for_secondary_names" { type = bool } +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + default = true + type = bool + } + ######################################################################################### # # # NFS and Shared Filed settings # diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index 5c2bbcabe5..65738a8b18 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -439,7 +439,7 @@ resource "azurerm_private_endpoint" "kv_user" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint ? 1 : 0) + for_each = range(var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.vault_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.keyvault[0].id] diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 0d477d3fd5..d1c2296a9e 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -108,7 +108,7 @@ resource "azurerm_private_endpoint" "storage_bootdiag" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] @@ -250,7 +250,7 @@ resource "azurerm_private_endpoint" "witness_storage" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] @@ -447,7 +447,7 @@ resource "azurerm_private_endpoint" "transport" { ] } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint ? 
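# NOTE (review): PATCH 473/607 swaps the gate on every private endpoint's DNS
# zone group from use_private_endpoint (sometimes combined with
# !use_custom_dns_a_registration) to the new register_endpoints_with_dns flag,
# so endpoints can still be deployed while zone registration is handled
# externally. The resulting shape, shared by the hunks below (the file zone is
# shown; blob and vault endpoints use their respective zones):

dynamic "private_dns_zone_group" {
  for_each = range(var.register_endpoints_with_dns ? 1 : 0)
  content {
    name                 = var.dns_zone_names.file_dns_zone_name
    private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id]
  }
}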
1 : 0) + for_each = range(var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] @@ -657,7 +657,7 @@ resource "azurerm_private_endpoint" "install" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint ? 1 : 0) + for_each = range(var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index e7eda1cce9..4451866759 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -230,6 +230,10 @@ variable "register_virtual_network_to_dns" { type = bool } +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + type = bool + } variable "use_custom_dns_a_registration" { description = "Boolean value indicating if a custom dns a records should be created for private endpoints" diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf index 499f675977..08de04649a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf @@ -71,6 +71,11 @@ variable "register_virtual_network_to_dns" { type = bool } +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + type = bool + } + ######################################################################################### # # # Scale Set # diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf index 4da842be12..656d956576 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf @@ -30,6 +30,7 @@ variable "options" { description = "Diction variable "order_deployment" { description = "psuedo condition for ordering deployment" } variable "ppg" { description = "Details of the proximity placement group" } variable "register_virtual_network_to_dns" { description = "Boolean value indicating if the vnet should be registered to the dns zone" } +variable "register_endpoints_with_dns" { description = "Boolean value indicating if endpoints should be registered to the dns zone" } variable "resource_group" { description = "Details of the resource group" } variable "route_table_id" { description = "Route table (if any) id" } variable "sap_sid" { description = "The SID of the application" } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index b418389a51..4e4e001608 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ 
-127,7 +127,7 @@ resource "azurerm_private_endpoint" "sapmnt" { dynamic "private_dns_zone_group" { - for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 ? 1 : 0) + for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 && var.register_endpoints_with_dns ? 1 : 0) content { name = var.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [var.landscape_tfstate.privatelink_file_id] diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index eee75211c5..95273c5de2 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -235,6 +235,12 @@ variable "management_dns_resourcegroup_name" { default = null type = string } + +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + type = bool + } + variable "dns_zone_names" { description = "Private DNS zone names" type = map(string) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf index 2166f156ef..9093165754 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf @@ -96,6 +96,11 @@ variable "register_virtual_network_to_dns" { type = bool } +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + type = bool + } + ######################################################################################### # # From d2d7cf44b09124e48a360af3156104b62934a0c6 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 28 Mar 2024 12:41:54 +0530 Subject: [PATCH 474/607] Update SAP HANA network details extraction and display --- .../4.0.3-hdb-install-scaleout/tasks/main.yaml | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 8e54514de3..7853e8f150 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -71,17 +71,24 @@ var: azure_metadata.json verbosity: 2 -- name: "SAP HANA - - Extract details" +- name: "SAP HANA - - Extract network details from IMDS" ansible.builtin.set_fact: subnet_address: "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].address }}" subnet_prefix: "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix }}" + subnet_db_cidr: "{{ subnet_cidr_db | default(azure_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" + subnet_client_cidr: "{{ subnet_cidr_client | default(azure_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" + subnet_storage_cidr: "{{ subnet_cidr_storage | default(azure_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" - name: "SAP HANA - - 
Show the subnet details" ansible.builtin.debug: msg: - - "Subnet Address: {{ subnet_address }}" - - "Subnet Prefix: {{ subnet_prefix }}" - - "Subnet CIDR: {{ (subnet_address + '/' + subnet_prefix) }}" + - "Subnet Address: {{ subnet_address }}" + - "Subnet Prefix: {{ subnet_prefix }}" + - "Subnet CIDR: {{ (subnet_address + '/' + subnet_prefix) }}" + - "DB Subnet CIDR: {{ subnet_db_cidr }}" + - "Client Subnet CIDR: {{ subnet_client_cidr }}" + - "Storage Subnet CIDR: {{ subnet_storage_cidr }}" + # Scale out ANF only runs on primary node or the first node in the SID_DB list. This is mandatory. - name: "HANA Install - Scale Out - ANF" block: @@ -287,10 +294,11 @@ state: present mode: 0644 option: "map_{{ hostvars[item].virtual_host }}" - value: "{{ hostvars[item].ansible_host }}" + value: "{{ (hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_client_cidr)) | default(hostvars[item].ansible_host) }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" + - name: "Prepare global.ini for site hosts name resolution (Primary Site)" community.general.ini_file: path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" From 9344c2776419c6d88b19f2e4e9eba94c5e68713d Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 28 Mar 2024 20:02:20 +0530 Subject: [PATCH 475/607] Update SAP OS configuration playbook and hosts file configuration tasks --- .../playbook_02_os_sap_specific_config.yaml | 15 ++ .../tasks/main.yaml | 140 +++++++++--------- .../2.4-hosts-file/tasks/main.yaml | 40 ++++- 3 files changed, 119 insertions(+), 76 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 4735538d5d..af8f3ede59 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -132,6 +132,21 @@ - name: "SAP OS configuration playbook: - Convert ips to list" ansible.builtin.set_fact: ipadd: "{{ azure_network_metadata.json | json_query('interface[*].ipv4.ipAddress[*].privateIpAddress') | flatten(levels=1) | default([]) | list }}" + tags: + - always + + # we do not need to add the same assertion for subnet_cidr_client as it is + # calculated for specific deployment scenarios. 
+ - name: "SAP OS configuration playbook: - Set the subnet_client_cidr fact" + ansible.builtin.set_fact: + subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" + when: + - platform == 'HANA' + - node_tier == 'hana' or ['hana'] in supported_tiers + - not database_high_availability + - db_scale_out + tags: + - always # add assertion to validate if ipadd is not empty and has at least one ip address - name: "SAP OS configuration playbook: - Assert if IP Address is not empty" diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 7853e8f150..1c3e03d6ed 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -17,17 +17,17 @@ --- # +------------------------------------4--------------------------------------*/ -- name: "SAP HANA: Set BOM facts" +- name: "4.0.3 - SAP HANA SCALE OUT: Set BOM facts" ansible.builtin.set_fact: sap_inifile: "hdbserver_{{ virtual_host }}_{{ sap_sid }}_install.rsp" dir_params: "{{ tmp_directory }}/.params" -- name: "SAP HANA: Create list of all db hosts" +- name: "4.0.3 - SAP HANA SCALE OUT: Create list of all db hosts" ansible.builtin.set_fact: db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" # 0x) Create hidden directory for parameter files -- name: "SAP HANA: Create directories" +- name: "4.0.3 - SAP HANA SCALE OUT: Create directories" ansible.builtin.file: path: "{{ item.path }}" state: directory @@ -37,18 +37,18 @@ - { mode: '0755', path: '{{ tmp_directory }}/{{ db_sid | upper }}' } - { mode: '0755', path: '/etc/sap_deployment_automation/{{ db_sid | upper }}' } -- name: "SAP HANA: Install reset" +- name: "4.0.3 - SAP HANA SCALE OUT: Install reset" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" state: absent when: reinstall -- name: "SAP HANA: check if installed" +- name: "4.0.3 - SAP HANA SCALE OUT: check if installed" ansible.builtin.stat: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" register: hana_installed -- name: "SAP HANA: check media exists" +- name: "4.0.3 - SAP HANA SCALE OUT: check media exists" ansible.builtin.stat: path: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE/hdblcm" register: hdblcm_found @@ -58,7 +58,7 @@ msg: "INSTALL:0001:Unable to find hdblcm, please check that the installation media is mounted" when: not hdblcm_found.stat.exists -- name: "SAP HANA - Retrieve Subscription ID and Resource Group Name" +- name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 use_proxy: false @@ -66,12 +66,12 @@ Metadata: true register: azure_metadata -- name: "SAP HANA - Show IMDS results" +- name: "4.0.3 - SAP HANA SCALE OUT: Show IMDS results" ansible.builtin.debug: var: azure_metadata.json verbosity: 2 -- name: "SAP HANA - - Extract network details from IMDS" +- name: "4.0.3 - SAP HANA SCALE OUT: Extract network details from IMDS" ansible.builtin.set_fact: subnet_address: "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].address }}" subnet_prefix: "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix }}" @@ -79,7 +79,7 @@ 
subnet_client_cidr: "{{ subnet_cidr_client | default(azure_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" subnet_storage_cidr: "{{ subnet_cidr_storage | default(azure_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" -- name: "SAP HANA - - Show the subnet details" +- name: "4.0.3 - SAP HANA SCALE OUT: Show the subnet details" ansible.builtin.debug: msg: - "Subnet Address: {{ subnet_address }}" @@ -90,15 +90,15 @@ - "Storage Subnet CIDR: {{ subnet_storage_cidr }}" # Scale out ANF only runs on primary node or the first node in the SID_DB list. This is mandatory. -- name: "HANA Install - Scale Out - ANF" +- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - ANF" block: - - name: "SAP HANA: remove install response file if exists" + - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file if exists" ansible.builtin.file: path: "{{ dir_params }}/{{ sap_inifile }}" state: absent - - name: "SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + - name: "4.0.3 - SAP HANA SCALE OUT: SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" ansible.builtin.template: src: "HANA_2_00_install_scaleout_anf.rsp" dest: "{{ dir_params }}/{{ sap_inifile }}" @@ -132,13 +132,13 @@ {% endif %} {% endfor %}" - - name: "SAP HANA: Progress" + - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: msg: "Start HANA Installation" - - name: "SAP HANA: installation" + - name: "4.0.3 - SAP HANA SCALE OUT: installation" block: - - name: "SAP HANA: Execute hdblcm on {{ virtual_host }}" + - name: "4.0.3 - SAP HANA SCALE OUT: Execute hdblcm on {{ virtual_host }}" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; @@ -156,15 +156,15 @@ msg: "INSTALL:0022:Execute hdblcm failed." when: hana_installation.rc > 1 - - name: "SAP HANA: Progress" + - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: msg: "Restarting the HANA Installation" when: hana_installation.rc == 1 - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }} and rescue" block: - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }}" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; @@ -182,7 +182,7 @@ ansible.builtin.fail: msg: "INSTALL:0022:Execute hdblcm failed." 
- - name: "SAP HANA: Installation results" + - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: msg: - "HANA Installation failed" @@ -198,20 +198,20 @@ - hana_installation.rc is defined - hana_installation.rc > 0 - - name: "SAP HANA: Successful installation" + - name: "4.0.3 - SAP HANA SCALE OUT: Successful installation" block: - - name: "SAP HANA: Installation results" + - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: msg: "HANA Installation succeeded" - - name: "SAP HANA: HANA Install: flag" + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install: flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" state: touch mode: 0755 - - name: "Retrieve Subscription ID and Resource Group Name" + - name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 use_proxy: false @@ -219,31 +219,31 @@ Metadata: true register: azure_metadata - - name: "Extract details" + - name: "4.0.3 - SAP HANA SCALE OUT: Extract Azure subscription details" ansible.builtin.set_fact: subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" - - name: "Show the subscription and resource group" + - name: "4.0.3 - SAP HANA SCALE OUT: Show the subscription and resource group" ansible.builtin.debug: msg: - "Subscription ID: {{ subscription_id }}" - "Resource Group Name: {{ resource_group_name }}" - - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + - name: "4.0.3 - SAP HANA SCALE OUT: Include deploy/ansible/roles-misc/0.6-ARM-Deployment" ansible.builtin.include_role: name: roles-misc/0.6-ARM-Deployment vars: subscriptionId: "{{ subscription_id }}" resourceGroupName: "{{ resource_group_name }}" - - name: "SAP HANA: ARM Deployment flag" + - name: "4.0.3 - SAP HANA SCALE OUT: ARM Deployment flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" state: touch mode: 0755 - # - name: "SAP HANA: remove install response file" + # - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file" # ansible.builtin.file: # path: "{{ dir_params }}/{{ sap_inifile }}" # state: absent @@ -253,7 +253,7 @@ - hana_installation.rc is defined - hana_installation.rc < 1 - - name: "SAP HANA: Create backup folder" + - name: "4.0.3 - SAP HANA SCALE OUT: Create backup folder" ansible.builtin.file: path: "{{ hana_backup_path }}" state: directory @@ -261,9 +261,9 @@ owner: "{{ db_sid | lower }}adm" mode: 0755 - - name: "SAP HANA: Configure global.ini" + - name: "4.0.3 - SAP HANA SCALE OUT: Configure global.ini" block: - - name: "Prepare global.ini for domain name resolution." + - name: "4.0.3 - SAP HANA SCALE OUT: Prepare global.ini for domain name resolution." become_user: root become: true community.general.ini_file: @@ -285,7 +285,7 @@ # option: internal_network # value: "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ipaddr('network/prefix') }}" - - name: "Prepare global.ini for public hostname resolution." + - name: "4.0.3 - SAP HANA SCALE OUT: Prepare global.ini for public hostname resolution." 
become_user: root become: true community.general.ini_file: @@ -299,7 +299,7 @@ - "{{ groups[(sap_sid | upper)~'_DB' ] }}" - - name: "Prepare global.ini for site hosts name resolution (Primary Site)" + - name: "4.0.3 - SAP HANA SCALE OUT: Prepare global.ini for site hosts name resolution (Primary Site)" community.general.ini_file: path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" section: "internal_hostname_resolution" @@ -310,7 +310,7 @@ with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" - - name: "Prepare global.ini for NetApp storage optimizations" + - name: "4.0.3 - SAP HANA SCALE OUT: Prepare global.ini for NetApp storage optimizations" community.general.ini_file: path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" section: "{{ item.section }}" @@ -327,9 +327,9 @@ - { section: "persistence", option: "datavolume_striping_size_gb", value: "15000" } - - name: "SAP HANA: Restart HANA" + - name: "4.0.3 - SAP HANA SCALE OUT: Restart HANA" block: - - name: "Stop HANA Database" + - name: "4.0.3 - SAP HANA SCALE OUT: Stop HANA Database" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | @@ -346,11 +346,11 @@ allow_world_readable_tmpfiles: true # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. - - name: "Wait 5 minutes for SAP system to stop" + - name: "4.0.3 - SAP HANA SCALE OUT: Wait 5 minutes for SAP system to stop" ansible.builtin.wait_for: timeout: 300 - - name: "Start HANA Database" + - name: "4.0.3 - SAP HANA SCALE OUT: Start HANA Database" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | @@ -366,7 +366,7 @@ vars: allow_world_readable_tmpfiles: true - - name: "Wait 5 minutes for SAP system to start" + - name: "4.0.3 - SAP HANA SCALE OUT: Wait 5 minutes for SAP system to start" ansible.builtin.wait_for: timeout: 300 @@ -379,15 +379,15 @@ - db_scale_out # TODO: add block for Scale out with HSR support here, same as regular installation. -- name: "HANA Install - Scale Out - HSR" +- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" block: - - name: "SAP HANA: remove install response file if exists" + - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file if exists" ansible.builtin.file: path: "{{ dir_params }}/{{ sap_inifile }}" state: absent - - name: "SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + - name: "4.0.3 - SAP HANA SCALE OUT: SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" ansible.builtin.template: src: "HANA_2_00_install.rsp" dest: "{{ dir_params }}/{{ sap_inifile }}" @@ -405,13 +405,13 @@ use_master_password: "{{ hana_use_master_password }}" password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - - name: "SAP HANA: Progress" + - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: msg: "Start HANA Installation" - - name: "SAP HANA: installation" + - name: "4.0.3 - SAP HANA SCALE OUT: installation" block: - - name: "SAP HANA: Execute hdblcm on {{ virtual_host }}" + - name: "4.0.3 - SAP HANA SCALE OUT: Execute hdblcm on {{ virtual_host }}" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; @@ -429,15 +429,15 @@ msg: "INSTALL:0022:Execute hdblcm failed." 
when: hana_installation.rc > 1 - - name: "SAP HANA: Progress" + - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: msg: "Restarting the HANA Installation" when: hana_installation.rc == 1 - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }} and rescue" block: - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }}" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; @@ -455,7 +455,7 @@ ansible.builtin.fail: msg: "INSTALL:0022:Execute hdblcm failed." - - name: "SAP HANA: Installation results" + - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: msg: - "HANA Installation failed" @@ -471,20 +471,20 @@ - hana_installation.rc is defined - hana_installation.rc > 0 - - name: "SAP HANA: Successful installation" + - name: "4.0.3 - SAP HANA SCALE OUT: Successful installation" block: - - name: "SAP HANA: Installation results" + - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: msg: "HANA Installation succeeded" - - name: "SAP HANA: HANA Install: flag" + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install: flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" state: touch mode: 0755 - - name: "Retrieve Subscription ID and Resource Group Name" + - name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 use_proxy: false @@ -492,31 +492,31 @@ Metadata: true register: azure_metadata - - name: "Extract details" + - name: "4.0.3 - SAP HANA SCALE OUT: Extract Azure subscription details" ansible.builtin.set_fact: subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" - - name: "Show the subscription and resource group" + - name: "4.0.3 - SAP HANA SCALE OUT: Show the subscription and resource group" ansible.builtin.debug: msg: - "Subscription ID: {{ subscription_id }}" - "Resource Group Name: {{ resource_group_name }}" - - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + - name: "4.0.3 - SAP HANA SCALE OUT: Include deploy/ansible/roles-misc/0.6-ARM-Deployment" ansible.builtin.include_role: name: roles-misc/0.6-ARM-Deployment vars: subscriptionId: "{{ subscription_id }}" resourceGroupName: "{{ resource_group_name }}" - - name: "SAP HANA: ARM Deployment flag" + - name: "4.0.3 - SAP HANA SCALE OUT: ARM Deployment flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" state: touch mode: 0755 - - name: "SAP HANA: remove install response file" + - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file" ansible.builtin.file: path: "{{ dir_params }}/{{ sap_inifile }}" state: absent @@ -525,7 +525,7 @@ - hana_installation.rc is defined - hana_installation.rc < 1 - - name: "SAP HANA: Create backup folder" + - name: "4.0.3 - SAP HANA SCALE OUT: Create backup folder" ansible.builtin.file: path: "{{ hana_backup_path }}" state: directory @@ -543,22 +543,22 @@ - name: "HANA Install status" block: - - name: "HANA Install status" + - name: "4.0.3 - SAP HANA SCALE OUT: Install status" ansible.builtin.debug: msg: "HANA is already installed" - - name: "HANA: - return value" + - name: "4.0.3 - 
SAP HANA SCALE OUT: - return value" ansible.builtin.set_fact: hana_already_installed: true - - name: "SAP HANA: check if ARM Deployment done" + - name: "4.0.3 - SAP HANA SCALE OUT: check if ARM Deployment done" ansible.builtin.stat: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" register: hana_arm_deployment_done - - name: "SAP HANA: Successful installation" + - name: "4.0.3 - SAP HANA SCALE OUT: Successful installation" block: - - name: "Retrieve Subscription ID and Resource Group Name" + - name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 use_proxy: false @@ -566,25 +566,25 @@ Metadata: true register: azure_metadata - - name: "Extract details" + - name: "4.0.3 - SAP HANA SCALE OUT: Extract Azure subscription details" ansible.builtin.set_fact: subscription_id_tmp: "{{ azure_metadata.json.compute.subscriptionId }}" resource_group_name_tmp: "{{ azure_metadata.json.compute.resourceGroupName }}" - - name: "Show the subscription and resource group" + - name: "4.0.3 - SAP HANA SCALE OUT: Show the subscription and resource group" ansible.builtin.debug: msg: - "Subscription ID: {{ subscription_id_tmp }}" - "Resource Group Name: {{ resource_group_name_tmp }}" - - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + - name: "4.0.3 - SAP HANA SCALE OUT: Include deploy/ansible/roles-misc/0.6-ARM-Deployment" ansible.builtin.include_role: name: roles-misc/0.6-ARM-Deployment vars: subscription_id: "{{ subscription_id_tmp }}" resource_group_name: "{{ resource_group_name_tmp }}" - - name: "SAP HANA: ARM Deployment flag" + - name: "4.0.3 - SAP HANA SCALE OUT: ARM Deployment flag" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" state: touch @@ -593,7 +593,7 @@ - not hana_arm_deployment_done.stat.exists - - name: "SAP HANA: Create backup folder" + - name: "4.0.3 - SAP HANA SCALE OUT: Create backup folder" ansible.builtin.file: path: "{{ hana_backup_path }}" state: directory diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 8b78906c92..87a3212b54 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -74,8 +74,8 @@ create: true state: present block: | - {{ '%-19s' | format(scs_lb_ip) }} {{ '%-50s' | format(scs_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(scs_virtual_hostname) }} - {{ '%-19s' | format(ers_lb_ip) }} {{ '%-50s' | format(ers_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(ers_virtual_hostname) }} + {{ '%-19s' | format(scs_lb_ip) }} {{ '%-80s' | format(scs_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(scs_virtual_hostname) }} + {{ '%-19s' | format(ers_lb_ip) }} {{ '%-80s' | format(ers_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(ers_virtual_hostname) }} marker: "# {mark} ASCS/ERS Entries {{ scs_virtual_hostname }}" when: - scs_high_availability @@ -88,12 +88,40 @@ create: true state: present block: | - {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }} + {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-80s' | format(db_virtual_hostname + '.' 
+ sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }}
   marker: "# {mark} DB Entries {{ db_virtual_hostname }}"
   when:
     - database_high_availability
     - platform == 'HANA'

+# we only need the first DB VM's IP that belongs to subnet_cidr_client
+- name: "2.4 Hosts: - Set the DB Virtual Instance name resolution - scale out"
+  when:
+    - platform == 'HANA'
+    - not database_high_availability
+    - db_scale_out
+  block:
+
+    - name: "2.4 Hosts: - Set the DB Virtual Instance hostname - scale out"
+      ansible.builtin.set_fact:
+        db_so_virtual_hostname: "{{ custom_db_virtual_hostname | default(sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so', true) }}"
+
+    - name: "2.4 Hosts: - Setup Virtual host name resolution - DB - Scale Out - Standby"
+      ansible.builtin.set_fact:
+        db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_client_cidr']) | default(hostvars[item].ansible_host) }}"
+      with_items:
+        - "{{ groups[(sap_sid | upper)~'_DB' ][0] }}"
+
+    - name: "2.4 Hosts: - Setup Virtual host name resolution - DB - Scale Out - Standby"
+      ansible.builtin.blockinfile:
+        path: /etc/hosts
+        mode: 0644
+        create: true
+        state: present
+        block: |
+          {{ '%-19s' | format(db_so_virtualhost_ip) }} {{ '%-80s' | format(db_so_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_so_virtual_hostname) }}
+        marker: "# {mark} DB Entries {{ db_virtual_hostname }}"
+
 - name: "2.4 Hosts: - Setup Virtual host name resolution - DB"
   ansible.builtin.blockinfile:
     path: /etc/hosts
@@ -101,7 +129,7 @@
     create: true
     state: present
     block: |
-      {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-50s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }}
+      {{ '%-19s' | format(database_loadbalancer_ip) }} {{ '%-80s' | format(db_virtual_hostname + '.' + sap_fqdn) }} {{ '%-21s' | format(db_virtual_hostname) }}
     marker: "# {mark} DB Entries {{ db_virtual_hostname }}"
     when:
       - database_high_availability
@@ -160,8 +188,8 @@
     ansible.builtin.lineinfile:
       path: /etc/hosts
       line:
-        "{{ '%-19s' | format(iscsi_server.ip) }} {{ '%-50s' | format(iscsi_server.host + '.' + sap_fqdn) }} {{ '%-21s' | format(iscsi_server.host) }}"
-    loop: "{{ iscsi_servers }}"
+        "{{ '%-19s' | format(iscsi_server.ip) }} {{ '%-80s' | format(iscsi_server.host + '.' + sap_fqdn) }} {{ '%-21s' | format(iscsi_server.host) }}"
+      loop: "{{ iscsi_servers }}"
     loop_control:
       loop_var: iscsi_server
     when: iscsi_servers is defined

From c5a26afb6c9536d5c564bded4113e396dd50ad9c Mon Sep 17 00:00:00 2001
From: hdamecharla
Date: Thu, 28 Mar 2024 23:07:00 +0530
Subject: [PATCH 476/607] Update count condition for azurerm_private_dns_zone

---
 .../modules/sap_landscape/key_vault_sap_landscape.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
index 65738a8b18..26b796de47 100644
--- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
+++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf
@@ -495,7 +495,7 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vault" {

 data "azurerm_private_dns_zone" "vault" {
   provider                             = azurerm.dnsmanagement
-  count                                = var.use_private_endpoint ? 1 : 0
+  count                                = var.use_private_endpoint && var.register_endpoints_with_dns ? 
1 : 0 name = var.dns_zone_names.vault_dns_zone_name resource_group_name = var.management_dns_resourcegroup_name } From 8d5572fa1f670e8e8af7391d02d8765f45b2298d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 28 Mar 2024 23:55:56 +0200 Subject: [PATCH 477/607] Fix subnet_client_cidr calculation in SAP OS configuration playbook --- deploy/ansible/playbook_02_os_sap_specific_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index af8f3ede59..05482aa7f1 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -139,7 +139,7 @@ # calculated for specific deployment scenarios. - name: "SAP OS configuration playbook: - Set the subnet_client_cidr fact" ansible.builtin.set_fact: - subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.network.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.network.interface[0].ipv4.subnet[0].prefix) }}" + subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.interface[0].ipv4.subnet[0].prefix) }}" when: - platform == 'HANA' - node_tier == 'hana' or ['hana'] in supported_tiers From 086df3c5baf1279cbd94eaf6da07f0cbe3379cbd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 29 Mar 2024 00:24:43 +0200 Subject: [PATCH 478/607] Update DB virtual hostname resolution in 2.4 Hosts file --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 87a3212b54..f8f094215b 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -106,7 +106,7 @@ ansible.builtin.set_fact: db_so_virtual_hostname: "{{ custom_db_virtual_hostname | default(sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so', true) }}" - - name: "2.4 Hosts: - Setup Virtual host name resolution - DB - Scale Out - Standby" + - name: "2.4 Hosts: - Calculate host name - DB - Scale Out - Standby" ansible.builtin.set_fact: db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_client_cidr']) | default(hostvars[item].ansible_host) }}" with_items: @@ -120,7 +120,7 @@ state: present block: | {{ '%-19s' | format(db_so_virtualhost_ip) }} {{ '%-80s' | format(db_so_virtual_hostname + '.' 
+ sap_fqdn) }} {{ '%-21s' | format(db_so_virtual_hostname) }} - marker: "# {mark} DB Entries {{ db_virtual_hostname }}" + marker: "# {mark} DB Entries {{ db_virtual_hostname | default(ansible_host) }}" - name: "2.4 Hosts: - Setup Virtual host name resolution - DB" ansible.builtin.blockinfile: From f9cc7edbfb5370a5da3e3c36ea27ef54dc0b9f5e Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 29 Mar 2024 12:36:13 +0530 Subject: [PATCH 479/607] Fix calculation of db_so_virtualhost_ip in 2.4-hosts-file --- deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index f8f094215b..163e3e44c5 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -108,7 +108,7 @@ - name: "2.4 Hosts: - Calculate host name - DB - Scale Out - Standby" ansible.builtin.set_fact: - db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_client_cidr']) | default(hostvars[item].ansible_host) }}" + db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_client_cidr']) | first | default(hostvars[item].ansible_host) }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ][0] }}" From e7b0351e8f8cc8533b9d4fb7973a6e0974788bcc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 29 Mar 2024 09:24:44 +0200 Subject: [PATCH 480/607] Install defender extension (#572) * Add the Defender Agent * Update monitoring_defender_app_lnx and monitoring_defender_app_win resource names * Update source_address_prefixes in NSG rules * Update Azure Monitor Linux Agent name * Fix Azure Monitor agent name in VM extensions * Update NSG rules to use the first address space in vnet_sap * Refactor NSG rules to use address_space directly * Fix subnet_client_cidr calculation in SAP OS configuration playbook * Add conditional block for non-Windows systems in SAP OS configuration playbook * Remove unnecessary condition for non-Windows systems in SAP OS configuration playbook * Update DB virtual hostname resolution in 2.4 Hosts file --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/LandscapeModel.cs | 4 +- Webapp/SDAF/Models/SystemModel.cs | 2 + .../ParameterDetails/LandscapeDetails.json | 9 ++++ .../ParameterDetails/LandscapeTemplate.txt | 4 ++ .../SDAF/ParameterDetails/SystemDetails.json | 9 ++++ .../SDAF/ParameterDetails/SystemTemplate.txt | 3 ++ .../playbook_02_os_sap_specific_config.yaml | 1 - .../run/sap_landscape/tfvar_variables.tf | 5 ++ .../terraform/run/sap_landscape/transform.tf | 1 + .../run/sap_system/tfvar_variables.tf | 4 ++ deploy/terraform/run/sap_system/transform.tf | 1 + .../modules/sap_landscape/iscsi.tf | 24 ++++++++- .../modules/sap_landscape/nsg.tf | 39 ++++++++++++--- .../modules/sap_landscape/vm.tf | 41 ++++++++++++++- .../modules/sap_system/anydb_node/vm-anydb.tf | 50 ++++++++++++++++++- .../modules/sap_system/app_tier/vm-app.tf | 46 ++++++++++++++++- .../modules/sap_system/app_tier/vm-scs.tf | 46 ++++++++++++++++- .../modules/sap_system/app_tier/vm-webdisp.tf | 46 ++++++++++++++++- .../modules/sap_system/hdb_node/vm-hdb.tf | 25 +++++++++- 19 files changed, 338 insertions(+), 22 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index e38877d067..7d9ecea08a 100644 --- 
a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -51,7 +51,9 @@ public bool IsValid() public Tag[] tags { get; set; } - public bool? deploy_monitoring_extension { get; set; } = false; + public bool? deploy_monitoring_extension { get; set; } = true; + + public bool? deploy_defender_extension { get; set; } = false; /*---------------------------------------------------------------------------8 | | diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index 9359292ce5..387c1185e3 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -76,6 +76,8 @@ public bool IsValid() public bool? deploy_monitoring_extension { get; set; } = false; + public bool? deploy_defender_extension { get; set; } = false; + public bool? use_scalesets_for_deployment { get; set; } = false; public bool? database_use_premium_v2_storage { get; set; } = false; diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 197de237d8..52c4e85ac1 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -127,6 +127,15 @@ "Overrules": "", "Display": 2 }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, { "Name": "place_delete_lock_on_resources", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 4ca73735c9..67ddd31369 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -39,6 +39,10 @@ $$name_override_file$$ # If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines $$deploy_monitoring_extension$$ +# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines +$$deploy_defender_extension$$ + + ######################################################################################### # # # Resource group details # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index d6a044cd4b..082ef7444c 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -278,6 +278,15 @@ "Overrules": "", "Display": 3 }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, { "Name": "vm_disk_encryption_set_id", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index e3020b0b44..0c692279fc 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -489,6 +489,9 @@ $$deploy_v1_monitoring_extension$$ # If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines $$deploy_monitoring_extension$$ +# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines +$$deploy_defender_extension$$ + # dns_a_records_for_secondary_names defines if DNS records should be 
created for the virtual host names $$dns_a_records_for_secondary_names$$ diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 05482aa7f1..8e8ce1f375 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -144,7 +144,6 @@ - platform == 'HANA' - node_tier == 'hana' or ['hana'] in supported_tiers - not database_high_availability - - db_scale_out tags: - always diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index feee58cd7d..de0c650dea 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -417,6 +417,11 @@ variable "deploy_monitoring_extension" { default = false } +variable "deploy_defender_extension" { + description = "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines" + default = false + } + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index faf5c1c6c8..b88cc683fa 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -173,6 +173,7 @@ locals { codename = try(var.infrastructure.codename, var.codename) tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) deploy_monitoring_extension = var.deploy_monitoring_extension + deploy_defender_extension = var.deploy_defender_extension user_assigned_identity_id = var.user_assigned_identity_id } diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index ffa33d890e..c286bde42a 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1383,6 +1383,10 @@ variable "deploy_monitoring_extension" { default = true } +variable "deploy_defender_extension" { + description = "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines" + default = false + } ######################################################################################### diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 514abac51b..0941eb5d2b 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -10,6 +10,7 @@ locals { tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) use_app_proximityplacementgroups = var.use_app_proximityplacementgroups deploy_monitoring_extension = var.deploy_monitoring_extension + deploy_defender_extension = var.deploy_defender_extension } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index eb6c60bb65..1d8b4e668d 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -322,7 +322,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { 0 ) virtual_machine_id = azurerm_linux_virtual_machine.iscsi[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = 
"AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -339,3 +339,25 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { } ) } + +resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension ? ( + local.iscsi_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.iscsi[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index 755e6fe52c..70f2b8470c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -171,7 +171,13 @@ resource "azurerm_network_security_rule" "nsr_controlplane_app" { protocol = "Tcp" source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986, 5404, 5405, 7630] - source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) destination_address_prefixes = local.application_subnet_existing ? data.azurerm_subnet.app[0].address_prefixes : azurerm_subnet.app[0].address_prefixes } @@ -195,7 +201,13 @@ resource "azurerm_network_security_rule" "nsr_controlplane_web" { protocol = "Tcp" source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986] - source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) destination_address_prefixes = local.web_subnet_existing ? data.azurerm_subnet.web[0].address_prefixes : azurerm_subnet.web[0].address_prefixes } @@ -220,7 +232,13 @@ resource "azurerm_network_security_rule" "nsr_controlplane_storage" { protocol = "*" source_port_range = "*" destination_port_ranges = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049] - source_address_prefixes = compact(concat(var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes)) + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) destination_address_prefixes = local.storage_subnet_existing ? 
data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes } @@ -246,8 +264,11 @@ resource "azurerm_network_security_rule" "nsr_controlplane_db" { destination_port_ranges = [22, 443, 3389, 5985, 5986,111, 635, 2049, 4045, 4046, 4049] source_address_prefixes = compact(concat( var.deployer_tfstate.subnet_mgmt_address_prefixes, - var.deployer_tfstate.subnet_bastion_address_prefixes) - ) + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) destination_address_prefixes = local.database_subnet_existing ? data.azurerm_subnet.db[0].address_prefixes : azurerm_subnet.db[0].address_prefixes } @@ -273,8 +294,10 @@ resource "azurerm_network_security_rule" "nsr_controlplane_admin" { destination_port_ranges = [22, 443, 3389, 5985, 5986,111, 635, 2049, 4045, 4046, 4049] source_address_prefixes = compact(concat( var.deployer_tfstate.subnet_mgmt_address_prefixes, - var.deployer_tfstate.subnet_bastion_address_prefixes) - ) - + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) destination_address_prefixes = local.admin_subnet_existing ? data.azurerm_subnet.admin[0].address_prefixes : azurerm_subnet.admin[0].address_prefixes } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 2175e176ad..8b8dcb0092 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -175,7 +175,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_lnx" provider = azurerm.main count = local.deploy_monitoring_extension && upper(var.vm_settings.image.os_type) == "LINUX" ? var.vm_settings.count : 0 virtual_machine_id = azurerm_linux_virtual_machine.utility_vm[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -198,7 +198,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" count = local.deploy_monitoring_extension && upper(var.vm_settings.image.os_type) == "WINDOWS" ? var.vm_settings.count : 0 virtual_machine_id = azurerm_windows_virtual_machine.utility_vm[count.index].id - name = "AzureMonitorWindowsAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" @@ -216,3 +216,40 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" } +resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.vm_settings.image.os_type) == "LINUX" ? 
var.vm_settings.count : 0 + virtual_machine_id = azurerm_linux_virtual_machine.utility_vm[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.vm_settings.image.os_type) == "WINDOWS" ? var.vm_settings.count : 0 + virtual_machine_id = azurerm_linux_virtual_machine.utility_vm[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 2383f24894..010aee681e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -706,7 +706,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { var.database_server_count) : ( 0 ) virtual_machine_id = azurerm_linux_virtual_machine.dbserver[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -731,7 +731,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { var.database_server_count) : ( 0 ) virtual_machine_id = azurerm_windows_virtual_machine.dbserver[count.index].id - name = "AzureMonitorWindowsAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" @@ -749,3 +749,49 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { } + +resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.database.os.os_type) == "LINUX" ? ( + var.database_server_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.dbserver[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_db_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.database.os.os_type) == "WINDOWS" ? 
( + var.database_server_count) : ( + 0 + ) + virtual_machine_id = azurerm_windows_virtual_machine.dbserver[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index fa4762481a..1bec8dd701 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -501,7 +501,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_lnx" { local.application_server_count) : ( 0 ) virtual_machine_id = azurerm_linux_virtual_machine.app[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -526,7 +526,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { local.application_server_count) : ( 0 ) virtual_machine_id = azurerm_windows_virtual_machine.app[count.index].id - name = "AzureMonitorWindowsAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" @@ -544,3 +544,45 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { } +resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "LINUX" ? ( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.app[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? 
( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.app[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index a3510555d8..b28d362d80 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -703,7 +703,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { local.scs_server_count) : ( 0 ) virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -727,7 +727,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { local.scs_server_count) : ( 0 ) virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "AzureMonitorWindowsAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" @@ -745,3 +745,45 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { } +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 
( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index c4c68038ac..83f356b2be 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -629,7 +629,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_lnx" { local.webdispatcher_count) : ( 0 ) virtual_machine_id = azurerm_linux_virtual_machine.web[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -653,7 +653,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { local.webdispatcher_count) : ( 0 ) virtual_machine_id = azurerm_windows_virtual_machine.web[count.index].id - name = "AzureMonitorWindowsAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" @@ -669,3 +669,45 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { } ) } + +resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.web[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? 
( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.web[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 6792baaa78..a9e42f7686 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -560,7 +560,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { var.database_server_count) : ( 0 ) virtual_machine_id = azurerm_linux_virtual_machine.vm_dbnode[count.index].id - name = "AzureMonitorLinuxAgent" + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" @@ -577,3 +577,26 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { } ) } + + +resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension ? ( + var.database_server_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.vm_dbnode[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = "true" + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} From 5e751e71a4590714f332a86668211ff3fa2c23f7 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 29 Mar 2024 14:38:40 +0530 Subject: [PATCH 481/607] add calculation of virtual host for scaleout --- .../roles-sap/5.1-dbload/tasks/main.yaml | 26 ++++++++++++++++++- .../roles-sap/5.2-pas-install/tasks/main.yaml | 15 ++++++++--- .../roles-sap/5.3-app-install/tasks/main.yaml | 15 ++++++++--- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 31587c1f35..49dc42b09a 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -136,10 +136,34 @@ with_items: - "{{ query('inventory_hostnames', '{{ sid_to_be_deployed.sid | upper }}_DB') }}" + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + + - name: "DBLoad: Calculate virtual host name when running scale out" + ansible.builtin.set_fact: + db_virtualhost_temp: >- + {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} + {%- if db_scale_out and not database_high_availability -%} + {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} + {%- endif -%} + {{- _host_name -}} + + - name: "DBLoad: Display temporary database virtual 
host name" + ansible.builtin.debug: + msg: "DB Virtual Host: {{ db_virtualhost_temp }}" + - name: "DBLoad: - Set the server facts" ansible.builtin.set_fact: scs_server: "{% if scs_high_availability %}{{ sid_to_be_deployed.sid | lower }}scs{{ scs_instance_number }}cl1{% else %}{{ hostvars[scs_server_temp | first]['virtual_host'] }}{% endif %}" - db_virtual_hostname: "{{ hostvars[db_server_temp | first]['virtual_host'] }}" + db_virtual_hostname: "{{ db_virtualhost_temp | default(hostvars[db_server_temp | first]['virtual_host'], true) }}" + + - name: "DBLoad: Display database virtual host name" + ansible.builtin.debug: + msg: "DB Virtual Host: {{ db_virtual_hostname }}" - name: "DBLoad: check media exists" ansible.builtin.stat: diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index be9f6aa214..9e80ca045c 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -21,19 +21,28 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" -- name: "Backward Compatibility - Check required Database HA variables" +- name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" + database_high_availability: "{{ db_high_availability | default(false) }}" when: - db_high_availability is defined - database_high_availability is not defined +- name: "PAS Install: Calculate virtual host name when running scale out" + ansible.builtin.set_fact: + db_virtualhost_temp: >- + {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} + {%- if db_scale_out and not database_high_availability -%} + {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} + {%- endif -%} + {{- _host_name -}} + - name: "PAS Install: Set BOM facts" ansible.builtin.set_fact: sap_inifile: "{{ bom_base_name }}-pas-{{ sid_to_be_deployed.sid | lower }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-pas-inifile-param.j2" dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | lower }}-params" - db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ db_virtualhost_temp | default(hostvars[db_server_temp | first]['virtual_host'], true) }}{% endif %}" db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}{% else %}{{ db_server_temp }}{% endif %}" - name: "PAS Install: Set BOM facts db host" diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 0b40de3291..ae70c46634 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -19,19 +19,28 @@ with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" -- name: "Backward Compatibility - Check required Database HA variables" +- name: "Backward Compatibility - Check required Database HA 
variables" ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" + database_high_availability: "{{ db_high_availability | default(false) }}" when: - db_high_availability is defined - database_high_availability is not defined +- name: "APP Install: Calculate virtual host name when running scale out" + ansible.builtin.set_fact: + db_virtualhost_temp: >- + {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} + {%- if db_scale_out and not database_high_availability -%} + {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} + {%- endif -%} + {{- _host_name -}} + - name: "APP Install: Set BOM facts" ansible.builtin.set_fact: sap_inifile: "{{ bom_base_name }}-app-{{ sid_to_be_deployed.sid }}-{{ ansible_hostname }}.params" sap_inifile_template: "{{ bom_base_name }}{{ bom_suffix }}-app-inifile-param.j2" dir_params: "{{ tmp_directory }}/.{{ sid_to_be_deployed.sid | upper }}-params" - db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ hostvars[db_server_temp | first]['virtual_host'] }}{% endif %}" + db_lb_virtual_host_HANA: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_virtualhost_temp | default(hostvars[db_server_temp | first]['virtual_host'], true) }}{% endif %}" db_lb_virtual_host_AnyDB: "{% if database_high_availability %}{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl{% else %}{{ db_server_temp }}{% endif %}" app_virtual_hostname: "{{ virtual_host }}" From 1af638229738c957c5123f37f6f9fef074b96019 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 29 Mar 2024 18:46:29 +0530 Subject: [PATCH 482/607] Fix IP address resolution in main.yaml --- .../ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 1c3e03d6ed..b2e952a3c9 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -294,7 +294,7 @@ state: present mode: 0644 option: "map_{{ hostvars[item].virtual_host }}" - value: "{{ (hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_client_cidr)) | default(hostvars[item].ansible_host) }}" + value: "{{ (hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_client_cidr)) | first | default(hostvars[item].ansible_host) }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" From 2480209e6b04a3e24b830292d0386b8d6ad04c21 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 11:02:24 +0300 Subject: [PATCH 483/607] Update github-actions-ansible-lint.yml --- .github/workflows/github-actions-ansible-lint.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/github-actions-ansible-lint.yml b/.github/workflows/github-actions-ansible-lint.yml index 55999ec59d..c782aa06eb 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -26,6 +26,7 @@ jobs: ansible-galaxy collection install ansible.netcommon:5.1.2 --force ansible-galaxy collection install community.windows --force ansible-galaxy collection install community.general --force + 
ansible-galaxy collection install microsoft.ad --force - name: Run ansible-lint run: | From bbee0d7da0323cb86805967663c068c9f553e659 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 11:31:31 +0300 Subject: [PATCH 484/607] Change to use import_role instead of include_role (#574) * Add tags to tasks * Refactor OS configuration playbook roles to use import_role instead of include_role * Refactor import_role to include_role in OS configuration playbook * Fix import_role in OS configuration playbook * Update swap role to include reboot and wait for connection tasks * Update timeout for wait_for_connection task * add tags and change to use import_role * Add deploy_defender_extension variable and update resource configurations * Add update_only flag to package update task * Add platform condition for oracle-asm tier * Update SAP system configuration * Update auto_upgrade_minor_version to true * Add tags to Ansible tasks in playbook_01_os_base_config.yaml and playbook_03_bom_processing.yaml * Add condition to skip reboot on Oracle Linux 8 * Add task to clear host errors and improve system reachability in 1.1-swap role * Refactor Oracle post-processing tasks*** * Update swap role tasks to include reboot and wait for connection * Add tags * Add post-reboot delay in swap role * Fix failed_when condition in swap role * Add become: true to reboot task * Add become flag for reboot task * Add tasks to clear host errors and wait for system to become reachable * Add kmod-oracleasm package to os-packages.yaml * Add reboot task and set failed_when to false in oracle-postprocessing.yaml * Add Microsoft AD collection to ansible-galaxy installations * Fix VM Agent Status check in 2.10.1 sap-notes * Remove oracleasm-support and kmod-oracleasm packages from oraclelinux8.9 --------- Co-authored-by: Kimmo Forss --- .../playbook_00_validate_parameters.yaml | 163 +++++++++++++----- .../ansible/playbook_01_os_base_config.yaml | 80 +++++---- .../playbook_02_os_sap_specific_config.yaml | 61 ++++--- .../ansible/playbook_03_bom_processing.yaml | 21 ++- .../roles-misc/0.1-passwords/tasks/main.yaml | 36 +++- .../ansible/roles-os/1.1-swap/tasks/main.yaml | 21 ++- .../tasks/1.4.3-update-packages-RedHat.yaml | 2 + .../1.4-packages/vars/os-packages.yaml | 3 +- .../2.10-sap-notes/tasks/main.yaml | 12 ++ .../2.5-sap-users/tasks/main.yaml | 1 + .../tasks/oracle-postprocessing.yaml | 21 ++- .../modules/sap_landscape/iscsi.tf | 30 +++- .../modules/sap_landscape/vm.tf | 29 +--- .../modules/sap_system/anydb_node/vm-anydb.tf | 33 +--- .../modules/sap_system/app_tier/vm-app.tf | 66 ++++--- .../modules/sap_system/app_tier/vm-scs.tf | 66 ++++--- .../modules/sap_system/app_tier/vm-webdisp.tf | 66 ++++--- .../modules/sap_system/hdb_node/vm-hdb.tf | 33 ++-- 18 files changed, 491 insertions(+), 253 deletions(-) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index 8496333e6b..bb519a9fa0 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -22,16 +22,22 @@ ansible.builtin.debug: msg: - "SDAF Version: {{ SDAF_Version }}" + tags: + - always - name: "0.0 Validations - ensure jmespath is installed in the playbook python" ansible.builtin.pip: name: jmespath state: present + tags: + - always - name: "0.0 Validations - ensure netaddr is installed in the controller" ansible.builtin.pip: name: netaddr state: present + tags: + - always # - name: "0.0 Validations - ensure credssp is installed in 
the controller" # ansible.builtin.pip: @@ -44,11 +50,15 @@ path: "{{ _workspace_directory }}/.progress" state: directory mode: 0755 + tags: + - always - name: "0.0 Validations - Remove os-install-done flag" ansible.builtin.file: path: "{{ _workspace_directory }}/.progress/validation-done" state: absent + tags: + - always # -------------------------------------+---------------------------------------8 # @@ -61,6 +71,8 @@ when: - db_high_availability is defined - database_high_availability is not defined + tags: + - always - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: @@ -77,7 +89,9 @@ - { parameter: "use_msi_for_clusters", error: "use_msi_for_clusters has no value assigned" } - { parameter: "platform", error: "platform has no value assigned" } loop_control: - loop_var: item_to_check + loop_var: item_to_check + tags: + - always # -------------------------------------+---------------------------------------8 # @@ -94,6 +108,8 @@ - "use_msi_for_clusters: {{ use_msi_for_clusters }}" - "platform: {{ platform | upper }}" verbosity: 2 + tags: + - always # -------------------------------------+ # Fencing support is only needed when: @@ -128,6 +144,7 @@ (scs_high_availability and scs_cluster_type == "AFA") - platform != "ORACLE" tags: + - always - kv-secrets # -------------------------------------+ @@ -138,7 +155,8 @@ tasks_from: windows.yaml public: true tags: - - 0.1-win-passwords + - always + - 0.1-win-passwords when: - platform == "SQLSERVER" @@ -147,7 +165,8 @@ name: roles-misc/0.3.sap-installation-media-storage-details public: true tags: - - kv-sap-installation-media-storage-details + - always + - kv-sap-installation-media-storage-details - name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: @@ -183,6 +202,8 @@ } loop_control: loop_var: item_to_check + tags: + - always - name: "0.0 Validations - Check required SCS HA variables are present and not empty" ansible.builtin.assert: @@ -213,6 +234,8 @@ when: - scs_high_availability - platform != "SQLSERVER" + tags: + - always - name: "0.0 Validations - Check required SCS HA fencing variables are present and not empty" ansible.builtin.assert: @@ -244,6 +267,8 @@ # - scs_high_availability - (scs_high_availability and scs_cluster_type == "AFA") - not use_msi_for_clusters + tags: + - always - name: "0.0 Validations - Check required Database HA variables are present and not empty" ansible.builtin.assert: @@ -279,6 +304,8 @@ # - db_high_availability - (database_high_availability and database_cluster_type == "AFA") - platform in ["HANA", "DB2"] + tags: + - always - name: "0.0 Validations - sharedHome variables" ansible.builtin.debug: @@ -287,56 +314,68 @@ loop: "{{ MULTI_SIDS }}" loop_control: loop_var: sid_to_be_deployed + tags: + - always - name: "0.0 Validations - Validate ORACLE parameters (ora_release and ora_version)" + when: + - platform in ['ORACLE', 'ORACLE-ASM'] ansible.builtin.assert: that: - ora_release is version(ora_supported_version,'>=') - ora_version is version(ora_supported_version,'>=') fail_msg: "Oracle deployments requires that ora_release and ora_version are provided" - when: - - platform in ['ORACLE', 'ORACLE-ASM'] + tags: + - always - name: "0.0 Validations - Validate ORACLE parameters (oracle_sbp_patch)" + when: + - platform in ['ORACLE', 'ORACLE-ASM'] ansible.builtin.assert: that: - oracle_sbp_patch is defined - oracle_sbp_patch | default('') | trim | length > 1 fail_msg: "Oracle deployments requires that oracle_sbp_patch is 
provided" - when: - - platform in ['ORACLE', 'ORACLE-ASM'] + tags: + - always - name: "0.0 Validations - Show ORACLE parameters" + when: + - platform in ['ORACLE', 'ORACLE-ASM'] ansible.builtin.debug: msg: - "ora_release: {{ ora_release }}" - "ora_version: {{ ora_version }}" - "oracle_sbp_patch: {{ oracle_sbp_patch }}" - - when: - - platform in ['ORACLE', 'ORACLE-ASM'] + tags: + - always - name: "0.0 Validations - Validate SQL Server parameters" + when: + - platform == 'SQLSERVER' ansible.builtin.assert: that: - domain_name is defined # Has the variable been defined - domain_name | type_debug != 'NoneType' # Does it have a value - domain_name | trim | length > 1 fail_msg: 'SQL Server deployments require that domain is specified' - when: - - platform == 'SQLSERVER' - + tags: + - always - name: "0.0 Validations - Get Access Token" ansible.builtin.command: >- az account get-access-token --query accessToken -o tsv changed_when: false register: access_token_data + tags: + - always - name: "0.0 Validations - Save the access token" ansible.builtin.set_fact: access_token: "{{ access_token_data.stdout }}" no_log: true + tags: + - always - name: "0.0 Validations - Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: @@ -345,6 +384,8 @@ headers: Metadata: true register: azure_metadata + tags: + - always - name: "0.0 Validations - Show IMDS results (JSON)" ansible.builtin.debug: @@ -356,16 +397,22 @@ subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" mnt_free_diskspace: "{{ ansible_mounts | json_query('[?mount == `/mnt`].size_available') }}" + tags: + - always - name: "0.0 Validations - Show IMDS results" ansible.builtin.debug: var: mnt_free_diskspace + tags: + - always - name: "0.0 Validations - Deployer disk space requirements" ansible.builtin.set_fact: deployer_free_temp_disk_space: 40 when: - deployer_free_temp_disk_space is not defined + tags: + - always - name: "0.0 Validations - Check for free disk space on deployer" ansible.builtin.assert: @@ -374,7 +421,8 @@ when: - mnt_free_diskspace | length > 0 tags: - - 0.0-agent-diskspace + - always + - 0.0-agent-diskspace # /*---------------------------------------------------------------------------8 @@ -401,15 +449,21 @@ ansible.builtin.wait_for_connection: timeout: 120 register: wait_for_connection_results + tags: + - always - name: "0.0 Validations: - Gather facts for first time" ansible.builtin.setup: + tags: + - always - name: "0.0 Validations: - Set distro" ansible.builtin.set_fact: distro_family: "{{ ansible_os_family | upper }}" distribution_id: "{{ ansible_distribution | lower ~ ansible_distribution_major_version }}" distribution_full_id: "{{ ansible_distribution | lower ~ ansible_distribution_version }}" + tags: + - always - name: "0.0 Validations: - Show distro" ansible.builtin.debug: @@ -417,15 +471,21 @@ - "Distro family: {{ distro_family }}" - "Distribution id: {{ distribution_id }}" - "Distribution full id: {{ distribution_full_id }}" + tags: + - always - name: "0.0 Validations: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: python_version: "python3" + tags: + - always - name: "0.0 Validations: - Set Python version {{ distribution_id }}" + when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] ansible.builtin.set_fact: python_version: "python2" - when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] + tags: + - always - name: "0.0 
Validations - Get details from local host" ansible.builtin.set_fact: @@ -433,6 +493,8 @@ resource_group_name: "{{ hostvars.localhost.resource_group_name }}" access_token: "{{ hostvars.localhost.access_token }}" no_log: true + tags: + - always - name: "0.0 Validations - Variables" ansible.builtin.debug: @@ -459,16 +521,19 @@ - (ansible_play_hosts_all | length) == 1 - platform == "HANA" tags: - - 0.0-scs-db-instance-single + - always + - 0.0-scs-db-instance-single - name: "0.0 Validations - Validate SCS and HDB SIDs" + when: + - (ansible_play_hosts_all | length) == 1 + - platform != "ORACLE" ansible.builtin.assert: that: - "sap_sid != db_sid" fail_msg: "Please ensure that the sap_sid is different from the db_sid when performing a standalone installation" - when: - - (ansible_play_hosts_all | length) == 1 - - platform != "ORACLE" + tags: + - always - name: Validate SCS and PAS instance numbers ansible.builtin.assert: @@ -477,7 +542,8 @@ fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number when installing PAS on ASCS" when: (ansible_play_hosts_all | length) == 2 tags: - - 0.0-scs-pas + - always + - 0.0-scs-pas - name: "0.0 Validations - Validate SCS and PAS instance numbers" ansible.builtin.assert: @@ -486,7 +552,8 @@ fail_msg: "Please ensure that the pas_instance_number is different from the scs_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: - - 0.0-scs-pas-single + - always + - 0.0-scs-pas-single - name: "0.0 Validations - Validate DB and PAS instance numbers" ansible.builtin.assert: @@ -495,7 +562,8 @@ fail_msg: "Please ensure that the pas_instance_number is different from the db_instance_number on standalone installation" when: (ansible_play_hosts_all | length) == 1 tags: - - 0.0-scs-pas-db-single + - always + - 0.0-scs-pas-db-single # url: "https://management.azure.com/subscriptions/{{ subscription_id }}/providers/Microsoft.Web/sites?api-version=2022-03-01" # url: "https://azure.status.microsoft/en-us/status" @@ -513,11 +581,12 @@ vars: ansible_python_interpreter: "{{ python_version }}" when: - - (ansible_distribution | lower ~ ansible_distribution_major_version) in ['suse15', 'redhat8', 'redhat9', 'sles_sap15' ] - - ansible_os_family != "Windows" - - check_outbound | bool + - (ansible_distribution | lower ~ ansible_distribution_major_version) in ['suse15', 'redhat8', 'redhat9', 'sles_sap15' ] + - ansible_os_family != "Windows" + - check_outbound | bool tags: - - 0.0-internet + - always + - 0.0-internet - name: "0.0 Validations - Check internet connectivity" ansible.builtin.uri: @@ -530,11 +599,12 @@ - 200 - 403 when: - - (ansible_distribution | lower ~ ansible_distribution_major_version) in ['suse15', 'redhat8', 'redhat9', 'sles_sap15' ] - - ansible_os_family != "Windows" - - check_outbound | bool + - (ansible_distribution | lower ~ ansible_distribution_major_version) in ['suse15', 'redhat8', 'redhat9', 'sles_sap15' ] + - ansible_os_family != "Windows" + - check_outbound | bool tags: - - 0.0-internet + - always + - 0.0-internet # - name: "0.0 Validations - Get repository listing (SUSE)" # ansible.builtin.command: zypper lr @@ -580,9 +650,9 @@ - name: "0.0 Validations - Show Mounts" ansible.builtin.debug: msg: - - "Mount: {{ item.mount }}" - - "Device: {{ item.device }}" - - "Filesystem: {{ item.fstype }}" + - "Mount: {{ item.mount }}" + - "Device: {{ item.device }}" + - "Filesystem: {{ item.fstype }}" with_items: - "{{ ansible_mounts }}" when: @@ -596,14 +666,17 @@ mode: '0755' when: - 
ansible_os_family != "Windows" + tags: + - always - name: "0.0 Validations - Show Hosts" + when: + - ansible_os_family != "Windows" ansible.builtin.shell: set -o pipefail && cat /etc/hosts | grep -v -e "^#" | grep -v -e '^[[:space:]]*$' register: hosts tags: - - 0.0-hosts - when: - - ansible_os_family != "Windows" + - always + - 0.0-hosts - name: "0.0 Validations - Show Hosts" ansible.builtin.debug: @@ -619,16 +692,18 @@ path: "{{ _workspace_directory }}/.progress/validation-done" state: touch mode: 0755 + tags: + - always - name: "0.0 Validations - Netmask" + when: + - ansible_os_family != "Windows" ansible.builtin.debug: msg: - "Netmask: {{ hostvars[ansible_hostname]['ansible_default_ipv4']['netmask'] }}" - "CIDR: {{ ((hostvars[ansible_hostname]['ansible_default_ipv4']['address'] | string) + '/' + (hostvars[ansible_hostname]['ansible_default_ipv4']['netmask'] | string)) | ansible.utils.ipaddr('prefix') }}" - "IPV4: {{ hostvars[ansible_hostname]['ansible_default_ipv4']['address'] }}" - when: - - ansible_os_family != "Windows" - name: Check if /usr/sap is already mounted when: @@ -641,6 +716,8 @@ when: - item.mount == "/usr/sap" - "'scs' in supported_tiers" + tags: + - always - name: "0.0 Validations - Disk space details" when: @@ -651,12 +728,16 @@ - name: "0.0 Validations - Disk space details" ansible.builtin.set_fact: free_diskspace: "{{ ansible_mounts | json_query('[?mount == `/usr/sap`].size_available') }}" + tags: + - always - name: "0.0 Validations - SCS /usr/sap disk space requirements" ansible.builtin.set_fact: scs_free_diskspace: 10 when: - scs_free_diskspace is not defined + tags: + - always - name: "0.0 Validations - Show SCS disk space" ansible.builtin.debug: @@ -676,7 +757,9 @@ - (free_diskspace | first | int / (1024 * 1024 * 1024) | int) > (scs_free_diskspace | int) fail_msg: "The SCS server needs at least {{ scs_free_diskspace }} GB of free disk space in /mnt" tags: - - 0.0-scs-diskspace + - 0.0-scs-diskspace + tags: + - always # /*---------------------------------------------------------------------------8 # | | @@ -700,7 +783,7 @@ - "distribution_id == 'oraclelinux8'" fail_msg: "For Oracle deployments DB, PAS and APP needs to run on OracleLinux" when: - - platform == 'ORACLE' + - platform in ['ORACLE', 'ORACLE-ASM'] - hosts: "{{ sap_sid | upper }}_DB" diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 20da9a53b0..924707f8c6 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -15,26 +15,32 @@ ansible.builtin.pip: name: jmespath state: present + tags: + - always - name: "OS configuration playbook: - Create Progress folder" ansible.builtin.file: path: "{{ _workspace_directory }}/.progress" state: directory mode: 0755 + tags: + - always - name: "OS configuration playbook: - Remove os-install-done flag" ansible.builtin.file: path: "{{ _workspace_directory }}/.progress/os-configuration-done" state: absent + tags: + - always - name: "OS configuration playbook: - Read password" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-misc/0.1-passwords tasks_from: windows.yaml - public: true when: - platform == "SQLSERVER" tags: + - always - 0.1-win-passwords # /*---------------------------------------------------------------------------8 @@ -75,101 +81,103 @@ - name: "OS configuration playbook: - Set os fact" ansible.builtin.set_fact: tier: os + tags: + - always - name: "OS configuration playbook: - Set sudoers" - 
ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.0-sudoers tags: - 1.0-sudoers - name: "OS configuration playbook: - Set swap" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.1-swap tags: - 1.1-swap - name: "OS configuration playbook: - Set hostname" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.2-hostname tags: - 1.2-hostname - name: "OS configuration playbook: - Ensure the repositories are registered" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.3-repository tags: - 1.3-repository - name: "OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Configure volume groups and logical volumes" - ansible.builtin.include_role: + when: node_tier not in ["oracle-multi-sid", "oracle-asm"] + ansible.builtin.import_role: name: roles-os/1.5-disk-setup tags: - 1.5-disk-setup - when: node_tier not in ["oracle-multi-sid", "oracle-asm"] - name: "OS configuration playbook: - Configure volume groups and logical volumes (sharedHome)" - ansible.builtin.include_role: - name: roles-os/1.5.2-disk-setup-ora-multi-sid when: node_tier == "oracle-multi-sid" + ansible.builtin.import_role: + name: roles-os/1.5.2-disk-setup-ora-multi-sid tags: - 1.5.2-disk-setup-ora-multi-sid - name: "OS configuration playbook: - Chrony role" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.7-chrony tags: - 1.7-chrony - name: "OS configuration playbook: - Ensure the kernel parameters are set" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.9-kernelparameters tags: - 1.9-kernelparameters - name: "OS configuration playbook: - Configure networking" - ansible.builtin.include_role: - name: roles-os/1.10-networking when: ansible_os_family | upper == "REDHAT" + ansible.builtin.import_role: + name: roles-os/1.10-networking tags: - 1.10-networking - name: "OS configuration playbook: - Configure accounts" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.11-accounts tags: - 1.11-accounts - name: "OS configuration playbook: - Configure accounts (Oracle)" - ansible.builtin.include_role: + when: node_tier in ["oracle", "observer"] + ansible.builtin.import_role: name: roles-os/1.11-accounts vars: tier: ora - when: node_tier in ["oracle", "observer"] - name: "OS configuration playbook: - Configure MOTD" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.13-MOTD tags: - 1.13-MOTD - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.16-services tags: - 1.16-services - name: "OS configuration playbook: - Configure Prometheus" - ansible.builtin.include_role: - name: roles-os/1.20-prometheus when: - prometheus - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" + ansible.builtin.import_role: + name: roles-os/1.20-prometheus tags: - 1.20-prometheus when: @@ -182,6 +190,8 @@ - name: "OS configuration playbook: - Set os fact" ansible.builtin.set_fact: tier: os + tags: + - always - name: "OS configuration playbook: - Debug" ansible.builtin.debug: @@ -189,37 +199,37 @@ verbosity: 4 - name: "OS configuration playbook: - Add Needed packages" - ansible.builtin.include_role: + ansible.builtin.import_role: name: 
roles-os/windows/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Set swap" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/windows/1.1-swap tags: - 1.1-swap - name: "OS configuration playbook: - Memory Dump" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/windows/1.2-memory-dump tags: - 1.2-memory-dump - name: "OS configuration playbook: - Configure Disks" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/windows/1.5-disk-setup tags: - 1.5-disk-setup - name: "OS configuration playbook: - Disable the Firewall" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/windows/1.10-firewall tags: - 1.10-firewall - name: "OS configuration playbook: - Join Domain" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/windows/1.11-domain-join vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -242,6 +252,8 @@ dns_in_AD: "{{ dns_check_results.stdout_lines[0] }}" when: - dns_check_results is defined + tags: + - always - name: "OS configuration playbook: - Check if required DNS entries match" ansible.builtin.assert: @@ -288,6 +300,8 @@ - name: "OS configuration playbook: - Set os fact" ansible.builtin.set_fact: tier: issci + tags: + - always - name: "Create directories" become: true @@ -295,21 +309,23 @@ path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' state: directory mode: '0755' + tags: + - always - name: "OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.16-services tags: - 1.16-services - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.11-iscsi-server tags: - 2.11-iSCSI-server @@ -326,6 +342,8 @@ path: "{{ _workspace_directory }}/.progress/os-configuration-done" state: touch mode: 0755 + tags: + - always ... 
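The switch from include_role to import_role throughout this commit is what makes the new tags effective: import_role is resolved statically at parse time, so a tag on the importing task propagates to every task inside the role, whereas a tag on a dynamic include_role applies only to the include statement itself and the role's inner tasks are then filtered out under --tags. A minimal illustration, with a hypothetical role name:

# Sketch only: "example-role" is a placeholder, not a role in this repo.
- hosts: all
  tasks:
    # Static import: the tag is pushed down to every task in the role at
    # parse time, so `ansible-playbook site.yml --tags example` runs them.
    - name: Statically import a role
      ansible.builtin.import_role:
        name: example-role
      tags:
        - example

    # Dynamic include: the tag applies only to this include statement; the
    # role's own tasks do not inherit it and are skipped under --tags
    # unless wrapped with `apply: { tags: [example] }`.
    - name: Dynamically include the same role
      ansible.builtin.include_role:
        name: example-role
      tags:
        - example
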
# /*----------------------------------------------------------------------------8 diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 8e8ce1f375..5fb35bce5f 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -17,33 +17,40 @@ ansible.builtin.pip: name: jmespath state: present + tags: + - always - name: "SAP OS configuration playbook: - Create Progress folder" ansible.builtin.file: path: "{{ _workspace_directory }}/.progress" state: directory mode: 0755 + tags: + - always - name: "SAP OS configuration playbook: - Remove sap-os-install-done flag" ansible.builtin.file: path: "{{ _workspace_directory }}/.progress/sap-os-configuration-done" state: absent + tags: + - always - name: Include 0.3.sap-installation-media-storage-details role - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-misc/0.3.sap-installation-media-storage-details - public: true when: not is_run_with_infraCreate_only vars: tier: bom_download + tags: + - always - name: "SAP OS configuration playbook: - Read password" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-misc/0.1-passwords tasks_from: windows.yaml - public: true when: platform == "SQLSERVER" tags: + - always - 0.1-win-passwords # /*----------------------------------------------------------------------------8 @@ -91,12 +98,16 @@ headers: Metadata: true register: azure_metadata + tags: + - always - name: "SAP OS configuration playbook: - : Extract Subscription ID, Resource Group Name and Virtual Machine name" ansible.builtin.set_fact: subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" vm_name: "{{ azure_metadata.json.compute.name }}" + tags: + - always - name: "SAP OS configuration playbook: - Set sapos storage account facts" ansible.builtin.set_fact: @@ -106,6 +117,7 @@ - not is_run_with_infraCreate_only - hostvars.localhost.sapbits_location_base_path is defined tags: + - always - is_run_with_infraCreate_only - name: "SAP OS configuration playbook: - Set sapos storage account facts" @@ -115,6 +127,7 @@ - not is_run_with_infraCreate_only - hostvars.localhost.sapbits_sas_token is defined tags: + - always - is_run_with_infraCreate_only - name: "SAP OS configuration playbook: - Get the IP information from instance meta data service" @@ -124,6 +137,8 @@ headers: Metadata: true register: azure_network_metadata + tags: + - always # - name: "SAP OS configuration playbook: - Filter out the values for IPAddresses in json format" # ansible.builtin.set_fact: @@ -164,25 +179,25 @@ # vars: # ipaddr: "{{ ipadd }}" - name: "SAP OS configuration playbook: - Create hosts file" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.4-hosts-file tags: - 2.4-hosts-file - name: "SAP OS configuration playbook: - Ensure the repositories are registered" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.3-repository tags: - 1.3-repository - name: "SAP OS configuration playbook: - Configure accounts" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.11-accounts tags: - 1.11-accounts - name: "SAP OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.4-packages tags: - 1.4-packages @@ -191,7 +206,7 @@ when: - 
scs_high_availability or database_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2'] - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.4-packages tags: - 1.4-packages @@ -199,14 +214,14 @@ tier: ha - name: "SAP OS configuration playbook: - Configure volume groups, logical volumes and file systems" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.5-disk-setup when: node_tier not in ["oracle-multi-sid", "oracle-asm"] tags: - 1.5-disk-setup - name: "SAP OS configuration playbook: - Configure volume groups, logical volumes and file systems" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.5.1.1-disk-setup-asm-sap when: - node_tier == "oracle-asm" @@ -214,26 +229,26 @@ - 1.5.1.1-disk-setup-asm-sap - name: "SAP OS configuration playbook: - Configure the disks for Oracle Multi SID" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.5.2-disk-setup-ora-multi-sid when: node_tier == "oracle-multi-sid" tags: - 1.5.2-disk-setup-ora-multi-sid - name: "SAP OS configuration playbook: - Configure the kernel parameters" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.9-kernelparameters tags: - 1.9-kernelparameters - name: "SAP OS configuration playbook: - Create SAP users/groups" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.5-sap-users tags: - 2.5-sap-users - name: "SAP OS configuration playbook: - Ensure the services are configured" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.16-services tags: - 1.16-services @@ -242,7 +257,7 @@ when: - scs_high_availability or database_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2'] - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-os/1.16-services tags: - 1.16-services @@ -250,25 +265,25 @@ tier: ha - name: "SAP OS configuration playbook: - directory permissions" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.2-sapPermissions tags: - 2.2-sapPermissions - name: "SAP OS configuration playbook: - Configurations according to SAP Notes" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.10-sap-notes tags: - 2.10-sap-notes - name: "SAP OS configuration playbook: - configure exports" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.3-sap-exports tags: - 2.3-sap-exports - name: "SAP OS configuration playbook: - Mount the file systems" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/2.6-sap-mounts tags: - 2.6-sap-mounts @@ -310,7 +325,7 @@ - domain_sqlsvc_account is not defined - name: "SAP OS configuration playbook: - Add local groups and Permissions" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/windows/2.5-sap-users vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -318,7 +333,7 @@ domain_service_account: "{{ hostvars.localhost.adsvc_account }}" - name: "SAP OS configuration playbook: - Sharing Data Folder" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/windows/2.3-sap-exports vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -327,7 +342,7 @@ sql_svc_account: "{{ domain_sqlsvc_account }}" - name: "SAP OS configuration playbook: - Mounting Shared Folder" - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-sap-os/windows/2.6-sap-mounts 
vars: domain_user_password: "{{ hostvars.localhost.winadm_password }}" diff --git a/deploy/ansible/playbook_03_bom_processing.yaml b/deploy/ansible/playbook_03_bom_processing.yaml index c89ebaaff5..e6b169f544 100644 --- a/deploy/ansible/playbook_03_bom_processing.yaml +++ b/deploy/ansible/playbook_03_bom_processing.yaml @@ -19,18 +19,23 @@ path: "{{ _workspace_directory }}/.progress" state: directory mode: 0755 + tags: + - always - name: Remove bom-processing-done flag ansible.builtin.file: path: "{{ _workspace_directory }}/.progress/bom-processing-done" state: absent + tags: + - always - name: Include 0.3.sap-installation-media-storage-details role - ansible.builtin.include_role: + ansible.builtin.import_role: name: roles-misc/0.3.sap-installation-media-storage-details - public: true vars: tier: bom_download + tags: + - always # /*----------------------------------------------------------------------------8 # | | @@ -62,18 +67,22 @@ tier: preparation sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sa_enabled: true + tags: + - always - name: Set facts + when: hostvars.localhost.sapbits_sas_token is defined ansible.builtin.set_fact: sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" - when: hostvars.localhost.sapbits_sas_token is defined + tags: + - always - name: 3.3-bom-processing role for Linux become: true when: ansible_os_family != "Windows" block: - name: Include the 3.3-bom-processing role - ansible.builtin.include_role: + ansible.builtin.import_role: name: "roles-sap/3.3-bom-processing" vars: tier: bom_download @@ -87,7 +96,7 @@ when: ansible_os_family == "Windows" block: - name: Include the 3.3-bom-processing role - ansible.builtin.include_role: + ansible.builtin.import_role: name: "roles-sap/windows/3.3-bom-processing" vars: tier: bom_download @@ -103,6 +112,8 @@ path: "{{ _workspace_directory }}/.progress/bom-processing" state: touch mode: 0755 + tags: + - always ... 
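The blanket "tags: - always" added to bootstrap steps in these playbooks (pip installs, progress-flag files, secret lookups) guarantees they still run when a pipeline invokes ansible-playbook with --tags. A minimal illustration, with made-up task content:

# The `always` tag runs under any --tags selection (unless --skip-tags always).
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Bootstrap step that must never be filtered out
      ansible.builtin.set_fact:
        tier: bom_download
      tags:
        - always

    - name: Step that only runs when its tag is selected
      ansible.builtin.debug:
        msg: "runs only under --tags 3.3-bom-processing"
      tags:
        - 3.3-bom-processing
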
# /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml index b53c029d68..9653b5063d 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml @@ -8,12 +8,16 @@ - name: "0.1 Password: - Construct SAP system password secret name" ansible.builtin.set_fact: sap_password_id: "{{ secret_prefix }}-{{ sap_sid }}-sap-password" + tags: + - always - name: "0.1 Password: - Create Password secret" ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ main_password }}" when: - main_password is defined - "main_password | trim | length != 0" + tags: + - always - name: "0.1 Password: - Show SAP system password secret name" ansible.builtin.debug: @@ -28,12 +32,17 @@ --output yaml changed_when: false register: keyvault_secret_sap_password_exists + tags: + - always - name: "0.1 Password: - Check for secret availability" ansible.builtin.set_fact: secret_exists: "{{ (sap_password_id in keyvault_secret_sap_password_exists.stdout) | bool }}" + tags: + - always - name: "0.1 Password: - Retrieve SAP system password" + when: secret_exists block: - name: "0.1 Password: - Get SAP password from key vault" ansible.builtin.command: >- @@ -45,21 +54,26 @@ changed_when: false register: keyvault_secret_show_sap_password_value no_log: true + tags: + - always - name: "0.1 Password: - Extract SAP password" ansible.builtin.set_fact: sap_password: "{{ keyvault_secret_show_sap_password_value.stdout }}" no_log: true - - when: secret_exists + tags: + - always - name: "0.1 Password: - Set SAP system password" + when: not secret_exists block: - name: "0.1 Password: - Remove tmp file" ansible.builtin.file: path: /tmp/sappasswordfile state: absent + tags: + - always - name: "0.1 Password: - Create Password prefix" ansible.builtin.set_fact: @@ -74,6 +88,8 @@ - ascii_uppercase - digits no_log: true + tags: + - always - name: "0.1 Password: - Construct SAP system password" ansible.builtin.set_fact: @@ -84,14 +100,16 @@ chars=ascii_lowercase,ascii_uppercase,digits') }}" no_log: true + tags: + - always # - name: "0.1 Password: - Construct SAP system password" # ansible.builtin.set_fact: # sap_password: "S3{{ lookup('password', '/tmp/sappasswordfile length=10 chars=ascii_lowercase,ascii_uppercase,digits') }}" - name: "0.1 Password: - Create Password secret" ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ sap_password }}" - - when: not secret_exists + tags: + - always - name: "0.1 Password: - Ensure the password is set" @@ -101,21 +119,27 @@ - sap_password | type_debug != 'NoneType' # Is the variable not empty" - sap_password | trim | length > 8 fail_msg: "The SAP main password was not set in key vault" + tags: + - always - name: "0.1 Password: - Show SAP Password" ansible.builtin.debug: var: sap_password verbosity: 4 -- name: "Backward Compatibility - Check required Database HA variables" +- name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" + database_high_availability: "{{ db_high_availability | default(false) }}" when: - db_high_availability is defined - database_high_availability is not defined + tags: + - always - name: "0.1 Password: - Get Cluster passwords" 
ansible.builtin.include_tasks: 0.1.1-ha_clusterpasswords.yaml when: database_high_availability or scs_high_availability + tags: + - always ... diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index f3c30000af..84f901967f 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -67,10 +67,27 @@ when: - swap_size == '0' -- name: "Swap reboot" - ansible.builtin.reboot: +- name: "1.1 Swap: - Reboot" when: - wagent_configuration.changed + block: + + - name: "1.1 Swap: - Reboot" + become: true + ansible.builtin.reboot: + reboot_timeout: 300 + post_reboot_delay: 10 + failed_when: false + + - name: "1.1 Swap: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors + + # Wait for Connection after reboot + - name: "1.1 Swap: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 + # - name: "Get SWAP Size" # set_fact: diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml index 005cd06c82..e067ab37bc 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml @@ -13,9 +13,11 @@ name: '*' state: latest skip_broken: true + update_only: true register: reboot_output when: - tier == 'os' + - (ansible_distribution | lower ~ ansible_distribution_major_version) != "oraclelinux8" # Analyse the package list for this distribution selecting only those # packages assigned to the active tier or 'all'. # - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 94ef59ad0c..9669c97d94 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -505,5 +505,6 @@ packages: oraclelinux8.9: - { tier: 'os', package: 'oracle-database-preinstall-19c', node_tier: 'all', state: 'present' } - - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } +# - { tier: 'os', package: 'kmod-oracleasm', node_tier: 'oracle-asm', state: 'present' } +# - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index f4a5e6ae6b..f9ee5d99ad 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml @@ -55,13 +55,25 @@ register: selinux_disabled - name: "2.10.1 sap-notes: Reboot app VMs after selinux is configured" + become: true ansible.builtin.reboot: reboot_timeout: 300 post_reboot_delay: 60 ignore_unreachable: true + failed_when: false when: - selinux_disabled.changed + - name: "2.10.1 sap-notes: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors + + # Wait for Connection after reboot + - name: "2.10.1 sap-notes: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 + + - name: "2.10.1 sap-notes: Check VM Agent Status" when: - selinux_disabled.changed diff --git 
a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml index fb70b77aef..0c1bc5713d 100644 --- a/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.5-sap-users/tasks/main.yaml @@ -138,3 +138,4 @@ # - { tier: 'WEB', user: 'webadm', uid: '32002', group: 'sapsys', home: '/home/webadm', comment: 'SAP WebDisp Admin' } when: - node_tier == "oracle-asm" or node_tier == "observer" + - platform == "ORACLE-ASM" diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml index 6630786128..e526a5269e 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml @@ -101,18 +101,23 @@ # Wait for creation of HugePages # Rebbot the VM to avoid the error "ORA-27102: out of memory" -- name: "DB VM reboot" +- name: "ORACLE Post Processing: DB VM reboot" block: - - name: "Oracle post-processing: Reboot after the Enabling HugePages" - ansible.builtin.reboot: - reboot_timeout: 300 + - name: "ORACLE Post Processing: Reboot after the Enabling HugePages" + become: true + ansible.builtin.reboot: + reboot_timeout: 300 + failed_when: false + + - name: "ORACLE Post Processing: Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors # Wait for Connection after reboot - - name: "Wait for Connection after reboot" - ansible.builtin.wait_for_connection: - delay: 10 - timeout: 300 + - name: "ORACLE Post Processing: Wait for Connection after reboot" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 when: - not sga_update_status.stat.exists diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 1d8b4e668d..576ebd26a9 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -326,18 +326,30 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true + +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension ? 
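The reboot handling introduced in the swap, sap-notes and Oracle post-processing roles above always follows the same three steps: reboot with failed_when: false so a dropped SSH session is tolerated, clear the transient host error, then wait for the connection to return. Condensed into one block (a sketch, not lifted verbatim from any single role):

- name: Reboot and survive transient connection loss
  block:
    - name: Reboot the host
      become: true
      ansible.builtin.reboot:
        reboot_timeout: 300
        post_reboot_delay: 10
      failed_when: false          # tolerate agent hiccups during reboot

    - name: Clear the failed state left by a dropped connection
      ansible.builtin.meta: clear_host_errors

    - name: Wait until the host is reachable again
      ansible.builtin.wait_for_connection:
        delay: 10
        timeout: 300
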
( + local.iscsi_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.iscsi[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.infrastructure.iscsi.user_assigned_identity_id - } - } + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, } - ) + ) } resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 8b8dcb0092..14bf8cb183 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -179,17 +179,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_lnx" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.infrastructure.user_assigned_identity_id - } - } - } - ) + auto_upgrade_minor_version = true } @@ -202,17 +192,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.infrastructure.user_assigned_identity_id - } - } - } - ) + auto_upgrade_minor_version = true } @@ -224,7 +204,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_lnx" { publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true settings = jsonencode( { @@ -243,8 +223,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - + auto_upgrade_minor_version = true settings = jsonencode( { "enableGenevaUpload" = true, diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 010aee681e..71abbf101f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -455,7 +455,7 @@ resource "azurerm_virtual_machine_extension" "anydb_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -480,7 +480,7 @@ resource "azurerm_virtual_machine_extension" "anydb_win_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -710,18 +710,8 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" 
type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.database.user_assigned_identity_id - } - } - } - ) } @@ -735,17 +725,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.database.user_assigned_identity_id - } - } - } - ) + auto_upgrade_minor_version = true } @@ -761,7 +741,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true settings = jsonencode( { @@ -783,7 +763,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_win" { publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" type_handler_version = "2.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true settings = jsonencode( { @@ -794,4 +774,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_win" { ) } - diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 1bec8dd701..9f5ad143ac 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -444,7 +444,7 @@ resource "azurerm_virtual_machine_extension" "app_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -465,7 +465,7 @@ resource "azurerm_virtual_machine_extension" "app_win_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -505,18 +505,8 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } - } - ) } @@ -530,18 +520,50 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true + +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "LINUX" ? 
( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.app[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, } - ) + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? ( + local.application_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.app[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) } resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index b28d362d80..5d507ae788 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -496,7 +496,7 @@ resource "azurerm_virtual_machine_extension" "scs_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -517,7 +517,7 @@ resource "azurerm_virtual_machine_extension" "scs_win_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -707,17 +707,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } - } - ) + auto_upgrade_minor_version = true } @@ -731,19 +721,51 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true +} + + +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, } - ) + ) } +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 83f356b2be..c1282e7967 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -444,7 +444,7 @@ resource "azurerm_virtual_machine_extension" "web_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -465,7 +465,7 @@ resource "azurerm_virtual_machine_extension" "web_win_aem_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -633,17 +633,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" - settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } - } - ) + auto_upgrade_minor_version = true } @@ -657,17 +647,49 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.web[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.application_tier.user_assigned_identity_id - } - } + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, } - ) + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? ( + local.webdispatcher_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.web[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) } resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index a9e42f7686..ab13a4e6dc 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -404,7 +404,7 @@ resource "azurerm_virtual_machine_extension" "hdb_linux_extension" { settings = jsonencode( { "system": "SAP", - "cfg": local.extension_settings + } ) tags = var.tags @@ -564,18 +564,31 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { publisher = "Microsoft.Azure.Monitor" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" - auto_upgrade_minor_version = "true" + auto_upgrade_minor_version = true + +} + + +resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension ? 
( + var.database_server_count) : ( + 0 + ) + virtual_machine_id = azurerm_linux_virtual_machine.vm_dbnode[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true settings = jsonencode( - { - "authentication" = { - "managedIdentity" = { - "identifier-name" : "mi_res_id", - "identifier-value": var.database.user_assigned_identity_id - } - } + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, } - ) + ) } From 797df19efdc0dce5e5a8a2eaa56c7f2b3607d075 Mon Sep 17 00:00:00 2001 From: Jaskirat Singh <108129510+jaskisin@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:21:31 +0530 Subject: [PATCH 485/607] Add No log for access token and SAP Media Share as part of Install experience (#575) * No Log for access token * Changes for SAP Media File share as Install experience --- deploy/ansible/playbook_00_validate_parameters.yaml | 1 + deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index bb519a9fa0..fc97d80cca 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -367,6 +367,7 @@ az account get-access-token --query accessToken -o tsv changed_when: false register: access_token_data + no_log: true tags: - always diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 00b3fa1403..e2d27e4cbf 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -169,6 +169,7 @@ when: - "'scs' in supported_tiers" - usr_sap_install_mountpoint is undefined + - not is_executed_by_acss - name: "1.5 Disk setup - Check if installation root directory exists" ansible.builtin.stat: @@ -255,6 +256,7 @@ - node_tier != 'scs' - "'scs' not in supported_tiers" - usr_sap_install_mountpoint is not defined + - not is_executed_by_acss # Mount File systems for SCS server in Multi-SID installations - name: "2.6 SAP Mounts: - Mount local sapmnt (scs) for oracle shared home installation" From 64c232e33c4ddb64a7ae10b1e7e871e89f1c1985 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 15:58:45 +0300 Subject: [PATCH 486/607] Update enable_db_lb_deployment condition in variables_local.tf --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index eea603147e..1f22002832 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -122,7 +122,7 @@ locals { "password" = var.sid_password } - enable_db_lb_deployment = var.database_server_count > 0 && (var.use_loadbalancers_for_standalone_deployments || var.database_server_count > 1) + enable_db_lb_deployment = var.database_server_count > 0 && (var.use_loadbalancers_for_standalone_deployments || var.database_server_count > 1) && !var.database.scale_out database_sid = try(var.database.instance.sid, local.sid) 
// HANA database sid from the Databases array for use as reference to LB/AS database_instance = try(var.database.instance.number, "00") From 31d8ea2adbd217646bf929e9aac401d38ea6162b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 16:04:02 +0300 Subject: [PATCH 487/607] Remove redundant code for monitoring defender extensions --- .../modules/sap_system/app_tier/vm-webdisp.tf | 41 ------------------- .../modules/sap_system/hdb_node/vm-hdb.tf | 22 ---------- 2 files changed, 63 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index c1282e7967..f784b11dd9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -692,44 +692,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { ) } -resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - local.webdispatcher_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.web[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? ( - local.webdispatcher_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.web[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index ab13a4e6dc..6cd4ad0765 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -591,25 +591,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { ) } - -resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension ? 
( - var.database_server_count) : ( - 0 - ) - virtual_machine_id = azurerm_linux_virtual_machine.vm_dbnode[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} From ac6d345778068e41fc4fb1383e148ea657ae43f4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 16:07:04 +0300 Subject: [PATCH 488/607] Refactor monitoring defender extensions for app and scs tiers --- .../modules/sap_system/app_tier/vm-app.tf | 42 ------------------ .../modules/sap_system/app_tier/vm-scs.tf | 43 ------------------- 2 files changed, 85 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 9f5ad143ac..89e97dd418 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -566,45 +566,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { ) } -resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "LINUX" ? ( - local.application_server_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.app[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.app_os.os_type) == "WINDOWS" ? ( - local.application_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.app[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 5d507ae788..cc53c010eb 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -766,46 +766,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { } ) } - -resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - From b8a941d6aadbf379ad13151b1120a31e0d08726f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 16:09:54 +0300 Subject: [PATCH 489/607] Refactor enable_db_lb_deployment logic in variables_local.tf --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index 1f22002832..b01a87dc6d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -122,7 +122,7 @@ locals { "password" = var.sid_password } - enable_db_lb_deployment = var.database_server_count > 0 && (var.use_loadbalancers_for_standalone_deployments || var.database_server_count > 1) && !var.database.scale_out + enable_db_lb_deployment = var.database.high_availability || var.use_loadbalancers_for_standalone_deployments ? true : false database_sid = try(var.database.instance.sid, local.sid) // HANA database sid from the Databases array for use as reference to LB/AS database_instance = try(var.database.instance.number, "00") From 089dada1eac5df3904586ed16ffc5ed7aa8cf306 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 16:13:05 +0300 Subject: [PATCH 490/607] Update enable_db_lb_deployment logic in variables_local.tf --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index b01a87dc6d..1f22002832 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -122,7 +122,7 @@ locals { "password" = var.sid_password } - enable_db_lb_deployment = var.database.high_availability || var.use_loadbalancers_for_standalone_deployments ? 
true : false + enable_db_lb_deployment = var.database_server_count > 0 && (var.use_loadbalancers_for_standalone_deployments || var.database_server_count > 1) && !var.database.scale_out database_sid = try(var.database.instance.sid, local.sid) // HANA database sid from the Databases array for use as reference to LB/AS database_instance = try(var.database.instance.number, "00") From fe13037b44ef8f7784ee1d68b4bf58e4918849b2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 2 Apr 2024 17:17:53 +0300 Subject: [PATCH 491/607] Remove redundant code for iscsi monitoring defender --- .../modules/sap_landscape/iscsi.tf | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 576ebd26a9..3459a9348b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -352,24 +352,3 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { ) } -resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension ? ( - local.iscsi_count) : ( - 0 - ) - virtual_machine_id = azurerm_linux_virtual_machine.iscsi[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = "true" - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} From ed849d0f2dc4dee98a9333277e482d97bb255841 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 3 Apr 2024 12:44:43 +0300 Subject: [PATCH 492/607] Update enable_db_lb_deployment logic in variables_local.tf --- .../modules/sap_system/hdb_node/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index 1f22002832..debe7b724e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -122,7 +122,7 @@ locals { "password" = var.sid_password } - enable_db_lb_deployment = var.database_server_count > 0 && (var.use_loadbalancers_for_standalone_deployments || var.database_server_count > 1) && !var.database.scale_out + enable_db_lb_deployment = var.database.high_availability || var.use_loadbalancers_for_standalone_deployments database_sid = try(var.database.instance.sid, local.sid) // HANA database sid from the Databases array for use as reference to LB/AS database_instance = try(var.database.instance.number, "00") From 96946662832af69b86c57f477e8c03ee43a53e33 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 3 Apr 2024 18:34:00 +0300 Subject: [PATCH 493/607] Update failed_when condition in oracle-postprocessing.yaml --- .../roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml index e526a5269e..add75e5f53 100644 --- 
a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml @@ -140,7 +140,7 @@ become_user: "oracle" ansible.builtin.shell: lsnrctl start register: lsnrctl_start_primary_results - failed_when: lsnrctl_start_primary_results.rc > 0 + failed_when: lsnrctl_start_primary_results.rc > 1 args: creates: /etc/sap_deployment_automation/{{ db_sid | upper }}/lsnrctl_started.txt chdir: /etc/sap_deployment_automation/{{ db_sid | upper }} From 6927441cd07ec26655511731bea23f305721caba Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 7 Apr 2024 19:29:52 +0300 Subject: [PATCH 494/607] Refactor enable_db_lb_deployment logic in variables_local.tf --- .../1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml index e067ab37bc..d21f088831 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml @@ -17,7 +17,6 @@ register: reboot_output when: - tier == 'os' - - (ansible_distribution | lower ~ ansible_distribution_major_version) != "oraclelinux8" # Analyse the package list for this distribution selecting only those # packages assigned to the active tier or 'all'. # - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest From 314c50b50e0c94e404d82b31656953d473bccce1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 8 Apr 2024 09:27:43 +0300 Subject: [PATCH 495/607] Add AVG support for Scale out scenarios (#577) * Add data and log volumes * Refactor AVG logic * Fix proximity_placement_group_id calculation in avg.tf * Refactor for_each condition in avg.tf * Refactor for_each condition in avg.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor zone calculation logic in variables_local.tf * Refactor proximity_placement_group_id calculation in avg.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in vm-app.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in infrastructure.tf * Refactor package update condition in 1.4.3-update-packages-RedHat.yaml --------- Co-authored-by: Kimmo Forss --- .../sap_system/app_tier/infrastructure.tf | 1 + .../modules/sap_system/app_tier/vm-app.tf | 2 + .../common_infrastructure/variables_local.tf | 2 +- .../modules/sap_system/hdb_node/anf.tf | 55 +---- .../modules/sap_system/hdb_node/avg.tf | 228 +++++++----------- .../sap_system/hdb_node/variables_local.tf | 16 ++ 6 files changed, 116 insertions(+), 188 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index cdcd3a4a0b..7ca4ec7ba4 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -328,6 +328,7 @@ resource "azurerm_availability_set" "app" { length(var.ppg)) : ( 0 ) + depends_on = [azurerm_virtual_machine_data_disk_attachment.scs] name = format("%s%s%s", local.prefix, var.naming.separator, diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf 
b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 89e97dd418..71b4e4acf9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -105,6 +105,7 @@ resource "azurerm_linux_virtual_machine" "app" { local.application_server_count) : ( 0 ) + depends_on = [azurerm_virtual_machine_data_disk_attachment.scs] name = format("%s%s%s%s%s", var.naming.resource_prefixes.vm, local.prefix, @@ -254,6 +255,7 @@ resource "azurerm_windows_virtual_machine" "app" { local.application_server_count) : ( 0 ) + depends_on = [azurerm_virtual_machine_data_disk_attachment.scs] name = format("%s%s%s%s%s", var.naming.resource_prefixes.vm, local.prefix, diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf index 7d6aaa5145..a21dd0aaea 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf @@ -35,7 +35,7 @@ locals { db_zones = try(var.database.zones, []) scs_zones = try(var.application_tier.scs_zones, []) web_zones = try(var.application_tier.web_zones, []) - zones = distinct(concat(local.db_zones, local.app_zones, local.scs_zones, local.web_zones)) + zones = var.application_tier.app_use_ppg ? local.db_zones : distinct(concat(local.db_zones, local.app_zones, local.scs_zones, local.web_zones)) zonal_deployment = length(local.zones) > 0 ? true : false //Flag to control if nsg is creates in virtual network resource group diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index cc5bef6836..8759b18e2a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -6,14 +6,7 @@ resource "azurerm_netapp_volume" "hanadata" { provider = azurerm.main - count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data && !local.use_avg ? ( - var.hana_ANF_volumes.use_existing_data_volume ? ( - 0 - ) : ( - (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count - )) : ( - 0 - ) : 0 + count = local.create_data_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanadata, local.prefix, @@ -57,14 +50,7 @@ data "azurerm_netapp_volume" "hanadata" { depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data ? ( - var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( - (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count - ) : ( - 0 - )) : ( - 0 - ) : 0 + count = local.use_data_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanadata, @@ -85,14 +71,7 @@ resource "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = length(local.ANF_pool_settings.pool_name) > 0 ? 
var.hana_ANF_volumes.use_for_log && !local.use_avg ? ( - var.hana_ANF_volumes.use_existing_log_volume ? ( - 0 - ) : ( - (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count - )) : ( - 0 - ) : 0 + count = local.create_log_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, local.prefix, @@ -133,14 +112,7 @@ data "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log ? ( - var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( - (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count - ) : ( - 0 - )) : ( - 0 - ) : 0 + count = local.use_log_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, @@ -160,14 +132,7 @@ resource "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared && !local.use_avg ? ( - var.hana_ANF_volumes.use_existing_shared_volume ? ( - 0 - ) : ( - 1 - )) : ( - 0 - ) : 0 + count = local.create_shared_volumes ? length(var.ppg) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, local.prefix, @@ -209,15 +174,7 @@ resource "azurerm_netapp_volume" "hanashared" { data "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - - count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared ? ( - var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - 1 - ) : ( - 0 - )) : ( - 0 - ) : 0 + count = local.use_shared_volumes ? length(var.ppg) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf index 895bebf180..9491bc9aff 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf @@ -6,7 +6,7 @@ resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" { provider = azurerm.main - count = local.use_avg ? length(var.ppg) : 0 + count = local.use_avg ? 
length(var.database.zones) * (var.database_server_count - var.database.stand_by_node_count) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hana_avg, local.prefix, @@ -20,19 +20,101 @@ resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" { group_description = format("Application Volume %d group for %s", count.index + 1, var.sap_sid) application_identifier = local.sid + volume { + name = format("%s%s%s%s%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanadata, + count.index + 1 + ) + volume_path = format("%s-%s%02d", + var.sap_sid, + local.resource_suffixes.hanadata, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "data" + storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size + throughput_in_mibps = var.hana_ANF_volumes.data_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + + volume { + name = format("%s%s%s%s%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanalog, + count.index + 1 + ) + volume_path = format("%s-%s%02d", + var.sap_sid, + local.resource_suffixes.hanalog, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "log" + storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size + throughput_in_mibps = var.hana_ANF_volumes.log_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + dynamic "volume" { iterator = pub - for_each = (count.index == 0 ? local.volumes_primary : local.volumes_secondary) + for_each = range(count.index < length(var.database.zones) ? 
1 : 0) content { - name = pub.value.name - volume_path = pub.value.path + name = format("%s%s%s%s%d", + var.naming.resource_prefixes.hanashared, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanashared, + count.index + 1 + ) + volume_path = format("%s-%s%02d", + var.sap_sid, + local.resource_suffixes.hanashared, + count.index + 1 + ) service_level = local.ANF_pool_settings.service_level capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id subnet_id = try(local.ANF_pool_settings.subnet_id, "") - proximity_placement_group_id = pub.value.proximityPlacementGroup - volume_spec_name = pub.value.volumeSpecName - storage_quota_in_gb = pub.value.storage_quota_in_gb - throughput_in_mibps = pub.value.throughput_in_mibps + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "shared" + storage_quota_in_gb = var.hana_ANF_volumes.shared_volume_size + throughput_in_mibps = var.hana_ANF_volumes.shared_volume_throughput protocols = ["NFSv4.1"] security_style = "unix" snapshot_directory_visible = false @@ -68,133 +150,3 @@ data "azurerm_netapp_account" "workload_netapp_account" { resource_group_name = try(split("/", local.ANF_pool_settings.account_id)[4], "") } - -locals { - use_avg = ( - var.hana_ANF_volumes.use_AVG_for_data) && ( - var.hana_ANF_volumes.use_for_data || var.hana_ANF_volumes.use_for_log || var.hana_ANF_volumes.use_for_shared - ) && !var.use_scalesets_for_deployment - - hana_data1 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanadata, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanadata, 1 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanadata, - 1 - ) - volumeSpecName = "data" - proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null - storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size - throughput_in_mibps = var.hana_ANF_volumes.data_volume_throughput - zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null - } - - hana_data2 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanadata, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanadata, 2 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanadata, - 2 - ) - volumeSpecName = "data" - proximityPlacementGroup = length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null) - storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size - throughput_in_mibps = var.hana_ANF_volumes.data_volume_throughput - zone = local.db_zone_count > 1 ? try(local.zones[1], null) : null - - } - - hana_log1 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanalog, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanalog, 1 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanalog, - 1 - ) - volumeSpecName = "log" - proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null - storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size - throughput_in_mibps = var.hana_ANF_volumes.log_volume_throughput - zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null - } - - hana_log2 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanalog, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanalog, 2 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanalog, - 2 - ) - volumeSpecName = "log" - proximityPlacementGroup = length(var.ppg) > 1 ? 
try(var.ppg[1], null) : try(var.ppg[0], null) - storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size - throughput_in_mibps = var.hana_ANF_volumes.log_volume_throughput - zone = local.db_zone_count > 1 ? try(local.zones[1], null) : null - } - - hana_shared1 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanashared, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanashared, 1 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanashared, - 1 - ) - volumeSpecName = "shared" - proximityPlacementGroup = length(var.ppg) > 0 ? try(var.ppg[0], null) : null - storage_quota_in_gb = var.hana_ANF_volumes.shared_volume_size - throughput_in_mibps = var.hana_ANF_volumes.shared_volume_throughput - zone = local.db_zone_count > 0 ? try(local.zones[0], null) : null - } - - hana_shared2 = { - name = format("%s%s%s%s%d", - var.naming.resource_prefixes.hanashared, - local.prefix, - var.naming.separator, - local.resource_suffixes.hanashared, 2 - ) - path = format("%s-%s%02d", - var.sap_sid, - local.resource_suffixes.hanashared, - 2 - ) - volumeSpecName = "shared" - proximityPlacementGroup = length(var.ppg) > 1 ? try(var.ppg[1], null) : try(var.ppg[0], null) - storage_quota_in_gb = var.hana_ANF_volumes.shared_volume_size - throughput_in_mibps = var.hana_ANF_volumes.shared_volume_throughput - zone = local.db_zone_count > 1 ? try(local.zones[1], null) : null - } - - volumes_primary = [ - local.hana_data1, local.hana_log1, local.hana_shared1 - ] - volumes_secondary = [ - local.hana_data2, local.hana_log2, local.hana_shared2 - ] - -} diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index debe7b724e..930f501d9f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -397,4 +397,20 @@ locals { }] : [] deploy_monitoring_extension = local.enable_deployment && var.infrastructure.deploy_monitoring_extension && length(var.database.user_assigned_identity_id) > 0 + + use_avg = ( + var.hana_ANF_volumes.use_AVG_for_data) && ( + var.hana_ANF_volumes.use_for_data || var.hana_ANF_volumes.use_for_log || var.hana_ANF_volumes.use_for_shared + ) && !var.use_scalesets_for_deployment + + + create_data_volumes = !local.use_avg && var.hana_ANF_volumes.use_for_data && !var.hana_ANF_volumes.use_existing_data_volume + use_data_volumes = local.use_avg || var.hana_ANF_volumes.use_for_data && var.hana_ANF_volumes.use_existing_data_volume + + create_log_volumes = !local.use_avg && var.hana_ANF_volumes.use_for_log && !var.hana_ANF_volumes.use_existing_log_volume + use_log_volumes = local.use_avg || var.hana_ANF_volumes.use_for_log && var.hana_ANF_volumes.use_existing_log_volume + + create_shared_volumes = !local.use_avg && var.hana_ANF_volumes.use_for_shared && !var.hana_ANF_volumes.use_existing_shared_volume + use_shared_volumes = local.use_avg || var.hana_ANF_volumes.use_for_shared && var.hana_ANF_volumes.use_existing_shared_volume + } From 2c44110ea997c1d9c97c103546c98053628719ee Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 8 Apr 2024 10:55:36 +0300 Subject: [PATCH 496/607] Update subnet_cidr_storage in sap-parameters.tmpl --- .../modules/sap_system/output_files/sap-parameters.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index c59476eaf1..6047f02782 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -94,12 +94,12 @@ platform: ${platform} db_scale_out: ${scale_out} db_no_standby: ${scale_out_no_standby_role} +subnet_cidr_storage: ${subnet_cidr_storage} %{~ endif } subnet_cidr_anf: ${subnet_cidr_anf} subnet_cidr_app: ${subnet_cidr_app} subnet_cidr_db: ${subnet_cidr_db} subnet_cidr_client: ${subnet_cidr_client} -subnet_cidr_storage: ${subnet_cidr_storage} # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability From 34545badd612703697bf17890efc9b8433ae281d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 10 Apr 2024 19:44:19 +0300 Subject: [PATCH 497/607] Update hosts jinja for client subnet --- .../roles-sap-os/2.4-hosts-file/templates/hosts.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index 105c162774..4391ca94e9 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -129,12 +129,12 @@ ansible_facts. {% else %} {# Loop through remaining IPs for the virtual host #} {% for ip in host_ips[1:] %} -{% if ((db_scale_out) and ((subnet_cidr_storage is defined) and (subnet_cidr_storage | ansible.utils.network_in_usable(ip)))) %} +{% if (db_scale_out) %} +{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} - -{% elif ((db_scale_out) and ((subnet_cidr_client is defined) and (subnet_cidr_client | ansible.utils.network_in_usable(ip)))) %} +{% elif (subnet_client_cidr | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-client.' + sap_fqdn) }}{{ '%-21s' | format(host + '-client') }} - +{% endif %} {% else %} {% for vh_name in virtual_host_names if virtual_host_names | length >= 1 %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(vh_name + '.' 
+ sap_fqdn) }}{{ '%-21s' | format(vh_name) }} From 0c52a8385d5ea096f284bc0cfadba54356863f55 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 10 Apr 2024 19:48:19 +0300 Subject: [PATCH 498/607] Update SAP-specific configuration playbook for HANA database scale-out scenario --- deploy/ansible/playbook_02_os_sap_specific_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 5fb35bce5f..e50a36683e 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -157,7 +157,7 @@ subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.interface[0].ipv4.subnet[0].prefix) }}" when: - platform == 'HANA' - - node_tier == 'hana' or ['hana'] in supported_tiers + - db_scale_out - not database_high_availability tags: - always From 8e8e93cf302470f4e9af0d0c1527c06b8aee08b4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 10 Apr 2024 22:30:59 +0300 Subject: [PATCH 499/607] Version update --- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 63dae8c70f..69b49122c8 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -4,7 +4,7 @@ become_user_name: root # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.11.0.0" +SDAF_Version: "3.11.0.2" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index d42a62c4ad..064ddbda20 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.11.0.1 +3.11.0.2 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index cc812745ad..5ea33287dc 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -36,10 +36,10 @@ az logout az account clear if ($ARM_TENANT_ID.Length -eq 0) { - az login --output none --only-show-errors + az login --output none --only-show-errors --scope https://graph.microsoft.com//.default } else { - az login --output none --tenant $ARM_TENANT_ID --only-show-errors + az login --output none --tenant $ARM_TENANT_ID --only-show-errors --scope https://graph.microsoft.com//.default } # Check if access to the Azure DevOps organization is available and prompt for PAT if needed @@ -760,7 +760,7 @@ if ($found_appRegistration.Length -ne 0) { $confirmation = Read-Host "Reset the app registration secret y/n?" 
if ($confirmation -eq 'y') { - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") } else { $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" @@ -774,7 +774,7 @@ else { if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") } #endregion From 59082daf6a07ac24afe7247c7214d9ed145f9050 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 12:41:13 +0300 Subject: [PATCH 500/607] Simplify Web App Identity management --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- .../terraform-units/modules/sap_deployer/app_service.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 828a8f2372..8f73f52b3d 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \n\naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf 
b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index b47c2e790c..3f5ac2048b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -138,7 +138,7 @@ resource "azurerm_windows_web_app" "webapp" { key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id identity { - type = "SystemAssigned, UserAssigned" + type = length(var.deployer.user_assigned_identity_id) == 0 ? "SystemAssigned" : "UserAssigned" identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] } connection_string { From 1e054336e9006f0d1111f419d166307699b3053f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 12:55:58 +0300 Subject: [PATCH 501/607] Update Azure package versions in SDAFWebApp.csproj --- Webapp/SDAF/SDAFWebApp.csproj | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 52c45d9318..c665b68867 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -16,18 +16,18 @@ - + - + - - + + - - + + From 54156d718db331ed2c01552ac859b1c31d51d5d5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 13:13:42 +0300 Subject: [PATCH 502/607] Update Web Application authentication configuration script --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 8f73f52b3d..4ef1743462 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \n\naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role 
assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" From 4f9833174c1e661286a137f4674431262fff25f9 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 13:27:01 +0300 Subject: [PATCH 503/607] Update Web Application authentication configuration script --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 4ef1743462..329153a733 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" 
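For readability, the Markdown document rendered by this one-line printf boils down to the following commands (a sketch; each %s is filled from the pipeline variables listed after the format string, e.g. $(WEBAPP_IDENTITY) and $ARM_SUBSCRIPTION_ID):

    az ad app update --id $(APP_REGISTRATION_APP_ID) \
      --web-home-page-url https://$(WEBAPP_URL_BASE).azurewebsites.net \
      --web-redirect-uris https://$(WEBAPP_URL_BASE).azurewebsites.net/ \
                          https://$(WEBAPP_URL_BASE).azurewebsites.net/.auth/login/aad/callback
    az role assignment create --assignee $(WEBAPP_IDENTITY) --role reader \
      --subscription $ARM_SUBSCRIPTION_ID --scope /subscriptions/$ARM_SUBSCRIPTION_ID
    az role assignment create --assignee $(WEBAPP_IDENTITY) --role 'Storage Table Data Contributor' \
      --subscription $ARM_SUBSCRIPTION_ID --scope /subscriptions/$ARM_SUBSCRIPTION_ID
    az role assignment create --assignee $(WEBAPP_IDENTITY) --role 'Storage Blob Data Contributor' \
      --subscription $ARM_SUBSCRIPTION_ID --scope /subscriptions/$ARM_SUBSCRIPTION_ID
    az webapp restart --ids $(WEBAPP_ID)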
From c62b371a3e0cf59ee381e57e04c619883546e873 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 17:18:00 +0300 Subject: [PATCH 504/607] Update Web Application authentication configuration script --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 329153a733..ecdb431f8b 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Blob Contributor' --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md"\n echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" From e4a87242f5d107f5ae7c627179d9ee57645a696d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 17:18:39 +0300 Subject: [PATCH 505/607] Add SLES 15.3, 15.4, and 15.5 repositories --- deploy/ansible/roles-os/1.3-repository/vars/repos.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index f82c5498b6..d63fdbf9c4 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -37,6 +37,9 @@ repos: sles_sap15.3: sles_sap15.4: sles_sap15.5: + 
sles15.3: + sles15.4: + sles15.5: # Oracle From 08c516d78b5f551c3165c29e9f1243241e868173 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 20:15:39 +0300 Subject: [PATCH 506/607] Update Web Application authentication configuration script and simplify Web App Identity management --- .../terraform-units/modules/sap_deployer/app_service.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 3f5ac2048b..0253936db5 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -138,8 +138,8 @@ resource "azurerm_windows_web_app" "webapp" { key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id identity { - type = length(var.deployer.user_assigned_identity_id) == 0 ? "SystemAssigned" : "UserAssigned" - identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] + type = length(var.deployer.user_assigned_identity_id) == 0 ? "SystemAssigned" : "SystemAssigned, UserAssigned" + identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : [azurerm_user_assigned_identity.deployer[0].id,data.azurerm_user_assigned_identity.deployer[0].id ]] } connection_string { name = "tfstate" From 25b0097d146867bf176fb089339fd00113a1d36b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 20:29:38 +0300 Subject: [PATCH 507/607] Refactor Web App Identity management and update authentication configuration script --- .../modules/sap_deployer/app_service.tf | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 0253936db5..2ad0d6bf5b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -137,23 +137,26 @@ resource "azurerm_windows_web_app" "webapp" { key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id - identity { - type = length(var.deployer.user_assigned_identity_id) == 0 ? "SystemAssigned" : "SystemAssigned, UserAssigned" - identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : [azurerm_user_assigned_identity.deployer[0].id,data.azurerm_user_assigned_identity.deployer[0].id ]] - } - connection_string { - name = "tfstate" - type = "Custom" - value = var.use_private_endpoint ? format("@Microsoft.KeyVault(SecretUri=https://%s.privatelink.vaultcore.azure.net/secrets/tfstate/)", local.user_keyvault_name) : format("@Microsoft.KeyVault(SecretUri=https://%s.vault.azure.net/secrets/tfstate/)", local.user_keyvault_name) - } - - lifecycle { - ignore_changes = [ - app_settings, - zip_deploy_file, - tags - ] - } + identity { + type = length(var.deployer.user_assigned_identity_id) == 0 ? 
( + "SystemAssigned") : ( + "SystemAssigned, UserAssigned" + ) + identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] + } + connection_string { + name = "tfstate" + type = "Custom" + value = var.use_private_endpoint ? format("@Microsoft.KeyVault(SecretUri=https://%s.privatelink.vaultcore.azure.net/secrets/tfstate/)", local.user_keyvault_name) : format("@Microsoft.KeyVault(SecretUri=https://%s.vault.azure.net/secrets/tfstate/)", local.user_keyvault_name) + } + + lifecycle { + ignore_changes = [ + app_settings, + zip_deploy_file, + tags + ] + } } From f805acc717ed0f560a82c5ab1fe70131a46fb6ca Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 20:47:12 +0300 Subject: [PATCH 508/607] Update Web Application authentication configuration script --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index ecdb431f8b..df39852a78 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \naz role assignment create --assignee %s --role 'Storage Table Blob Contributor' --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md"\n + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" From 
bd47a8d9b8f33c41454a04e1d761f3c4436580e0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 11 Apr 2024 21:08:39 +0300 Subject: [PATCH 509/607] Update Web Application authentication configuration script and simplify Web App Identity management --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index df39852a78..d8c5b09b66 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -954,7 +954,7 @@ stages: - bash: | #!/bin/bash - printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s \naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" + printf "Configure the Web Application authentication using the following script.\n\`\`\`bash\naz ad app update --id %s --web-home-page-url https://%s.azurewebsites.net --web-redirect-uris https://%s.azurewebsites.net/ https://%s.azurewebsites.net/.auth/login/aad/callback\n\`\`\`\naz role assignment create --assignee %s --role reader --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role 'Storage Blob Data Contributor' --subscription %s --scope /subscriptions/%s\naz role assignment create --assignee %s --role 'Storage Table Data Contributor' --subscription %s --scope /subscriptions/%s \n\naz webapp restart --ids %s\n\n[Access the Web App](https://%s.azurewebsites.net)" $(APP_REGISTRATION_APP_ID) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_URL_BASE) $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_IDENTITY) $ARM_SUBSCRIPTION_ID $ARM_SUBSCRIPTION_ID $(WEBAPP_ID) $(WEBAPP_URL_BASE) > "$(Build.Repository.LocalPath)/Web Application Configuration.md" echo "##vso[task.uploadsummary]$(Build.Repository.LocalPath)/Web Application Configuration.md" displayName: "Documentation" From 09cd30de6003a891b5c8c31b4c96b495b676aa9b Mon Sep 17 00:00:00 2001 From: devanshjain Date: Thu, 11 Apr 2024 22:20:42 +0000 Subject: [PATCH 510/607] Commented out SSH trust relationship checks in 1.17.2-provision.yml --- .../tasks/1.17.2-provision.yml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index 7b9272d75d..582c6ebb1d 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ 
b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -43,19 +43,19 @@ key: "{{ hostvars[secondary_instance_name].cluster_public_ssh_key }}" when: ansible_hostname == primary_instance_name - - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary - ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" - register: primary_to_secondary_ssh_result - changed_when: false - failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name - when: ansible_hostname == primary_instance_name + # - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary + # ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" + # register: primary_to_secondary_ssh_result + # changed_when: false + # failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name + # when: ansible_hostname == primary_instance_name - - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" - ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" - register: secondary_to_primary_ssh_result - changed_when: false - failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name - when: ansible_hostname == secondary_instance_name + # - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" + # ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" + # register: secondary_to_primary_ssh_result + # changed_when: false + # failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name + # when: ansible_hostname == secondary_instance_name # Clustering commands are based on the Host OS - name: "1.17 Generic Pacemaker - Cluster based on {{ ansible_os_family }} on VM {{ ansible_hostname }}" From ecf5b333dd0088761595dc87c4adb1081f90c261 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Thu, 11 Apr 2024 22:22:21 +0000 Subject: [PATCH 511/607] Revert "Commented out SSH trust relationship checks in 1.17.2-provision.yml" This reverts commit 09cd30de6003a891b5c8c31b4c96b495b676aa9b. 
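For context, the restored checks simply verify that password-less SSH works between the two cluster nodes; each task effectively runs (a sketch, using the play's instance-name variables):

    ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s"
    # the task fails unless stdout matches the peer's short hostname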
--- .../tasks/1.17.2-provision.yml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index 582c6ebb1d..7b9272d75d 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -43,19 +43,19 @@ key: "{{ hostvars[secondary_instance_name].cluster_public_ssh_key }}" when: ansible_hostname == primary_instance_name - # - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary - # ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" - # register: primary_to_secondary_ssh_result - # changed_when: false - # failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name - # when: ansible_hostname == primary_instance_name + - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary + ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" + register: primary_to_secondary_ssh_result + changed_when: false + failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name + when: ansible_hostname == primary_instance_name - # - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" - # ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" - # register: secondary_to_primary_ssh_result - # changed_when: false - # failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name - # when: ansible_hostname == secondary_instance_name + - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" + ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" + register: secondary_to_primary_ssh_result + changed_when: false + failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name + when: ansible_hostname == secondary_instance_name # Clustering commands are based on the Host OS - name: "1.17 Generic Pacemaker - Cluster based on {{ ansible_os_family }} on VM {{ ansible_hostname }}" From 6b550906d1c217985ada9ed6e8681ceef3894ae7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 10:07:30 +0300 Subject: [PATCH 512/607] ACSS updates --- .../0.3.sap-installation-media-storage-details/tasks/main.yaml | 2 ++ deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 55e971e473..29c3f23d43 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -87,6 +87,8 @@ allowSharedKeyAccess: true - name: "0.4 Installation Media: - Check Binaries Storage Account for Shared Key Access with Control Plane Subscription" + when: + - not is_executed_by_acss ansible.builtin.command: >- az storage account show \ --name {{ account_name }} \ diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml 
b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index e2d27e4cbf..00b3fa1403 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -169,7 +169,6 @@ when: - "'scs' in supported_tiers" - usr_sap_install_mountpoint is undefined - - not is_executed_by_acss - name: "1.5 Disk setup - Check if installation root directory exists" ansible.builtin.stat: @@ -256,7 +255,6 @@ - node_tier != 'scs' - "'scs' not in supported_tiers" - usr_sap_install_mountpoint is not defined - - not is_executed_by_acss # Mount File systems for SCS server in Multi-SID installations - name: "2.6 SAP Mounts: - Mount local sapmnt (scs) for oracle shared home installation" From c0ad23ca68d2ca92513a58ebca8def5239892ccd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 10:42:27 +0300 Subject: [PATCH 513/607] Oracle simplification --- deploy/ansible/playbook_04_00_01_db_ha.yaml | 59 +++++-------------- .../roles-db/4.1.3-ora-dg/tasks/main.yaml | 16 ++--- 2 files changed, 22 insertions(+), 53 deletions(-) diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index fa86f92867..80bcf7ab58 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -184,8 +184,7 @@ # Oracle installation has to be done with oracle user. Change the user to become orasid and call the installer. -- hosts: "{{ sap_sid | upper }}_DB[0]" - # serial: 1 +- hosts: "{{ sap_sid | upper }}_DB" name: DB Data Guard setup - Oracle remote_user: "{{ orchestration_ansible_user }}" gather_facts: true @@ -200,16 +199,22 @@ - db_high_availability is defined - database_high_availability is not defined + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + primary_instance_name : "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name + secondary_instance_name: "{{ ansible_play_hosts_all[1] | default('N/A') }}" # Setting up Secondary Instance Name + - name: "Oracle Data Guard Setup" when: - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] + - inventory_hostname == primary_instance_name become: true block: - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: 0 + action: Prepare_Secondary main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -220,21 +225,6 @@ tags: - 4.1.3-ora-dg -- hosts: "{{ sap_sid | upper }}_DB" - name: DB Dataguard setup - Oracle - remote_user: "{{ orchestration_ansible_user }}" - gather_facts: true - vars_files: - - vars/ansible-input-api.yaml # API Input template with defaults - - tasks: - - name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - - name: "Oracle Data Guard Setup" when: - database_high_availability @@ -244,7 +234,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: 1 + action: Prepare_and_Restore main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -262,31 +252,17 @@ suffix: "_DC_ACTION_1" tier: 'oracle' -- hosts: "{{ sap_sid | upper }}_DB[0]" - name: DB Dataguard setup - Oracle - remote_user: "{{ orchestration_ansible_user }}" - gather_facts: true - vars_files: - - vars/ansible-input-api.yaml 
# API Input template with defaults - - tasks: - - name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - - name: "Oracle Data Guard Setup" when: - database_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] + - inventory_hostname == primary_instance_name become: true block: - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: 2 + action: Post_Processing_Primary main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -304,25 +280,18 @@ suffix: "_DC_ACTION_2" tier: 'oracle' -- hosts: "{{ sap_sid | upper }}_DB[1]" - name: DB Dataguard setup on secondary - Oracle - remote_user: "root" - gather_facts: true - vars_files: - - vars/ansible-input-api.yaml # API Input template with defaults - - tasks: - name: "Oracle Data Guard Setup on Secondary" when: - db_high_availability - platform in ['ORACLE', 'ORACLE-ASM'] + - inventory_hostname == secondary_instance_name become: true block: - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: 3 + action: Post_Processing_Secondary main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -371,7 +340,7 @@ - name: "Observer Playbook: Setting the DB facts" ansible.builtin.set_fact: node_tier: observer - action: 4 + action: Setup_Observer main_password: "{{ hostvars.localhost.sap_password }}" tags: - always diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml index 3cc341483e..db477eb52d 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml @@ -15,14 +15,14 @@ ora_secondary: "{{ ansible_play_hosts_all[1] }}" # Oracle Secondary Host current_host: "{{ ansible_hostname }}" when: - - node_tier == "oracle" or node_tier == "oracle-asm" - - action == 1 + - node_tier in ["oracle", "oracle-asm"] + - action == Prepare_and_Restore - name: "Oracle Data Guard: Prepare secondary node" ansible.builtin.include_tasks: "ora-dg-secondary-preparation.yaml" when: - - node_tier == "oracle" or node_tier == "oracle-asm" - - action == 0 + - node_tier in ["oracle", "oracle-asm"] + - action == Prepare_Secondary - name: "Oracle Data Guard: Prepare and Restore" block: @@ -39,7 +39,7 @@ when: - current_host == ora_secondary when: - - action == 1 + - action == Prepare_and_Restore - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" @@ -48,7 +48,7 @@ when: - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" - - action == 2 + - action == Post_Processing_Primary # Enable Flashback Loggining on the Secondary for FSFO - name: "Oracle Data Guard: Post processing on Secondary" @@ -56,14 +56,14 @@ when: - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" - - action == 3 + - action == Post_Processing_Secondary # FSFO is enabled from the Observer. - name: "Oracle Data Guard: Setup Observer" ansible.builtin.include_tasks: "ora-dg-observer-setup.yaml" when: - node_tier == "observer" - - action == 4 + - action == Setup_Observer ... 
# /*---------------------------------------------------------------------------8 # | END | From 3591604a36f8db03868675f78f0e4bf2c5296823 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 11:55:09 +0300 Subject: [PATCH 514/607] Add AutoUpdate.Enabled configuration in 1.1-swap role and enable package cache update in 1.4-packages role --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 1 + .../roles-os/1.4-packages/tasks/1.4.3-update-packages-Suse.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 84f901967f..a234860feb 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -61,6 +61,7 @@ - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } - { state: 'present', regexp: 'Provisioning.Agent=', line: 'Provisioning.Agent=waagent' } + - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=y' } register: wagent_configuration vars: swap_size_mb: "{{ (sap_swap | selectattr('tier', 'search', node_tier) | list | first).swap_size_mb }}" diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-Suse.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-Suse.yaml index b410597f38..7a705032be 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-Suse.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-Suse.yaml @@ -11,6 +11,7 @@ community.general.zypper: name: '*' state: latest + update_cache: true environment: ZYPP_LOCK_TIMEOUT: "20" tags: From 3bbaeb18265b6a5479bc1cd7cd067697cf6de8ed Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 12:26:14 +0300 Subject: [PATCH 515/607] Update deployment type configuration in OS and SAP specific playbooks --- deploy/ansible/playbook_01_os_base_config.yaml | 13 +++++++++++++ .../playbook_02_os_sap_specific_config.yaml | 14 ++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 924707f8c6..91f307363d 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -78,6 +78,19 @@ become_user: "root" block: + - name: "OS configuration playbook: - Set deployment type" + ansible.builtin.set_fact: + single_server: false + tags: + - always + + - name: "OS configuration playbook: - Set deployment type" + ansible.builtin.set_fact: + single_server: true + when: (ansible_play_hosts_all | length) == 1 + tags: + - always + - name: "OS configuration playbook: - Set os fact" ansible.builtin.set_fact: tier: os diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index e50a36683e..d23432d1a7 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -80,6 +80,20 @@ # # -------------------------------------+---------------------------------------8 tasks: + + - name: "SAP OS Configuration: - Set deployment type" + ansible.builtin.set_fact: + single_server: false + tags: + - always + + - name: "SAP OS Configuration: - Set deployment type" + ansible.builtin.set_fact: + single_server: true + when: 
(ansible_play_hosts_all | length) == 1 + tags: + - always + - name: "SAP OS Configuration - Linux based systems" become: true become_user: "root" From 1472a1b412be4ad274b8798d7a667b9ab3cc84c7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 13:18:08 +0300 Subject: [PATCH 516/607] Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index a234860feb..5c8ba3f8d8 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -56,12 +56,12 @@ # ResourceDisk.EnableSwap = n # ResourceDisk.SwapSizeMB = 0 # ResourceDisk.MountOptions = None - - { state: 'present', regexp: 'ResourceDisk.Format=', line: 'ResourceDisk.Format=y' } - - { state: 'present', regexp: 'ResourceDisk.EnableSwap=', line: 'ResourceDisk.EnableSwap=y' } - - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } - - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } - - { state: 'present', regexp: 'Provisioning.Agent=', line: 'Provisioning.Agent=waagent' } - - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=y' } + - { state: 'present', regexp: 'ResourceDisk.Format=', line: 'ResourceDisk.Format=y' } + - { state: 'present', regexp: 'ResourceDisk.EnableSwap=', line: 'ResourceDisk.EnableSwap=y' } + - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } + - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } + - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=y' } + - { state: 'present', regexp: 'Extensions.WaitForCloudInit=', line: 'Extensions.WaitForCloudInit=y' } register: wagent_configuration vars: swap_size_mb: "{{ (sap_swap | selectattr('tier', 'search', node_tier) | list | first).swap_size_mb }}" From 5b8860f8131e1c5d26a4ffc9ff99006892052c3f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 13:43:15 +0300 Subject: [PATCH 517/607] Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 5c8ba3f8d8..ceede8437d 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -60,6 +60,7 @@ - { state: 'present', regexp: 'ResourceDisk.EnableSwap=', line: 'ResourceDisk.EnableSwap=y' } - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } + - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=y' } - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=y' } - { state: 'present', regexp: 'Extensions.WaitForCloudInit=', line: 'Extensions.WaitForCloudInit=y' } register: wagent_configuration From 
c720925e2a1c2215af5b5c366904584fa232aeac Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 15:56:11 +0300 Subject: [PATCH 518/607] Update WAAgent package and restart service in 1.1-swap role --- .../ansible/roles-os/1.1-swap/tasks/main.yaml | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index ceede8437d..df366d84f3 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -19,6 +19,25 @@ notify: - Restart waagent +- name: "1.1 Swap: - Update WAAgent {{ ansible_os_family }}" + ansible.builtin.package: + name: WALinuxAgent + state: latest + when: + - ansible_os_family == 'RedHat' + +- name: "1.1 Swap: - Update WAAgent {{ ansible_os_family }}" + ansible.builtin.package: + name: WALinuxAgent + state: latest + when: + - ansible_os_family == 'Suse' + +- name: "1.1 Swap: - Restart {{ distro_name }}" + ansible.builtin.service: + name: waagent + state: restarted + # Check the current swap size - name: "check swap size" ansible.builtin.shell: set -o pipefail && cat /proc/meminfo | grep SwapTotal @@ -29,7 +48,6 @@ msg: "SWAP size from procinfo: {{ swap_space }}" verbosity: 2 - - name: "Trim Swap space" ansible.builtin.set_fact: swap_value: "{{ swap_space.stdout_lines[0] | regex_search('([0-9]+)') }}" From 3faf5d180b6f7ec98a0fbb02267e8e1f1bb778fd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 19:37:33 +0300 Subject: [PATCH 519/607] Update WAAgent package and restart service in 1.1-swap role --- .../ansible/roles-os/1.1-swap/tasks/main.yaml | 6 ++-- .../roles-os/1.4-packages/tasks/main.yaml | 31 +++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index df366d84f3..4c8172e15a 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -19,21 +19,21 @@ notify: - Restart waagent -- name: "1.1 Swap: - Update WAAgent {{ ansible_os_family }}" +- name: "1.1 Swap: - Update WAAgent on {{ ansible_os_family }}" ansible.builtin.package: name: WALinuxAgent state: latest when: - ansible_os_family == 'RedHat' -- name: "1.1 Swap: - Update WAAgent {{ ansible_os_family }}" +- name: "1.1 Swap: - Update WAAgent on {{ ansible_os_family }}" ansible.builtin.package: name: WALinuxAgent state: latest when: - ansible_os_family == 'Suse' -- name: "1.1 Swap: - Restart {{ distro_name }}" +- name: "1.1 Swap: - Restart WAAgent on {{ distro_name }}" ansible.builtin.service: name: waagent state: restarted diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 9b24ff7a8e..957892fd71 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -25,6 +25,37 @@ - upgrade_packages is defined - upgrade_packages +- name: "1.4 Packages: - Resolve failed waagent updates" + ansible.builtin.stat: + path: /etc/waagent.conf + register: waagent_conf + +- name: "1.4 Packages: - Resolve failed waagent updates" + ansible.builtin.stat: + path: /etc/waagent.conf.rpmsave + register: waagent_conf_save + +- name: "1.4 Copy the conf file" + ansible.builtin.copy: + src: /etc/waagent.conf.rpmsave + dest: /etc/waagent.conf + owner: root + group: root + mode: 0644 + when: + - not waagent_conf.stat.exists + 
- waagent_conf_save.stat.exists + register: waagent_conf_copy + +- name: "1.1 Swap: - Restart WAAgent on {{ distro_name }}" + ansible.builtin.service: + name: waagent + state: restarted + when: + - waagent_conf_copy is defined + - waagent_conf_copy.changed + + # /*----------------------------------------------------------------------------8 # | END | # +------------------------------------4---------------------------------------*/ From fee5a679cebc5886d507a422ab4e23260dd06ab3 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 21:06:05 +0300 Subject: [PATCH 520/607] Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 4c8172e15a..25e91d8e33 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -78,8 +78,8 @@ - { state: 'present', regexp: 'ResourceDisk.EnableSwap=', line: 'ResourceDisk.EnableSwap=y' } - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } - - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=y' } - - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=y' } + - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=n' } + - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=n' } - { state: 'present', regexp: 'Extensions.WaitForCloudInit=', line: 'Extensions.WaitForCloudInit=y' } register: wagent_configuration vars: From 8f18d14879715a115862e5bb336aceb4ceae4b18 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 15 Apr 2024 23:05:48 +0300 Subject: [PATCH 521/607] Update WAAgent package and restart service in 1.4-packages role --- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 957892fd71..d2dcfdb5bb 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -36,6 +36,8 @@ register: waagent_conf_save - name: "1.4 Copy the conf file" + become: true + become_user: root ansible.builtin.copy: src: /etc/waagent.conf.rpmsave dest: /etc/waagent.conf From 43cce2b5398f2f4b691f76fb8367168c1cb7f242 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 06:38:53 +0300 Subject: [PATCH 522/607] Update waagent configuration check in 1.4-packages role --- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index d2dcfdb5bb..07cf4c2367 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -25,12 +25,12 @@ - upgrade_packages is defined - upgrade_packages -- name: "1.4 Packages: - Resolve failed waagent updates" +- name: "1.4 Packages: - Check for (waagent_conf)" ansible.builtin.stat: path: /etc/waagent.conf register: 
waagent_conf -- name: "1.4 Packages: - Resolve failed waagent updates" +- name: "1.4 Packages: - Check for (waagent.conf.rpmsave)" ansible.builtin.stat: path: /etc/waagent.conf.rpmsave register: waagent_conf_save From 046a05ee95659d0a4b7e10203b0fdf1dedf3b634 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 08:04:42 +0300 Subject: [PATCH 523/607] Update waagent configuration check and systemd service reload in 1.4-packages role --- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 07cf4c2367..262861ff0b 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -39,16 +39,18 @@ become: true become_user: root ansible.builtin.copy: - src: /etc/waagent.conf.rpmsave + remote_src: /etc/waagent.conf.rpmsave dest: /etc/waagent.conf - owner: root - group: root - mode: 0644 when: - not waagent_conf.stat.exists - waagent_conf_save.stat.exists register: waagent_conf_copy + +- name: "1.1 Swap: - Force systemd to reread configs {{ distro_name }}" + ansible.builtin.systemd_service: + daemon_reload: true + - name: "1.1 Swap: - Restart WAAgent on {{ distro_name }}" ansible.builtin.service: name: waagent From 268b4efbd0ec47df071a65ef330a8d9d7428d157 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 08:05:53 +0300 Subject: [PATCH 524/607] Update AutoUpdate.Enabled configuration and add Extensions.WaitForCloudInit configuration in 1.1-swap role --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 25e91d8e33..4c8172e15a 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -78,8 +78,8 @@ - { state: 'present', regexp: 'ResourceDisk.EnableSwap=', line: 'ResourceDisk.EnableSwap=y' } - { state: 'present', regexp: 'ResourceDisk.SwapSizeMB=', line: 'ResourceDisk.SwapSizeMB={{ swap_size_mb | default(2052) }}' } - { state: 'present', regexp: 'ResourceDisk.MountPoint=', line: 'ResourceDisk.MountPoint=/mnt' } - - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=n' } - - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=n' } + - { state: 'present', regexp: 'AutoUpdate.Enabled=', line: 'AutoUpdate.Enabled=y' } + - { state: 'present', regexp: 'AutoUpdate.UpdateToLatestVersion=', line: 'AutoUpdate.UpdateToLatestVersion=y' } - { state: 'present', regexp: 'Extensions.WaitForCloudInit=', line: 'Extensions.WaitForCloudInit=y' } register: wagent_configuration vars: From c1fa51027613b01119636a34ec570bdc7107334f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 13:27:41 +0300 Subject: [PATCH 525/607] Update waagent configuration check and systemd service reload in 1.1-swap role --- .../ansible/roles-os/1.1-swap/tasks/main.yaml | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 4c8172e15a..e05086e6ec 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -60,6 +60,42 @@ ansible.builtin.debug: msg: "Swap size: {{ 
swap_size }}" +- name: "1.1 Swap: - Check for (waagent_conf)" + ansible.builtin.stat: + path: /etc/waagent.conf + register: waagent_conf + +- name: "1.1 Swap: - Check for (waagent.conf.rpmsave)" + ansible.builtin.stat: + path: /etc/waagent.conf.rpmsave + register: waagent_conf_save + +- name: "1.1 Swap: Copy the conf file" + become: true + become_user: root + ansible.builtin.copy: + remote_src: /etc/waagent.conf.rpmsave + dest: /etc/waagent.conf + when: + - not waagent_conf.stat.exists + - waagent_conf_save.stat.exists + register: waagent_conf_copy + + +- name: "1.1 Swap: - Force systemd to reread configs {{ distro_name }}" + ansible.builtin.systemd_service: + daemon_reload: true + +- name: "1.1 Swap: - Restart WAAgent on {{ distro_name }}" + ansible.builtin.service: + name: waagent + state: restarted + when: + - waagent_conf_copy is defined + - waagent_conf_copy.changed + + + - name: "1.1 Swap: - Ensure waagent file is configured with proper parameters" ansible.builtin.lineinfile: dest: /etc/waagent.conf From 5b5925a42439b1ed789d19e5abab0f65f5a3f634 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 13:43:09 +0300 Subject: [PATCH 526/607] Update waagent configuration check and systemd service reload in 1.1-swap role --- deploy/ansible/roles-os/1.1-swap/tasks/main.yaml | 3 ++- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index e05086e6ec..db109b41f2 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -74,7 +74,8 @@ become: true become_user: root ansible.builtin.copy: - remote_src: /etc/waagent.conf.rpmsave + remote_src: true + src: /etc/waagent.conf.rpmsave dest: /etc/waagent.conf when: - not waagent_conf.stat.exists diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 262861ff0b..298e1618d6 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -39,7 +39,8 @@ become: true become_user: root ansible.builtin.copy: - remote_src: /etc/waagent.conf.rpmsave + remote_src: true + src: /etc/waagent.conf.rpmsave dest: /etc/waagent.conf when: - not waagent_conf.stat.exists From 84f75ee9d8cdddb52be48830104de06a0b968794 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 16 Apr 2024 13:43:42 +0300 Subject: [PATCH 527/607] Update database_high_availability condition in playbook_04_00_01_db_ha.yaml --- .../ansible/playbook_01_os_base_config.yaml | 50 +++++++++---------- .../playbook_02_os_sap_specific_config.yaml | 42 ++++++++-------- deploy/ansible/playbook_04_00_01_db_ha.yaml | 2 +- 3 files changed, 47 insertions(+), 47 deletions(-) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 91f307363d..6701c03450 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -34,7 +34,7 @@ - always - name: "OS configuration playbook: - Read password" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-misc/0.1-passwords tasks_from: windows.yaml when: @@ -98,89 +98,89 @@ - always - name: "OS configuration playbook: - Set sudoers" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.0-sudoers tags: - 1.0-sudoers - name: "OS configuration 
playbook: - Set swap" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.1-swap tags: - 1.1-swap - name: "OS configuration playbook: - Set hostname" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.2-hostname tags: - 1.2-hostname - name: "OS configuration playbook: - Ensure the repositories are registered" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.3-repository tags: - 1.3-repository - name: "OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Configure volume groups and logical volumes" when: node_tier not in ["oracle-multi-sid", "oracle-asm"] - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.5-disk-setup tags: - 1.5-disk-setup - name: "OS configuration playbook: - Configure volume groups and logical volumes (sharedHome)" when: node_tier == "oracle-multi-sid" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.5.2-disk-setup-ora-multi-sid tags: - 1.5.2-disk-setup-ora-multi-sid - name: "OS configuration playbook: - Chrony role" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.7-chrony tags: - 1.7-chrony - name: "OS configuration playbook: - Ensure the kernel parameters are set" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.9-kernelparameters tags: - 1.9-kernelparameters - name: "OS configuration playbook: - Configure networking" when: ansible_os_family | upper == "REDHAT" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.10-networking tags: - 1.10-networking - name: "OS configuration playbook: - Configure accounts" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.11-accounts tags: - 1.11-accounts - name: "OS configuration playbook: - Configure accounts (Oracle)" when: node_tier in ["oracle", "observer"] - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.11-accounts vars: tier: ora - name: "OS configuration playbook: - Configure MOTD" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.13-MOTD tags: - 1.13-MOTD - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.16-services tags: - 1.16-services @@ -189,7 +189,7 @@ when: - prometheus - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.20-prometheus tags: - 1.20-prometheus @@ -212,37 +212,37 @@ verbosity: 4 - name: "OS configuration playbook: - Add Needed packages" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Set swap" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.1-swap tags: - 1.1-swap - name: "OS configuration playbook: - Memory Dump" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.2-memory-dump tags: - 1.2-memory-dump - name: "OS configuration playbook: - Configure Disks" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.5-disk-setup tags: - 1.5-disk-setup - name: "OS configuration playbook: - Disable the 
Firewall" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.10-firewall tags: - 1.10-firewall - name: "OS configuration playbook: - Join Domain" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/windows/1.11-domain-join vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -326,19 +326,19 @@ - always - name: "OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.4-packages tags: - 1.4-packages - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.16-services tags: - 1.16-services - name: "OS configuration playbook: - Ensure the needed services are started" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.11-iscsi-server tags: - 2.11-iSCSI-server diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index d23432d1a7..1037ef3831 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -36,7 +36,7 @@ - always - name: Include 0.3.sap-installation-media-storage-details role - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-misc/0.3.sap-installation-media-storage-details when: not is_run_with_infraCreate_only vars: @@ -45,7 +45,7 @@ - always - name: "SAP OS configuration playbook: - Read password" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-misc/0.1-passwords tasks_from: windows.yaml when: platform == "SQLSERVER" @@ -193,25 +193,25 @@ # vars: # ipaddr: "{{ ipadd }}" - name: "SAP OS configuration playbook: - Create hosts file" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.4-hosts-file tags: - 2.4-hosts-file - name: "SAP OS configuration playbook: - Ensure the repositories are registered" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.3-repository tags: - 1.3-repository - name: "SAP OS configuration playbook: - Configure accounts" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.11-accounts tags: - 1.11-accounts - name: "SAP OS configuration playbook: - Ensure the packages are registered" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.4-packages tags: - 1.4-packages @@ -220,7 +220,7 @@ when: - scs_high_availability or database_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2'] - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.4-packages tags: - 1.4-packages @@ -228,14 +228,14 @@ tier: ha - name: "SAP OS configuration playbook: - Configure volume groups, logical volumes and file systems" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.5-disk-setup when: node_tier not in ["oracle-multi-sid", "oracle-asm"] tags: - 1.5-disk-setup - name: "SAP OS configuration playbook: - Configure volume groups, logical volumes and file systems" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.5.1.1-disk-setup-asm-sap when: - node_tier == "oracle-asm" @@ -243,26 +243,26 @@ - 1.5.1.1-disk-setup-asm-sap - name: "SAP OS configuration playbook: - Configure the disks for Oracle Multi SID" - ansible.builtin.import_role: + ansible.builtin.include_role: name: 
roles-os/1.5.2-disk-setup-ora-multi-sid when: node_tier == "oracle-multi-sid" tags: - 1.5.2-disk-setup-ora-multi-sid - name: "SAP OS configuration playbook: - Configure the kernel parameters" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.9-kernelparameters tags: - 1.9-kernelparameters - name: "SAP OS configuration playbook: - Create SAP users/groups" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.5-sap-users tags: - 2.5-sap-users - name: "SAP OS configuration playbook: - Ensure the services are configured" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.16-services tags: - 1.16-services @@ -271,7 +271,7 @@ when: - scs_high_availability or database_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2'] - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-os/1.16-services tags: - 1.16-services @@ -279,25 +279,25 @@ tier: ha - name: "SAP OS configuration playbook: - directory permissions" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.2-sapPermissions tags: - 2.2-sapPermissions - name: "SAP OS configuration playbook: - Configurations according to SAP Notes" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.10-sap-notes tags: - 2.10-sap-notes - name: "SAP OS configuration playbook: - configure exports" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.3-sap-exports tags: - 2.3-sap-exports - name: "SAP OS configuration playbook: - Mount the file systems" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/2.6-sap-mounts tags: - 2.6-sap-mounts @@ -339,7 +339,7 @@ - domain_sqlsvc_account is not defined - name: "SAP OS configuration playbook: - Add local groups and Permissions" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/windows/2.5-sap-users vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -347,7 +347,7 @@ domain_service_account: "{{ hostvars.localhost.adsvc_account }}" - name: "SAP OS configuration playbook: - Sharing Data Folder" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/windows/2.3-sap-exports vars: winadm_password: "{{ hostvars.localhost.winadm_password }}" @@ -356,7 +356,7 @@ sql_svc_account: "{{ domain_sqlsvc_account }}" - name: "SAP OS configuration playbook: - Mounting Shared Folder" - ansible.builtin.import_role: + ansible.builtin.include_role: name: roles-sap-os/windows/2.6-sap-mounts vars: domain_user_password: "{{ hostvars.localhost.winadm_password }}" diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 80bcf7ab58..d43f586196 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -168,7 +168,7 @@ - name: "HANA HA Setup: - run the Pacemaker role" ansible.builtin.include_role: name: roles-sap/5.5-hanadb-pacemaker - when: db_high_availability + when: database_high_availability tags: - 5.5-hanadb-pacemaker From c408a4b9b2fe82b54415d2f59d95593c98af92d3 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 14:20:15 +0300 Subject: [PATCH 528/607] Add the ability to block app registration --- deploy/scripts/New-SDAFDevopsProject.ps1 | 75 +++++++++++++++--------- 1 file changed, 47 insertions(+), 28 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 
5ea33287dc..19b10965c7 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -170,12 +170,17 @@ else { } -$ApplicationName = $ControlPlanePrefix + "-configuration-app" +$WebApp = $true +if ($Env:SDAF_WEBAPP -eq "true") { + $ApplicationName = $ControlPlanePrefix + "-configuration-app" -if ($Env:SDAF_APP_NAME.Length -ne 0) { - $ApplicationName = $Env:SDAF_APP_NAME + if ($Env:SDAF_APP_NAME.Length -ne 0) { + $ApplicationName = $Env:SDAF_APP_NAME + } +} +else { + $WebApp = $false } - $confirmation = Read-Host "Use Agent pool with name '$Pool_Name' y/n?" if ($confirmation -ne 'y') { @@ -288,7 +293,6 @@ else { } - $confirmation = Read-Host "You can optionally import the Terraform and Ansible code from GitHub into Azure DevOps, however, this should only be done if you cannot access github from the Azure DevOps agent or if you intend to customize the code. Do you want to run the code from GitHub y/n?" if ($confirmation -ne 'y') { Add-Content -Path $fname -Value "" @@ -470,8 +474,8 @@ else { Add-Content -Path $templatename " type: GitHub" Add-Content -Path $templatename -Value (" endpoint: " + $ghConn) Add-Content -Path $templatename " name: Azure/sap-automation" - Add-Content -Path $templatename " ref: refs/heads/main" -# Add-Content -Path $templatename -Value (" ref: refs/tags/" + $versionLabel) + Add-Content -Path $templatename " ref: refs/heads/main" + # Add-Content -Path $templatename -Value (" ref: refs/tags/" + $versionLabel) $cont = Get-Content -Path $templatename -Raw @@ -748,33 +752,35 @@ Add-Content -Path $fname -Value "" Add-Content -Path $fname -Value ("Web Application: " + $ApplicationName) #region App registration -Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green +if ($WebApp) { + Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green -$found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) + $found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) -if ($found_appRegistration.Length -ne 0) { - Write-Host "Found an existing App Registration:" $ApplicationName - $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json + if ($found_appRegistration.Length -ne 0) { + Write-Host "Found an existing App Registration:" $ApplicationName + $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - $APP_REGISTRATION_ID = $ExistingData.appId + $APP_REGISTRATION_ID = $ExistingData.appId - $confirmation = Read-Host "Reset the app registration secret y/n?" - if ($confirmation -eq 'y') { - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + $confirmation = Read-Host "Reset the app registration secret y/n?" 
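+    # 'y' appends a fresh client secret through 'az ad app credential reset';
+    # any other answer keeps the current secret and prompts the operator to
+    # paste it in manually below.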
+ if ($confirmation -eq 'y') { + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + } + else { + $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" + } } else { - $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" - } -} -else { - Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green - Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' + Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green + Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' - $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "") + $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "") - if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } + if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + } } #endregion @@ -840,7 +846,12 @@ if ($authenticationMethod -eq "Service Principal") { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + if ($WebApp) { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name 
AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + } + else { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + } $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) } @@ -876,7 +887,13 @@ else { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + if ($WebApp) { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + } + else { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + } + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) } @@ -895,7 +912,9 @@ else { $groups.Add($Control_plane_groupID) -az pipelines variable-group variable update --group-id $Control_plane_groupID --name "WEB_APP_CLIENT_SECRET" --value $WEB_APP_CLIENT_SECRET --secret true --output none --only-show-errors +if ($WebApp) { + az pipelines variable-group variable update --group-id $Control_plane_groupID --name "WEB_APP_CLIENT_SECRET" 
--value $WEB_APP_CLIENT_SECRET --secret true --output none --only-show-errors +} #endregion From 015b557693fd5ce36365161ef54c57f008403f9a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 16:37:18 +0300 Subject: [PATCH 529/607] Update systemd service file path in 5.6.7-config-systemd-sap-start.yml --- .../tasks/5.6.7-config-systemd-sap-start.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 2bbaad04a2..2be63e47b3 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -8,7 +8,7 @@ # the path for the service file is /etc/systemd/system/SAP_.service - name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" ansible.builtin.stat: - path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ item }}.service" register: systemd_service_file_path loop: - "{{ scs_instance_number }}" From 46ffa2b22ad5e97540216e1051983e55ddb6b95e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 16:37:52 +0300 Subject: [PATCH 530/607] Update systemd service file path in 5.6.7-config-systemd-sap-start.yml --- .../tasks/5.6.7-config-systemd-sap-start.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 2be63e47b3..2bbaad04a2 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -8,7 +8,7 @@ # the path for the service file is /etc/systemd/system/SAP_.service - name: "5.6 SCSERS - check if the SAP ASCS/SCS and ERS resources are created as systemd services" ansible.builtin.stat: - path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ item }}.service" + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" register: systemd_service_file_path loop: - "{{ scs_instance_number }}" From e19406bcd905c3a2d5127e5c7385496d3833200e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 16:40:59 +0300 Subject: [PATCH 531/607] Update systemd service file path in 5.6.7-config-systemd-sap-start.yml --- .../tasks/5.6.7-config-systemd-sap-start.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 2bbaad04a2..d58571c2ec 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -16,6 +16,10 @@ loop_control: loop_var: sap_instance_number +- name: "5.6 SCSERS - Set fact for the systemd services existance" + ansible.builtin.debug: + loop_var: systemd_service_file_path + - name: "5.6 SCSERS - Set fact for the systemd services existance" ansible.builtin.set_fact: systemd_service_names: "{{ @@ -25,6 +29,11 @@ | regex_replace('/etc/systemd/system/', '') 
}}" +- name: "5.6 SCSERS - Set fact for the systemd services existance" + ansible.builtin.debug: + loop_var: systemd_service_names + + - name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" when: - systemd_service_names is defined From ad31c8a03e66778df6381a21fc304932ba5480bf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 19:12:25 +0300 Subject: [PATCH 532/607] Update web_instance_number and add web_sid variable in sap_system/transform.tf --- deploy/terraform/run/sap_system/transform.tf | 2 ++ .../sap_system/common_infrastructure/variables_global.tf | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 0941eb5d2b..c173ea2877 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -223,6 +223,7 @@ locals { 0 ) web_instance_number = var.web_instance_number + web_sid = upper(var.web_sid) web_sku = try(coalesce(var.webdispatcher_server_sku, var.application_tier.web_sku), "") web_use_ppg = (var.webdispatcher_server_count) > 0 ? var.use_scalesets_for_deployment ? ( false) : ( @@ -232,6 +233,7 @@ locals { false) : ( var.webdispatcher_server_use_avset ) + deploy_v1_monitoring_extension = var.deploy_v1_monitoring_extension user_assigned_identity_id = var.user_assigned_identity_id } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 95273c5de2..28eac42bee 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -5,6 +5,12 @@ variable "application_tier" { ) error_message = "The sid must be specified in the sid field." } + validation { + condition = ( + var.application_tier.webdispatcher_count > 0 ? length(trimspace(try(var.application_tier.web_sid, ""))) != 3 : false + ) + error_message = "The sid must be specified in the sid field." + } validation { condition = ( From a3c65045baf419efe7d15f744d5a22ba2177f821 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 19:20:01 +0300 Subject: [PATCH 533/607] Fix validation error message for web dispatcher sid in variables_global.tf --- .../sap_system/common_infrastructure/variables_global.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 28eac42bee..d269e3dce0 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -7,9 +7,9 @@ variable "application_tier" { } validation { condition = ( - var.application_tier.webdispatcher_count > 0 ? length(trimspace(try(var.application_tier.web_sid, ""))) != 3 : false + var.application_tier.webdispatcher_count > 0 ? length(trimspace(try(var.application_tier.web_sid, ""))) == 3 : false ) - error_message = "The sid must be specified in the sid field." + error_message = "The web dispatcher sid must be specified in the web_sid field." 
} validation { From d47e2c8002c6cab9f545f2fe4494151bee89e7c8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 21:24:02 +0300 Subject: [PATCH 534/607] Remove chkconfig package from os-packages.yaml --- deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 9669c97d94..57ba6b8e8d 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -224,7 +224,6 @@ packages: - { tier: 'os', package: 'libpam.so.0', node_tier: 'db2', state: 'present' } - { tier: 'db2', package: 'acl', node_tier: 'db2', state: 'present' } # --------------------------- End - Packages required for DB2 -------------------------------------------8 - - { tier: 'sapos', package: 'chkconfig', node_tier: 'hana', state: 'present' } - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'tuned-profiles-sap*', node_tier: 'all', state: 'present' } From 57e8c6a270a200e46be01e980f33096cf02408ef Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 22:46:54 +0300 Subject: [PATCH 535/607] Update systemd service file path in 5.6.7-config-systemd-sap-start.yml --- .../tasks/5.6.7-config-systemd-sap-start.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index d58571c2ec..eef3b2d41a 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -18,7 +18,7 @@ - name: "5.6 SCSERS - Set fact for the systemd services existance" ansible.builtin.debug: - loop_var: systemd_service_file_path + var: systemd_service_file_path - name: "5.6 SCSERS - Set fact for the systemd services existance" ansible.builtin.set_fact: @@ -31,7 +31,7 @@ - name: "5.6 SCSERS - Set fact for the systemd services existance" ansible.builtin.debug: - loop_var: systemd_service_names + var: systemd_service_names - name: "5.6 SCSERS - Disable the services and stop auto-restart if they exist" From d8f95ba577f7f1404967d750937ce1136d35e737 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 22:54:25 +0300 Subject: [PATCH 536/607] Update OS version check for RHEL 8.2 and SLES 15 in 5.6.1-set_runtime_facts.yml --- .../5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index 1a8c3d60f7..f17e29eda1 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -146,14 +146,20 @@ path: /etc/sap_deployment_automation//{{ sap_sid | upper }}/sap_deployment_ers.txt register: ers_installed +- name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: false + is_sles_15_or_newer: false - name: "5.6 SCSERS - check if the OS version is RHEL 8.2 or 
newer" ansible.builtin.set_fact: is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + when: ansible_os_family | upper == 'REDHAT' - name: "5.6 SCSERS - check if the OS version is SLES 15 or newer" ansible.builtin.set_fact: is_sles_15_or_newer: "{{ ansible_distribution_version is version('15', '>=') | default(false) }}" + when: ansible_os_family | upper == 'SUSE' # /*---------------------------------------------------------------------------8 # | END | From f0255c3a69efa8b6f2ace105147c05286a22b458 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 17 Apr 2024 23:45:28 +0300 Subject: [PATCH 537/607] Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml --- .../tasks/1.4.0-packages-RedHat-prep.yaml | 34 +++++++++++++++++++ .../roles-os/1.4-packages/tasks/main.yaml | 28 +++++++++++++-- .../1.4-packages/vars/os-packages.yaml | 1 + 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml index 53f826f57d..6c44e817cf 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml @@ -8,6 +8,40 @@ - name: "1.4 Packages: - Import package list" ansible.builtin.include_vars: os-packages.yaml +- name: "5.6 SCSERS - check if the OS version is RHEL 9.0 or newer" + ansible.builtin.set_fact: + is_rhel_90_or_newer: "{{ ansible_distribution_version is version('9.0', '>=') | default(false) }}" + when: ansible_os_family | upper == 'REDHAT' + +- name: "1.4 Packages: - Check if /etc/init.d exists" + ansible.builtin.stat: + path: /etc/init.d + register: initd_dir + +- name: "5.6 SCSERS - check if the OS version is RHEL 9.0 or newer" + ansible.builtin.set_fact: + is_rhel_90_or_newer: "{{ ansible_distribution_version is version('9.0', '>=') | default(false) }}" + init_d_exists: "{{ initd_dir.stat.exists }}" + when: ansible_os_family | upper == 'REDHAT' + +- name: "1.4 Packages: - Copy /etc/init.d" + ansible.builtin.copy: + remote_src: false + src: /etc/init.d + dest: /etc/init.d.bckp + register: initd_copy + when: + - is_rhel_90_or_newer + - init_d_exists + +- name: "1.4 Packages: - Remove /etc/init.d" + ansible.builtin.file: + src: /etc/init.d + state: absent + when: + - is_rhel_90_or_newer + - init_d_exists + # /*----------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 298e1618d6..c570e6c911 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -35,7 +35,7 @@ path: /etc/waagent.conf.rpmsave register: waagent_conf_save -- name: "1.4 Copy the conf file" +- name: "1.4 Packages: - Copy the conf file" become: true become_user: root ansible.builtin.copy: @@ -48,11 +48,11 @@ register: waagent_conf_copy -- name: "1.1 Swap: - Force systemd to reread configs {{ distro_name }}" +- name: "1.4 Packages: - Force systemd to reread configs {{ distro_name }}" ansible.builtin.systemd_service: daemon_reload: true -- name: "1.1 Swap: - Restart WAAgent on {{ distro_name }}" +- name: "1.4 Packages: - Restart WAAgent on {{ distro_name }}" ansible.builtin.service: name: waagent state: restarted @@ -60,6 +60,28 @@ - waagent_conf_copy is defined - waagent_conf_copy.changed +- 
name: "1.4 Packages: - Check if /etc/init.d.bckp exists" + ansible.builtin.stat: + path: /etc/init.d.bckp + register: initd_dir_bckp + +- name: "1.4 Packages: - check if the OS version is RHEL 9.0 or newer" + ansible.builtin.set_fact: + is_rhel_90_or_newer: "{{ ansible_distribution_version is version('9.0', '>=') | default(false) }}" + init_d_bckp_exists: "{{ initd_dir_bckp.stat.exists }}" + when: ansible_os_family | upper == 'REDHAT' + +- name: "1.4 Packages: - Restore /etc/init.d.bckp" + ansible.builtin.copy: + remote_src: false + src: /etc/init.d.bckp + dest: /etc/init.d + register: initd_copy + when: + - is_rhel_90_or_newer + - init_d_bckp_exists + + # /*----------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 57ba6b8e8d..9669c97d94 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -224,6 +224,7 @@ packages: - { tier: 'os', package: 'libpam.so.0', node_tier: 'db2', state: 'present' } - { tier: 'db2', package: 'acl', node_tier: 'db2', state: 'present' } # --------------------------- End - Packages required for DB2 -------------------------------------------8 + - { tier: 'sapos', package: 'chkconfig', node_tier: 'hana', state: 'present' } - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'tuned-profiles-sap*', node_tier: 'all', state: 'present' } From 448c26b38404d11f4be64bd83ee6030ee4c799fa Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 00:00:11 +0300 Subject: [PATCH 538/607] Update Oracle ASM backup process and fix file permissions --- .../4.1.2-ora-asm-db-install/tasks/main.yaml | 26 ++++++++++++------- .../tasks/1.4.0-packages-RedHat-prep.yaml | 2 +- .../roles-os/1.4-packages/tasks/main.yaml | 2 +- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index d286e5164a..12f5b4ae1e 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -230,22 +230,24 @@ # Backup Oracle GRID Home prior to patching. 
- name: "Oracle ASM : BACKUP ORACLE GRID" - become: true - become_user: "root" - ansible.builtin.shell: | - cp -rp /oracle/GRID/{{ ora_version }} /oracle/GRID/{{ ora_version }}.bck + become: true + become_user: "root" + ansible.builtin.copy: + src: /oracle/GRID/{{ ora_version }} + dest: /oracle/GRID/{{ ora_version }}.bck + remote_src: true + mode: preserve + directory_mode: preserve + when: + - not opatchgrid_stat.stat.exists register: gridbackup - args: - creates: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt - executable: /bin/csh - - name: "Oracle ASM: Create flag gridbackedup.txt " ansible.builtin.file: path: /etc/sap_deployment_automation/{{ sap_sid | upper }}/gridbackedup.txt state: touch mode: '0755' - when: gridbackup.rc == 0 + when: gridbackup.changed - name: "Oracle ASM: Check if 'OPatch.bck' exists" ansible.builtin.stat: @@ -656,10 +658,14 @@ owner: oracle group: oinstall + - name: "ORACLE ASM: Post Processing - SBP Patching - progress" + ansible.builtin.debug: + msg: "Running SBP Patching, please wait" + - name: "Oracle ASM: Post Processing - SBP Patching" become: true become_user: "oracle" - ansible.builtin.shell: $IHRDBMS/MOPatch/mopatch.sh -v -s {{ oracle_sbp_patch }} + ansible.builtin.shell: $IHRDBMS/MOPatch/mopatch.sh -v -s {{ oracle_sbp_patch }} environment: DB_SID: "{{ db_sid }}" CV_ASSUME_DISTID: OL7 diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml index 6c44e817cf..225a11e2ba 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml @@ -26,7 +26,7 @@ - name: "1.4 Packages: - Copy /etc/init.d" ansible.builtin.copy: - remote_src: false + remote_src: true src: /etc/init.d dest: /etc/init.d.bckp register: initd_copy diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index c570e6c911..1aebd3f068 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -73,7 +73,7 @@ - name: "1.4 Packages: - Restore /etc/init.d.bckp" ansible.builtin.copy: - remote_src: false + remote_src: true src: /etc/init.d.bckp dest: /etc/init.d register: initd_copy From c48266429758625875ceaa08be874527293c0629 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 00:06:12 +0300 Subject: [PATCH 539/607] Fix file path in 1.4.0-packages-RedHat-prep.yaml --- .../roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml index 225a11e2ba..c82d0ca31c 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml @@ -36,7 +36,7 @@ - name: "1.4 Packages: - Remove /etc/init.d" ansible.builtin.file: - src: /etc/init.d + path: /etc/init.d state: absent when: - is_rhel_90_or_newer From 29a5216a412c18cd797319cfa18605428d12f9bd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 00:34:40 +0300 Subject: [PATCH 540/607] Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml --- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 1aebd3f068..87a4ef40d5 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -78,7 +78,7 @@ dest: /etc/init.d register: initd_copy when: - - is_rhel_90_or_newer + - is_rhel_90_or_newer | default(false) - init_d_bckp_exists From b2aebae785fcce55811dd8e98dc42e469e0aa424 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 00:52:59 +0300 Subject: [PATCH 541/607] Update file path and preserve file permissions in 1.4.0-packages-RedHat-prep.yaml --- .../1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml | 4 +++- deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml index c82d0ca31c..adb3559c87 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml @@ -28,7 +28,9 @@ ansible.builtin.copy: remote_src: true src: /etc/init.d - dest: /etc/init.d.bckp + dest: /etc/init.d_bckp + mode: preserve + directory_mode: preserve register: initd_copy when: - is_rhel_90_or_newer diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml index 87a4ef40d5..534b6d8338 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml @@ -76,6 +76,8 @@ remote_src: true src: /etc/init.d.bckp dest: /etc/init.d + mode: preserve + directory_mode: preserve register: initd_copy when: - is_rhel_90_or_newer | default(false) From ed45a50e952ed7796f5cf36508932f782f89f811 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 01:06:04 +0300 Subject: [PATCH 542/607] Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml --- deploy/ansible/playbook_04_00_01_db_ha.yaml | 8 ++++---- deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index d43f586196..3125a904f9 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -234,7 +234,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: Prepare_and_Restore + action: 'Prepare_and_Restore' main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -262,7 +262,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: Post_Processing_Primary + action: 'Post_Processing_Primary' main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -291,7 +291,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: Post_Processing_Secondary + action: 'Post_Processing_Secondary' main_password: "{{ hostvars.localhost.sap_password }}" tags: - always @@ -340,7 +340,7 @@ - name: "Observer Playbook: Setting the DB facts" ansible.builtin.set_fact: node_tier: observer - action: Setup_Observer + action: 'Setup_Observer' main_password: "{{ hostvars.localhost.sap_password }}" 
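      # The quoted value pairs with the main.yaml change below, where the task
      # conditions compare action == 'Setup_Observer' as a string literal
      # instead of an undefined Jinja variable.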
tags: - always diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml index db477eb52d..75d97c0c76 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml @@ -16,7 +16,7 @@ current_host: "{{ ansible_hostname }}" when: - node_tier in ["oracle", "oracle-asm"] - - action == Prepare_and_Restore + - action == 'Prepare_and_Restore' - name: "Oracle Data Guard: Prepare secondary node" ansible.builtin.include_tasks: "ora-dg-secondary-preparation.yaml" @@ -39,7 +39,7 @@ when: - current_host == ora_secondary when: - - action == Prepare_and_Restore + - action == 'Prepare_and_Restore' - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" @@ -48,7 +48,7 @@ when: - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" - - action == Post_Processing_Primary + - action == 'Post_Processing_Primary' # Enable Flashback Loggining on the Secondary for FSFO - name: "Oracle Data Guard: Post processing on Secondary" @@ -56,14 +56,14 @@ when: - node_tier == "oracle" or node_tier == "oracle-asm" - node_tier != "observer" - - action == Post_Processing_Secondary + - action == 'Post_Processing_Secondary' # FSFO is enabled from the Observer. - name: "Oracle Data Guard: Setup Observer" ansible.builtin.include_tasks: "ora-dg-observer-setup.yaml" when: - node_tier == "observer" - - action == Setup_Observer + - action == 'Setup_Observer' ... # /*---------------------------------------------------------------------------8 # | END | From 2875bc3e2fa4291ef72e57bae6555c16df232f0a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 01:11:28 +0300 Subject: [PATCH 543/607] Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml --- deploy/ansible/playbook_04_00_01_db_ha.yaml | 2 +- deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 3125a904f9..ebd9e94aac 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -214,7 +214,7 @@ - name: Setting the DB facts ansible.builtin.set_fact: tier: ora # Actions for Oracle DB Servers - action: Prepare_Secondary + action: 'Prepare_Secondary' main_password: "{{ hostvars.localhost.sap_password }}" tags: - always diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml index 75d97c0c76..bcf4bac333 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml @@ -22,7 +22,7 @@ ansible.builtin.include_tasks: "ora-dg-secondary-preparation.yaml" when: - node_tier in ["oracle", "oracle-asm"] - - action == Prepare_Secondary + - action == 'Prepare_Secondary' - name: "Oracle Data Guard: Prepare and Restore" block: From f377dd9f17a126b648ac9a986baeabf1748498bd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 09:54:16 +0300 Subject: [PATCH 544/607] Update wait time for StartService in 5.6 SCS/ERS Validation --- .../5.6-scsers-pacemaker/tasks/5.6.6-validate.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml index de866ce028..c4bcd45552 100644 --- 
a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml @@ -114,7 +114,12 @@ # cluster_group_moved.stderr is not search('Already in requested state') # or cluster_group_moved.stderr is not search('is already active on') # ) - - name: "5.6 SCS/ERS Validation: Wait 300 secs for the StartService {{ sap_sid | upper }} to finish" + + - name: "5.6 SCS/ERS Validation: Wait 300 seconds for the StartService {{ sap_sid | upper }} to finish" + ansible.builtin.debug: + msg: "Wait for 300 seconds for the StartService {{ sap_sid | upper }} to finish" + + - name: "5.6 SCS/ERS Validation: Wait 300 seconds for the StartService {{ sap_sid | upper }} to finish" ansible.builtin.wait_for: timeout: 300 From 41751f2b47d88ab6af95ada9822b2843dae91b8a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 09:54:25 +0300 Subject: [PATCH 545/607] Update Terraform version to 1.8.0 in deployment scripts and tfvar_variables.tf files --- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- deploy/scripts/configure_deployer.sh | 2 +- deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf | 2 +- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 19b10965c7..0832b19da1 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -590,7 +590,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.4" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.8.0" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } diff --git a/deploy/scripts/configure_deployer.sh b/deploy/scripts/configure_deployer.sh index 5fcc645d2c..21b6fdd0c8 100755 --- a/deploy/scripts/configure_deployer.sh +++ b/deploy/scripts/configure_deployer.sh @@ -64,7 +64,7 @@ export local_user=$USER # if [ -z "${TF_VERSION}" ]; then - TF_VERSION="1.7.0" + TF_VERSION="1.8.0" fi diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index c4376f6163..ce1cc15f17 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -381,7 +381,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.7.0" + default = "1.8.0" } variable "name_override_file" { diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf 
b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 0d56ca6918..0135416cd2 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -378,7 +378,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.7.0" + default = "1.8.0" } variable "name_override_file" { From d461c99b1f5f767d147612b3a2727fa389654337 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 15:03:30 +0300 Subject: [PATCH 546/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index d8c5b09b66..781b9db97b 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -230,6 +230,8 @@ stages: if [ "$USE_WEBAPP" = "true" ]; then echo "Use WebApp is selected" + else + echo "No WebApp" fi export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log From 68d5474818f27bc9c555c42e8956da55c79d2a42 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 15:10:45 +0300 Subject: [PATCH 547/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 781b9db97b..7dc5180ea8 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -222,10 +222,13 @@ stages: export TF_VAR_agent_pat=$(PAT) fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then + echo "1" deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) - + echo "2" pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') + echo "3" unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) + echo "4" fi if [ "$USE_WEBAPP" = "true" ]; then From 69538ca90d5dc9a473e7220fb1d5013a11b46439 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 15:18:53 +0300 Subject: [PATCH 548/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 7dc5180ea8..8325f75778 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -222,16 +222,11 @@ stages: export TF_VAR_agent_pat=$(PAT) fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - echo "1" - deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) - echo "2" pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') - echo "3" unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) - echo "4" fi - if [ "$USE_WEBAPP" = "true" ]; then + if [ $(use_webapp) = "true" ]; then echo "Use WebApp is selected" else echo "No WebApp" From 626be75fe23032b1349e148df8464a64637aeff2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss 
Date: Thu, 18 Apr 2024 15:41:43 +0300 Subject: [PATCH 549/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 8325f75778..a99b11b6cd 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -221,10 +221,12 @@ stages: export TF_VAR_agent_pool=$(POOL) export TF_VAR_agent_pat=$(PAT) fi + echo "1" if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi + echo "2" if [ $(use_webapp) = "true" ]; then echo "Use WebApp is selected" @@ -232,6 +234,7 @@ stages: echo "No WebApp" fi + echo "3" export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log set +eu From ebd8a4ec6679913d4e113a769a17622976753252 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 15:56:46 +0300 Subject: [PATCH 550/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index a99b11b6cd..67a96f538a 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -223,8 +223,11 @@ stages: fi echo "1" if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then + echo "11" pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') + echo "12" unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) + echo "13" fi echo "2" From c7b007e56f0553c2a8d7663df099b47635316a13 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 18 Apr 2024 16:01:53 +0300 Subject: [PATCH 551/607] Fix missing else statement in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 67a96f538a..cda47fa158 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -221,23 +221,17 @@ stages: export TF_VAR_agent_pool=$(POOL) export TF_VAR_agent_pat=$(PAT) fi - echo "1" if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - echo "11" pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') - echo "12" unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) - echo "13" fi - echo "2" - if [ $(use_webapp) = "true" ]; then - echo "Use WebApp is selected" - else - echo "No WebApp" - fi + if [ $(use_webapp) = "true" ]; then + echo "Use WebApp is selected" + else + echo "No WebApp" + fi - echo "3" export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log set +eu From 50378f7bfc7a6d8558e36a9999f1129e09ef8e70 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 19 Apr 2024 11:28:22 +0300 Subject: [PATCH 552/607] Fix missing else statement in deploy control plane pipeline --- .../ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml | 2 -- 1 file changed, 2 
deletions(-) diff --git a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml index 12f5b4ae1e..5d665ce448 100644 --- a/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.2-ora-asm-db-install/tasks/main.yaml @@ -238,8 +238,6 @@ remote_src: true mode: preserve directory_mode: preserve - when: - - not opatchgrid_stat.stat.exists register: gridbackup - name: "Oracle ASM: Create flag gridbackedup.txt " From f7a2435fc827fa33ceeb7c9f39531b7d631b6bc3 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 19 Apr 2024 12:04:33 +0300 Subject: [PATCH 553/607] Update virtual machine extension reference in vm.tf --- deploy/terraform/terraform-units/modules/sap_landscape/vm.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 14bf8cb183..dafdcd5344 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -218,7 +218,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_lnx" { resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { provider = azurerm.main count = var.infrastructure.deploy_defender_extension && upper(var.vm_settings.image.os_type) == "WINDOWS" ? var.vm_settings.count : 0 - virtual_machine_id = azurerm_linux_virtual_machine.utility_vm[count.index].id + virtual_machine_id = azurerm_windows_virtual_machine.utility_vm[count.index].id name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" From e137a98954e61b3adfcf13a97e23aa9e3b9a4043 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 19 Apr 2024 12:26:06 +0300 Subject: [PATCH 554/607] Update virtual machine extension version to 1.0 in vm.tf --- deploy/terraform/terraform-units/modules/sap_landscape/vm.tf | 2 +- .../terraform-units/modules/sap_system/anydb_node/vm-anydb.tf | 2 +- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 2 +- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 2 +- .../terraform-units/modules/sap_system/app_tier/vm-webdisp.tf | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index dafdcd5344..7260e24f00 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -222,7 +222,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" + type_handler_version = "1.0" auto_upgrade_minor_version = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 71abbf101f..1590976062 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -762,7 +762,7 @@ resource "azurerm_virtual_machine_extension" 
"monitoring_defender_db_win" { name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" + type_handler_version = "1.0" auto_upgrade_minor_version = true settings = jsonencode( diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 71b4e4acf9..bb1a33b192 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -556,7 +556,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" + type_handler_version = "1.0" auto_upgrade_minor_version = true settings = jsonencode( diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index cc53c010eb..cb2ed64118 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -755,7 +755,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" + type_handler_version = "1.0" auto_upgrade_minor_version = true settings = jsonencode( diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index f784b11dd9..8d6967494d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -680,7 +680,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" publisher = "Microsoft.Azure.Security.Monitoring" type = "AzureSecurityWindowsAgent" - type_handler_version = "2.0" + type_handler_version = "1.0" auto_upgrade_minor_version = true settings = jsonencode( From f5d6c43cc8e0533d9ec8b37b9b6d5af94234affc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 19 Apr 2024 14:01:30 +0300 Subject: [PATCH 555/607] Add Observer VM for HANA --- deploy/terraform/run/sap_system/module.tf | 12 +- .../modules/sap_system/hdb_node/outputs.tf | 17 +++ .../sap_system/hdb_node/variables_global.tf | 1 + .../sap_system/hdb_node/variables_local.tf | 9 ++ .../sap_system/hdb_node/vm-observer.tf | 119 ++++++++++++++++++ 5 files changed, 156 insertions(+), 2 deletions(-) create mode 100644 deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index c6b7aac883..c885b7e99d 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -160,6 +160,7 @@ module "hdb_node" { use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, false) use_loadbalancers_for_standalone_deployments = 
var.use_loadbalancers_for_standalone_deployments
   use_msi_for_clusters                         = var.use_msi_for_clusters
+  use_observer                                 = var.database_HANA_use_ANF_scaleout_scenario && local.database.high_availability
   use_scalesets_for_deployment                 = var.use_scalesets_for_deployment
   use_secondary_ips                            = var.use_secondary_ips
   register_endpoints_with_dns                  = var.register_endpoints_with_dns
@@ -369,8 +370,15 @@ module "output_files" {
   #########################################################################################
   bom_name                                     = var.bom_name
   db_sid                                       = local.db_sid
-  observer_ips                                 = module.anydb_node.observer_ips
-  observer_vms                                 = module.anydb_node.observer_vms
+  observer_ips                                 = upper(try(local.database.platform, "HANA")) == "HANA" ? (
+                                                   module.hdb_node.observer_ips) : (
+                                                   module.anydb_node.observer_ips
+                                                 )
+  observer_vms                                 = upper(try(local.database.platform, "HANA")) == "HANA" ? (
+                                                   module.hdb_node.observer_vms) : (
+                                                   module.anydb_node.observer_vms
+                                                 )
+
+  platform                                     = upper(try(local.database.platform, "HANA"))
   sap_sid                                      = local.sap_sid
   web_sid                                      = var.web_sid
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
index 61ee1e145d..d3f2981b7d 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf
@@ -227,3 +227,20 @@ output "ANF_subnet_prefix" {
                                                   )
       }
+
+
+output "observer_ips" {
+  description = "IP addresses for observer nodes"
+  value       = local.enable_deployment && local.deploy_observer ? (
+                  azurerm_network_interface.observer[*].private_ip_address) : (
+                  []
+                )
+      }
+
+output "observer_vms" {
+  description = "Resource IDs for observer nodes"
+  value       = local.enable_deployment ? (
+                  azurerm_linux_virtual_machine.observer[*].id) : (
+                  [""]
+                )
+      }
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf
index 9093165754..27d5d42017 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf
@@ -59,6 +59,7 @@ variable "use_loadbalancers_for_standalone_deployments" {
                                                   default     = true
                                                 }
 variable "use_msi_for_clusters"                { description = "If true, the Pacemaker cluster will use a managed identity" }
+variable "use_observer"                        { description = "Use Observer VM" }
 variable "use_secondary_ips"                   {
                                                  description = "Use secondary IPs for the SAP System"
                                                  default     = false
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
index 930f501d9f..6674b05903 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf
@@ -413,4 +413,13 @@ locals {
   create_shared_volumes                        = !local.use_avg && var.hana_ANF_volumes.use_for_shared && !var.hana_ANF_volumes.use_existing_shared_volume
   use_shared_volumes                           = local.use_avg || var.hana_ANF_volumes.use_for_shared && var.hana_ANF_volumes.use_existing_shared_volume
+  # If using an existing VM for the observer, set use_observer to false in .tfvars
+  deploy_observer                              = var.use_observer
+  observer_size                                = "Standard_D4s_v3"
+  observer_authentication                      = local.authentication
+  observer_custom_image                        = local.hdb_custom_image
+  observer_custom_image_id                     = 
local.enable_deployment ? local.hdb_os.source_image_id : "" + observer_os = local.enable_deployment ? local.hdb_os : null + + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf new file mode 100644 index 0000000000..99cf0c5f44 --- /dev/null +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -0,0 +1,119 @@ + + +#######################################4#######################################8 +# # +# Primary Network Interface # +# # +#######################################4#######################################8 +resource "azurerm_network_interface" "observer" { + provider = azurerm.main + count = local.deploy_observer ? 1 : 0 + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.nic, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.OBSERVER_VMNAME[count.index], + local.resource_suffixes.nic + ) + resource_group_name = var.resource_group[0].name + location = var.resource_group[0].location + enable_accelerated_networking = false + tags = var.tags + + ip_configuration { + name = "IPConfig1" + subnet_id = var.db_subnet.id + private_ip_address = var.database.use_DHCP ? ( + null) : ( + try(local.observer.nic_ips[count.index], + cidrhost( + var.db_subnet.address_prefixes[0], + tonumber(count.index) + local.anydb_ip_offsets.observer_db_vm + ) + ) + ) + private_ip_address_allocation = var.database.use_DHCP ? "Dynamic" : "Static" + + } +} + + + +#######################################4#######################################8 +# # +# Virtual Machine # +# # +#######################################4#######################################8 + +resource "azurerm_linux_virtual_machine" "observer" { + provider = azurerm.main + count = local.deploy_observer && upper(local.anydb_ostype) == "LINUX" ? 1 : 0 + depends_on = [var.anchor_vm] + resource_group_name = var.resource_group[0].name + location = var.resource_group[0].location + + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.vm, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.OBSERVER_VMNAME[count.index], + local.resource_suffixes.vm + ) + computer_name = var.naming.virtualmachine_names.OBSERVER_COMPUTERNAME[count.index] + + admin_username = var.sid_username + admin_password = local.enable_auth_key ? null : var.sid_password + disable_password_authentication = !local.enable_auth_password + + zone = local.zonal_deployment ? setsubtract(["1", "2", "3"], local.zones)[0] : null + + network_interface_ids = [ + azurerm_network_interface.observer[count.index].id + ] + size = local.observer_size + source_image_id = local.observer_custom_image ? local.observer_custom_image_id : null + + custom_data = var.deployment == "new" ? var.cloudinit_growpart_config : null + + license_type = length(var.license_type) > 0 ? var.license_type : null + + tags = merge(local.tags, var.tags) + + dynamic "admin_ssh_key" { + for_each = range(var.deployment == "new" ? 1 : (local.enable_auth_password ? 
0 : 1)) + content { + username = var.sid_username + public_key = var.sdu_public_key + } + } + + os_disk { + name = format("%s%s%s%s%s", + var.naming.resource_prefixes.osdisk, + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.OBSERVER_VMNAME[count.index], + local.resource_suffixes.osdisk + ) + caching = "ReadWrite" + storage_account_type = "Premium_LRS" + disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) + } + + + dynamic "source_image_reference" { + for_each = range(local.observer_custom_image ? 0 : 1) + content { + publisher = local.observer_os.publisher + offer = local.observer_os.offer + sku = local.observer_os.sku + version = local.observer_os.version + } + } + + boot_diagnostics { + storage_account_uri = var.storage_bootdiag_endpoint + } + +} + From c94cb32de5d53d23138eb060f56f4606f1ba952a Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Sat, 27 Apr 2024 13:51:17 +0530 Subject: [PATCH 556/607] Local Feature/scaleout hsr to upstream scale-out HSR branch (#581) * overhaul of pacemaker approach for Scale out Shared nothing HSR cluster. * add code to 2.6 AFS mount to support /hana/shared for scale out HSR code * port changes to simple mount AFS task * Update 2.6.0-afs-mounts.yaml * Update 2.6.0-afs-mounts.yaml * Update 2.6.0-afs-mounts.yaml * anf mount bugfix * cleanup of stale code - anf /hana/shared mount * Update playbook_04_00_00_db_install.yaml * Update main.yaml * Update main.yaml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update 1.18.0-set_runtime_facts.yml * Update 1.18.0-set_runtime_facts.yml * Update 1.18.0-set_runtime_facts.yml * Update 1.18.0-set_runtime_facts.yml * Update 1.18.0-set_runtime_facts.yml * Update 1.18.2-provision.yml * Update 1.18.2-provision.yml * Update 1.18.2-provision.yml * experimental attempt to split pacemaker from hana setup * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * rollback * Update playbook_04_00_00_db_install.yaml * Update 1.18.2.0-cluster-Suse.yml * Update 1.18.2.0-cluster-Suse.yml * Update main.yml * Update main.yml * Update 1.18.2-provision.yml * pacemaker 1.18 overhaul * Update 1.18.2.0-cluster-RedHat.yml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * Update 1.18.2-provision.yml * Update 1.18.2.0-cluster-Suse.yml * re-enable SSH key based authentication * Update main.yml * fixes * Update playbook_04_00_00_db_install.yaml * Update playbook_04_00_00_db_install.yaml * troubleshooting skipping of observer_db node * Update playbook_04_00_00_db_install.yaml * update to cluster config * Update playbook_04_00_00_db_install.yaml * task naming simplification for accurate debugging * Update 1.18.0-set_runtime_facts.yml * Update 1.18.1-pre_checks.yml * Update 1.18.2-provision.yml * Update 1.18.2.0-cluster-Suse.yml * corosync template error * Update corosync.conf.j2 * variables port * Update main.yml * Update 1.18.2.0-cluster-Suse.yml * switch to using primary and secondary instance names * Update 1.18.2.0-cluster-RedHat.yml * add code for cluster configuration for SUSE post pacemaker configuration * Update 5.5.4.2-cluster-ScaleOut-Suse.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * add 20-saphana.j2 file * Update 20-saphana.j2 * Update 20-saphana.j2 * Update 20-saphana.j2 * Update 
5.5.3.1-SAPHanaSRMultiTarget.yml * fix for RHEL * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update main.yaml * Update 1.18.2.0-cluster-Suse.yml * Update 1.18.2.0-cluster-Suse.yml * update to 20-saphana sudoers file * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * rename scale out task file * bug fix * Update 5.5.4.1-cluster-ScaleOut-Suse.yml * Update 5.5.4.0-clusterPrep-ScaleOut-Suse.yml * minor fix * Update 5.5.4.1-cluster-ScaleOut-Suse.yml * Update 5.5.4.0-clusterPrep-ScaleOut-Suse.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.4.1-cluster-ScaleOut-Suse.yml * fix to SLES clustering code and installing SAPHANASr-Scaleout package for majority maker * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * minor fix for SAPHANAsr-multitarget subtask * fix for majority maker node not installing hook plugin * Added code for RHEL pacemaker configuration * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * MM-01 plugin issue bugfix * SAP HANA hook fix 2 * remove SCS references from 1.18 task as its specific to HANA scale out pacemaker creation * Update 1.18.1.1-iSCSI.yml * Update 1.18.1.1-iSCSI.yml * Update 1.18.1.1-iSCSI.yml * Update 1.18.1.2-sbd.yaml * Update 1.18.2.0-cluster-RedHat.yml * Update 1.18.2.0-cluster-RedHat.yml * Update main.yml * minor fix for RHEL pacemaker * Update 1.18.3-post_provision_report.yml * add pacemaker code for RHEL * minor fix to hana basepaths * Update main.yaml * slight update to hana paths * Update main.yaml * syntax fix * Update 1.18.2.0-cluster-RedHat.yml * Update 1.18.2.0-cluster-RedHat.yml * Update 1.18.2.0-cluster-RedHat.yml * Update os-packages.yaml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * changes to packages for scale out * development test push * Update ansible-input-api.yaml * Update ansible-input-api.yaml * Update ansible-input-api.yaml * Update ansible-input-api.yaml * package fix for RHEL * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * change to how sap starts and stops * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * remove HTTPS prot flag in saphostagent * Update 5.5.3.1-SAPHanaSRMultiTarget.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update main.yaml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * pacemaker fix * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update os-packages.yaml * Update os-packages.yaml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 
5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * rhel pcs node attribute fix * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.1-cluster-ScaleOut-RedHat.yml * replace availability variable replace db_high_availability with database_high_availability * Update main.yaml * Update main.yaml * Update main.yaml * Update main.yaml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * post provision fix for scale out * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * Update ansible-input-api.yaml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * scale out cluster config overhaul * refactor, move scale out code seperate * scale out specific provision tasks * Update main.yml * Update main.yml * Update playbook_04_00_01_db_ha.yaml * Update 1.18.0-set_runtime_facts.yml * Update main.yml * Update main.yml * Update 5.5.2-pre_checks.yml * fix error on import vs include in 5.5 sub tasks * add majority maker constraints * post-provision code, minor code fixes for HANA ha config * Update 1.18.3-post_provision_report.yml * Update main.yaml * Update main.yaml * Update 1.18.3-post_provision_report.yml * Update 5.5.4.0-clusterPrep-ScaleOut-RedHat.yml * error code fix * Update 5.5.4.1-cluster-ScaleOut-RedHat.yml * Update 5.5.4.1-cluster-ScaleOut-RedHat.yml * Update 5.5.4.1-cluster-ScaleOut-RedHat.yml * Update main.yml * Update 5.5.3-SAPHanaSR.yml * changes to HANA Replication python hook configi * Update 20-saphana-rhel.j2 * Scale out code overhaul !!! 
* Update main.yaml * Update main.yaml * Update 5.8.4-provision-ScaleOut.yml * Update 5.8.4-provision-ScaleOut.yml --- .../ansible/playbook_04_00_00_db_install.yaml | 312 ++++++-- deploy/ansible/playbook_04_00_01_db_ha.yaml | 25 +- .../tasks/main.yaml | 722 ++++++++++++++---- .../templates/HANA_2_00_customconfig.rsp | 4 + ...anf.rsp => HANA_2_00_install_scaleout.rsp} | 6 +- .../1.18-scaleout-pacemaker/defaults/main.yml | 10 + .../tasks/1.18.0-set_runtime_facts.yml | 220 ++++++ .../tasks/1.18.1-pre_checks.yml | 127 +++ .../tasks/1.18.1.1-iSCSI.yml | 148 ++++ .../tasks/1.18.1.2-sbd.yaml | 269 +++++++ .../tasks/1.18.1.3-sbd-deviceUpdate.yaml | 91 +++ .../tasks/1.18.2-provision.yml | 137 ++++ .../tasks/1.18.2.0-cluster-RedHat.yml | 391 ++++++++++ .../tasks/1.18.2.0-cluster-Suse.yml | 212 +++++ .../tasks/1.18.3-post_provision_report.yml | 36 + .../1.18-scaleout-pacemaker/tasks/main.yml | 39 + .../templates/corosync.conf.j2 | 60 ++ .../templates/softdog.conf | 1 + .../1.18-scaleout-pacemaker/vars/main.yml | 110 +++ .../tasks/2.6.0-afs-mounts.yaml | 23 + .../tasks/2.6.1.1-anf-mount.yaml | 2 +- .../tasks/2.6.7-afs-mounts-simplemount.yaml | 25 + .../2.6-sap-mounts/tasks/main.yaml | 16 +- .../5.5-hanadb-pacemaker/tasks/main.yml | 2 +- .../defaults/main.yml | 7 + .../tasks/5.8.1-set_runtime_facts.yml | 90 +++ .../tasks/5.8.2-pre_checks.yml | 61 ++ .../tasks/5.8.3-SAPHanaSRMultiTarget.yml | 437 +++++++++++ .../tasks/5.8.4-provision-ScaleOut.yml | 17 + .../5.8.4.0-clusterPrep-ScaleOut-RedHat.yml | 258 +++++++ .../5.8.4.0-clusterPrep-ScaleOut-Suse.yml | 57 ++ .../tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml | 308 ++++++++ .../tasks/5.8.4.1-cluster-ScaleOut-Suse.yml | 256 +++++++ .../tasks/5.8.5-post_provision_report.yml | 194 +++++ .../tasks/main.yml | 20 + .../templates/20-saphana-rhel.j2 | 6 + .../templates/20-saphana-suse.j2 | 5 + .../templates/corosync.conf.j2 | 61 ++ .../vars/main.yml | 47 ++ deploy/ansible/vars/ansible-input-api.yaml | 4 + 40 files changed, 4592 insertions(+), 224 deletions(-) create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_customconfig.rsp rename deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/{HANA_2_00_install_scaleout_anf.rsp => HANA_2_00_install_scaleout.rsp} (98%) create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.1-iSCSI.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.2-sbd.yaml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.3-sbd-deviceUpdate.yaml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/corosync.conf.j2 create mode 100644 deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/softdog.conf create mode 100644 
deploy/ansible/roles-os/1.18-scaleout-pacemaker/vars/main.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/defaults/main.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-rhel.j2 create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-suse.j2 create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/corosync.conf.j2 create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/vars/main.yml diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index d0b18a5847..74c94dd1ec 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -83,7 +83,6 @@ when: - db_scale_out is defined - db_scale_out - - not db_high_availability register: root_password_generated - name: "Database Installation Playbook: - Show root password" @@ -95,23 +94,203 @@ # /*----------------------------------------------------------------------------8 -# | | +# | | # | Playbook for HANA DB Install | -# | | +# | No Scale Out configuration | # +------------------------------------4--------------------------------------*/ -# +------------------------Scale out HANA configuration only -----------------*/ +- hosts: "{{ sap_sid | upper }}_DB" + name: DB Installation - HANA + remote_user: "{{ orchestration_ansible_user }}" + gather_facts: true # Important to collect hostvars information + any_errors_fatal: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + # -------------------------------------+---------------------------------------8 + # + # Build the list of tasks to be executed in order here. 
+ # + # -------------------------------------+---------------------------------------8 + + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + + - name: "Database Installation Playbook: - Install HANA" + become: true + when: + - node_tier == 'hana' + - not db_scale_out + block: + - name: "Database Installation Playbook: - Setting the DB facts" + ansible.builtin.set_fact: + tier: hdb # Actions for HANA DB Serve + main_password: "{{ hostvars.localhost.sap_password }}" + sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + # Only applicable for scale out with HSR + tags: + - always + + - name: "Database Installation Playbook: - Show SAP password" + ansible.builtin.debug: + msg: "{{ hostvars.localhost.sap_password }}" + verbosity: 4 + + - name: "Run the Database installation Playbook" + block: + - name: "Database Installation Playbook: - run HANA installation" + ansible.builtin.include_role: + name: roles-db/4.0.0-hdb-install + when: + - not db_scale_out + + - name: "Database Installation Playbook: - Create db-install-done flag" + delegate_to: localhost + become: false + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/db/db-install-done{{ ansible_hostname }}" + state: touch + mode: 0755 + when: + - hana_already_installed is defined or (hana_installation.rc is defined and hana_installation.rc == 0) + + always: + - name: "Database Installation Playbook: - Get the Error lines from installation output" + ansible.builtin.set_fact: + error_lines: "{{ error_lines | default([]) + [item] }}" + with_items: "{{ hana_installation.stdout_lines }}" + when: + - hana_installation is defined + - hana_installation.stdout_lines is defined + - '"ERROR" in item' + + - name: "Database Installation Playbook: - Run post installation routines" + ansible.builtin.include_role: + name: roles-sap/7.0.0-post-install + vars: + suffix: "_DB" + tier: 'hana' + + - name: "Database Installation Playbook: - Show errors from HANA installation" + ansible.builtin.debug: + msg: "{{ error_lines }}" + when: + - error_lines is defined + + tags: + - 4.0.0-hdb-install + + - name: "Database Installation Playbook: - Install Pacemaker (base)" + become: true + when: + # - db_high_availability + - database_high_availability + - node_tier == 'hana' + - not db_scale_out + block: + - name: "Database Installation Playbook: - Setting the facts" + ansible.builtin.set_fact: + tier: ha + main_password: "{{ hostvars.localhost.sap_password }}" + password_ha_db_cluster: "{{ hostvars.localhost.db_cluster_password }}" + primary_instance_name: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name + secondary_instance_name: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name + # fencing_spn_client_id: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_client_id }}{% endif %}" + # fencing_spn_client_pwd: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_pwd }}{%- endif %}" + # fencing_spn_tenant_id: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_tenant_id }}{%- endif %}" + + - name: "Database Installation Playbook: - Setting the facts for fencing" + ansible.builtin.set_fact: + fencing_spn_client_id: "{{ 
hostvars.localhost.sap_fencing_spn_client_id }}" + fencing_spn_client_pwd: "{{ hostvars.localhost.sap_fencing_spn_pwd }}" + fencing_spn_tenant_id: "{{ hostvars.localhost.sap_fencing_spn_tenant_id }}" + when: + - database_cluster_type == "AFA" + - not use_msi_for_clusters + + - name: "Database Installation Playbook: - Ensure the correct repositories are set" + ansible.builtin.include_role: + name: roles-os/1.3-repository + tags: + - 1.3-repository + + - name: "Database Installation Playbook: - Ensure the correct packages are installed" + ansible.builtin.include_role: + name: roles-os/1.4-packages + tags: + - 1.4-packages + + # Setup the kernel parameters required for pacemaker cluster VMs + - name: "Database Installation Playbook: - Ensure the correct kernel parameters are set" + ansible.builtin.include_role: + name: roles-os/1.9-kernelparameters + tags: + - 1.9-kernelparameters + + - name: "Database Installation Playbook: - Ensure the needed services are enabled/disabled" + ansible.builtin.include_role: + name: roles-os/1.16-services + tags: + - 1.16-services + + - name: "Database Installation Playbook: - Ensure the accounts are present" + ansible.builtin.include_role: + name: roles-os/1.11-accounts + tags: + - 1.11-accounts + + - name: "Database Installation Playbook: - Install Pacemaker" + ansible.builtin.include_role: + name: roles-os/1.17-generic-pacemaker # Configures the Pacemaker cluster with Azure fence agent + apply: + tags: + - 1.17-generic-pacemaker + become: true + become_user: root + when: + - node_tier != 'oracle' + - node_tier != 'oracle-asm' + tags: + - 1.17-generic-pacemaker + + - name: "Database Installation Playbook: - Create pacemaker-install-done flag" + delegate_to: localhost + become: false + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/pacemaker-install-done" + state: touch + mode: 0755 + +# /*----------------------------------------------------------------------------8 +# | | +# | Playbook for HANA DB Install | +# | Scale Out configuration Only | +# +------------------------------------4--------------------------------------*/ -# This configures root account on HANA nodes for scale out + anf configuration only +# This configures HANA Scale out ( netapp and HSR shared nothing ) - hosts: "{{ sap_sid | upper }}_DB" - name: DB Installation - login configuration + name: DB Installation - HANA Scale Out remote_user: "{{ orchestration_ansible_user }}" - gather_facts: true + gather_facts: true # Important to collect hostvars information any_errors_fatal: true vars_files: - - vars/ansible-input-api.yaml + - vars/ansible-input-api.yaml # API Input template with defaults + tasks: - - name: "SAP HANA: Configure root credential for Scale-Out" + - name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + # This configures root account on HANA nodes for scale out configuration ( shared and shared nothing with HSR ) to use password based login. 
+ - name: "SAP HANA: Configure root credential for Scale-Out" block: - name: Reset root password become: true @@ -137,35 +316,11 @@ ansible.builtin.service: name: sshd state: restarted - when: - - not db_high_availability - - db_scale_out | default(false) + - db_scale_out - hostvars.localhost.root_password is defined - -- hosts: "{{ sap_sid | upper }}_DB" - name: DB Installation - HANA - remote_user: "{{ orchestration_ansible_user }}" - gather_facts: true # Important to collect hostvars information - any_errors_fatal: true - vars_files: - - vars/ansible-input-api.yaml # API Input template with defaults - - tasks: - # -------------------------------------+---------------------------------------8 - # - # Build the list of tasks to be executed in order here. - # - # -------------------------------------+---------------------------------------8 - - name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - - - name: "Database Installation Playbook: - Install HANA" + - name: "Database Installation Playbook: - Install HANA Scale Out" become: true when: - node_tier == 'hana' @@ -176,6 +331,8 @@ main_password: "{{ hostvars.localhost.sap_password }}" sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + primary_instance_name: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name + secondary_instance_name: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name # Only applicable for scale out with HSR tags: - always @@ -184,9 +341,9 @@ ansible.builtin.set_fact: root_password: "{{ hostvars.localhost.root_password }}" when: - - db_scale_out is defined + # - db_scale_out is defined - db_scale_out - - not db_high_availability + # - not db_high_availability - name: "Database Installation Playbook: - Show SAP password" ansible.builtin.debug: @@ -195,18 +352,6 @@ - name: "Run the Database installation Playbook" block: - - name: "Database Installation Playbook: - run HANA installation" - ansible.builtin.include_role: - name: roles-db/4.0.0-hdb-install - when: - - not db_scale_out - - # - name: "Database installation Playbook: - run HANA Scale-Out mounts" - # ansible.builtin.include_role: - # name: roles-sap-os/2.6-sap-mounts - # when: - # - db_scale_out | default(false) == true - - name: "Database Installation Playbook: - run HANA Scale-Out installation" ansible.builtin.include_role: name: roles-db/4.0.3-hdb-install-scaleout @@ -248,26 +393,69 @@ tags: - 4.0.0-hdb-install + # This disables root account on HANA nodes for scale out configuration ( shared and shared nothing with HSR ) to use password based login. 
+ - name: "SAP HANA: disable root credential for Scale-Out" + block: + - name: Reset root password to random local value + become: true + ansible.builtin.user: + name: root + update_password: always + password: "{{ lookup('ansible.builtin.password', '/dev/null', seed=inventory_hostname) | password_hash('sha512') }}" - - name: "Database Installation Playbook: - Install Pacemaker (base)" + - name: Enable {{ item.key }} in /etc/ssh/sshd_config + become: true + ansible.builtin.lineinfile: + path: "/etc/ssh/sshd_config" + regex: "^(# *)?{{ item.key }}" + line: "{{ item.key }} {{ item.value }}" + state: present + loop: + - { key: "PermitRootLogin", value: "yes" } + - { key: "PasswordAuthentication", value: "no" } + - { key: "ChallengeResponseAuthentication", value: "no" } + + - name: "Restart SSHD on {{ ansible_hostname }}" + become: true + ansible.builtin.service: + name: sshd + state: restarted + when: + - db_scale_out + - hostvars.localhost.root_password is defined + + +# Configure HANA Scale out Pacemaker, run on DB nodes and majority maker node ( first node if multiple are provided, rest are ignored. ) +- hosts: "{{ sap_sid | upper }}_DB: + {{ sap_sid | upper }}_OBSERVER_DB" + name: DB Installation - HANA Scale Out - Pacemaker + remote_user: "{{ orchestration_ansible_user }}" + gather_facts: true # Important to collect hostvars information + any_errors_fatal: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + tasks: + - name: "Database Installation Playbook: - Install Pacemaker (Scale Out)" become: true when: # - db_high_availability - - database_high_availability - - node_tier == 'hana' + - db_high_availability + - node_tier in ['hana','observer'] + - db_scale_out block: - - name: "Database Installation Playbook: - Setting the facts" + - name: "Database Installation Playbook: - Setting the facts" ansible.builtin.set_fact: tier: ha main_password: "{{ hostvars.localhost.sap_password }}" password_ha_db_cluster: "{{ hostvars.localhost.db_cluster_password }}" primary_instance_name: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name secondary_instance_name: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name + # root_password: "{{ hostvars.localhost.root_password }}" # fencing_spn_client_id: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_client_id }}{% endif %}" # fencing_spn_client_pwd: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_pwd }}{%- endif %}" # fencing_spn_tenant_id: "{% if not use_msi_for_clusters %}{{ hostvars.localhost.sap_fencing_spn_tenant_id }}{%- endif %}" - - name: "Database Installation Playbook: - Setting the facts for fencing" + - name: "Database Installation Playbook: - Setting the facts for fencing" ansible.builtin.set_fact: fencing_spn_client_id: "{{ hostvars.localhost.sap_fencing_spn_client_id }}" fencing_spn_client_pwd: "{{ hostvars.localhost.sap_fencing_spn_pwd }}" @@ -307,19 +495,21 @@ tags: - 1.11-accounts - - name: "Database Installation Playbook: - Install Pacemaker" + # Scale out based pacemaker task/s + - name: "Database Installation Playbook: - Install Scale Out Pacemaker" ansible.builtin.include_role: - name: roles-os/1.17-generic-pacemaker # Configures the Pacemaker cluster with Azure fence agent + name: roles-os/1.18-scaleout-pacemaker # Configures the Pacemaker cluster with Azure fence agent apply: tags: - - 1.17-generic-pacemaker + - 1.18-scaleout-pacemaker become: true become_user: root when: - - node_tier != 'oracle' - - node_tier 
!= 'oracle-asm' + - node_tier in ['hana','observer'] + - db_scale_out + tags: - - 1.17-generic-pacemaker + - 1.18-scaleout-pacemaker - name: "Database Installation Playbook: - Create pacemaker-install-done flag" delegate_to: localhost @@ -330,6 +520,8 @@ mode: 0755 + + # /*----------------------------------------------------------------------------8 # | | # | Playbook for Oracle DB Install diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index ebd9e94aac..bb16552044 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -84,8 +84,9 @@ when: database_high_availability # +------------------------------------4--------------------------------------*/ -- hosts: "{{ sap_sid | upper }}_DB" - name: DB HA Configuration +- hosts: "{{ sap_sid | upper }}_DB + {{ db_sid | upper }}_OBSERVER_DB" + name: HANA DB HA Configuration remote_user: "{{ orchestration_ansible_user }}" gather_facts: true # Important to collect hostvars information vars_files: @@ -104,6 +105,11 @@ - db_high_availability is defined - database_high_availability is not defined + - name: "0.0 Validations: - Gather facts for first time" + ansible.builtin.setup: + tags: + - always + - name: "HANA HA Setup" become: true block: @@ -137,6 +143,8 @@ main_password: "{{ hostvars.localhost.sap_password }}" - name: "Run the db/hdb-hsr role" + when: + - node_tier == 'hana' ansible.builtin.include_role: name: roles-db/4.0.1-hdb-hsr tags: @@ -168,10 +176,21 @@ - name: "HANA HA Setup: - run the Pacemaker role" ansible.builtin.include_role: name: roles-sap/5.5-hanadb-pacemaker - when: database_high_availability + when: + - database_high_availability + - not db_scale_out tags: - 5.5-hanadb-pacemaker + - name: "HANA HA Setup: - run the Pacemaker role for scale out" + ansible.builtin.include_role: + name: roles-sap/5.8-hanadb-scaleout-pacemaker + when: + - db_high_availability + - db_scale_out + tags: + - 5.8-hanadb-scaleout-pacemaker + when: - database_high_availability - platform == 'HANA' diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index b2e952a3c9..d55419da45 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -21,6 +21,7 @@ ansible.builtin.set_fact: sap_inifile: "hdbserver_{{ virtual_host }}_{{ sap_sid }}_install.rsp" dir_params: "{{ tmp_directory }}/.params" + sap_custom_config: "global.ini" - name: "4.0.3 - SAP HANA SCALE OUT: Create list of all db hosts" ansible.builtin.set_fact: @@ -89,7 +90,11 @@ - "Client Subnet CIDR: {{ subnet_client_cidr }}" - "Storage Subnet CIDR: {{ subnet_storage_cidr }}" -# Scale out ANF only runs on primary node or the first node in the SID_DB list. This is mandatory. +# Scale out - ANF with shared storage +# Scale out ANF must only run on the designated primary node from the DB server list. 
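# A sketch of what the installing primary passes to hdblcm for the remaining
# nodes: assuming a hypothetical four-host db_hosts list (host names are
# illustrative only, not from this repository), the _rsp_additional_hosts
# expression in the block below renders roughly as
#
#   xyzdhdb01:role=worker:group=default:workergroup=default,
#   xyzdhdb02:role=worker:group=default:workergroup=default,
#   xyzdhdb03:role=standby:group=default:workergroup=default
#
# i.e. every host after the installing primary joins as a worker and the last
# host becomes the standby, unless db_no_standby forces it to be a worker too.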
+# /*---------------------------------------------------------------------------8 +# | Primary site setup with Shared storage scale out | +# +------------------------------------4--------------------------------------*/ - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - ANF" block: @@ -100,7 +105,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT: SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" ansible.builtin.template: - src: "HANA_2_00_install_scaleout_anf.rsp" + src: "HANA_2_00_install_scaleout.rsp" dest: "{{ dir_params }}/{{ sap_inifile }}" mode: 0644 force: true @@ -118,8 +123,9 @@ _rsp_internal_network: "{{ subnet_cidr_db | default((subnet_address + '/' + subnet_prefix), true) }}" # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. _rsp_root_password: "{{ root_password }}" - # Note: Last node in the DB list is marked as standby, while everything else except first node is marked as worker node - # This is the way !!! + # Note : Default configuration involves placing the last node in DB List as standby. + # Note : This behavior can be overridden via property 'db_no_standby' to force all remaining nodes as workers + # Note : This configuration is not recommended as it leaves your distributed system without a standby _rsp_additional_hosts: "{% for item in db_hosts[1:] %} {% if loop.index == db_hosts | length -1 %} {% if db_no_standby %} @@ -372,172 +378,549 @@ when: - not hana_installed.stat.exists - - not (db_high_availability | default(false)) + - not (database_high_availability | default(false)) # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm. - ansible_hostname == db_hosts[0] - db_scale_out is defined - db_scale_out -# TODO: add block for Scale out with HSR support here, same as regular installation. -- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" - block: - - - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file if exists" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent - - name: "4.0.3 - SAP HANA SCALE OUT: SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_install.rsp" - dest: "{{ dir_params }}/{{ sap_inifile }}" - mode: 0644 - force: true - # Template parameter mapping - vars: - _rsp_component_root: "../COMPONENTS" - _rsp_components: "{{ hana_components }}" - _rsp_sapmnt: "/hana/shared" # Default Value - _rsp_hostname: "{{ virtual_host }}" - _rsp_sid: "{{ db_sid | upper }}" - _rsp_number: "{{ db_instance_number }}" - _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - - - name: "4.0.3 - SAP HANA SCALE OUT: Progress" - ansible.builtin.debug: - msg: "Start HANA Installation" +# Scale our HSR with multi site replication +# DB servers need to be split into two sites, each with designated primary. HANA setup will run on the primaries only. 
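# The two replication sites are carved out of the ordered play host list by
# index parity: even-indexed hosts form the primary site (with
# ansible_play_hosts_all[0] as its primary) and odd-indexed hosts form the
# secondary site (with ansible_play_hosts_all[1] as its primary). A sketch
# with six hypothetical hosts (names illustrative only):
#
#   ansible_play_hosts_all: [hdb00, hdb01, hdb02, hdb03, hdb04, hdb05]
#   primary site   = hdb00 + ansible_play_hosts_all[2::2]  ->  hdb02, hdb04
#   secondary site = hdb01 + ansible_play_hosts_all[3::2]  ->  hdb03, hdb05
#
# The [0::2] / [1::2] slices used for internal_hostname_resolution cover a
# full site each, while the [2::2] / [3::2] slices used below yield only the
# additional worker hosts handed to hdblcm on each site's primary.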
+# /*---------------------------------------------------------------------------8 +# | Primary site setup with Shared nothing scale out | +# +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: installation" - block: - - name: "4.0.3 - SAP HANA SCALE OUT: Execute hdblcm on {{ virtual_host }}" +- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" + block: + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Primary Site )" + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[2::2] %} + {% if loop.index == ansible_play_hosts_all | length -1 %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA SCALE OUT-HSR: Execute hdblcm on {{ primary_instance_name }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ when: hana_installation.rc > 1 + + - name: "SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "4.0.3 - SAP HANA 
SCALE OUT-HSR: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Primary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[0::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - rescue: - - name: "Fail if HANA installation failed with rc > 1" - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." - when: hana_installation.rc > 1 - - - name: "4.0.3 - SAP HANA SCALE OUT: Progress" - ansible.builtin.debug: - msg: "Restarting the HANA Installation" - when: hana_installation.rc == 1 - - - - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }} and rescue" - block: - - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }}" - ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - when: hana_installation.rc == 1 - rescue: - - name: "Fail if HANA installation failed on second attempt." - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." 
- - - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" - ansible.builtin.debug: - msg: - - "HANA Installation failed" - - "HDBLCM output: {{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "Errorhandling: SAP HANA" - ansible.builtin.debug: - msg: "INSTALL:{{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: Successful installation" - block: + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. + - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 - - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" - ansible.builtin.debug: - msg: "HANA Installation succeeded" + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install: flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - state: touch - mode: 0755 + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 + when: + - ansible_hostname == primary_instance_name + - not hana_installed.stat.exists - - name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" - ansible.builtin.uri: - url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 - use_proxy: false - headers: - Metadata: true - register: azure_metadata +# /*---------------------------------------------------------------------------8 +# | Secondary site setup with Shared nothing scale out | +# +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: Extract Azure subscription details" - ansible.builtin.set_fact: - subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" - resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + 
_rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[3::2] %} + {% if loop.index == ansible_play_hosts_all | length -1 %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA: Execute hdblcm on {{ secondary_instance_name }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
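+      # Note on the retry logic above: hdblcm exit code 1 is treated as a
+      # resumable condition and triggers exactly one re-execution, while any
+      # exit code greater than 1 aborts with INSTALL:0022. When diagnosing a
+      # hard failure, the hdblcm trace files (typically written under
+      # /var/tmp/hdb_<SID>_hdblcm_* on the node; an assumption based on the
+      # hdblcm defaults) are the first place to look.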
+ + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." 
+ become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Secondary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[1::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: Show the subscription and resource group" - ansible.builtin.debug: - msg: - - "Subscription ID: {{ subscription_id }}" - - "Resource Group Name: {{ resource_group_name }}" + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. 
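+      # A possible implementation of the ToDo above (sketch only, not enabled):
+      # sapcontrol's GetProcessList conventionally returns rc 3 while all
+      # processes are still running (GREEN) and rc 4 once everything is
+      # stopped, so the fixed two-minute wait could become a polling loop:
+      #   - name: "Wait until HANA reports all processes stopped"
+      #     become_user: "{{ db_sid | lower }}adm"
+      #     ansible.builtin.shell: sapcontrol -nr {{ db_instance_number }} -function GetProcessList
+      #     register: hana_proc_list
+      #     until: hana_proc_list.rc == 4
+      #     retries: 12
+      #     delay: 10
+      #     failed_when: false
+      #     changed_when: false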
+ - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 - - name: "4.0.3 - SAP HANA SCALE OUT: Include deploy/ansible/roles-misc/0.6-ARM-Deployment" - ansible.builtin.include_role: - name: roles-misc/0.6-ARM-Deployment + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" vars: - subscriptionId: "{{ subscription_id }}" - resourceGroupName: "{{ resource_group_name }}" - - - name: "4.0.3 - SAP HANA SCALE OUT: ARM Deployment flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" - state: touch - mode: 0755 + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 + when: + - ansible_hostname == secondary_instance_name + - not hana_installed.stat.exists + when: + - database_high_availability - when: - - hana_installation.rc is defined - - hana_installation.rc < 1 +# /*----------------------------End of setup----------------------------------8 - - name: "4.0.3 - SAP HANA SCALE OUT: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 - when: - - not hana_installed.stat.exists - - db_high_availability is defined - - db_high_availability - - db_scale_out is defined - - db_scale_out - name: "HANA Install status" @@ -603,6 +986,45 @@ when: - hana_installed.stat.exists + +# Scale out Supplementary tasks +# Create {{ db_sid | lower}}adm account on majority maker VM +- name: "4.0.3 - SAP HANA SCALE OUT: Supplementary tasks" + block: + - name: "Create Create SAP Groups on Observer VM" + when: + - node_tier == 'observer' + ansible.builtin.group: + name: "{{ item.group }}" + gid: "{{ item.gid }}" + state: present + loop: + - { group: 'sapsys', gid: '{{ sapsys_gid }}' } + - { group: 'sapinst', gid: '{{ sapinst_gid }}' } + + - name: "Create /usr/sap directory" + when: + - node_tier == 'observer' + ansible.builtin.file: + path: "/usr/sap" + state: directory + mode: '0755' + owner: root + group: root + force: yes + + - name: "Create {{ db_sid | lower }}adm account for Observer " + when: + - node_tier == 'observer' + ansible.builtin.user: + name: "{{ db_sid | lower }}adm" + comment: "SAP HANA Database System Administrator" + uid: "{{ hdbadm_uid }}" + group: "sapsys" + state: present + shell: "/bin/sh" + home: "/usr/sap/{{ db_sid | upper }}/home" + ... 
# /*---------------------------------------------------------------------------8
# | END |

diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_customconfig.rsp b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_customconfig.rsp
new file mode 100644
index 0000000000..65dd6801b0
--- /dev/null
+++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_customconfig.rsp
@@ -0,0 +1,4 @@
+[persistence]
+basepath_shared = {{ _rsp_basepath_shared }}
+basepath_datavolumes = {{ _rsp_hana_data_basepath }}
+basepath_logvolumes = {{ _rsp_hana_log_basepath }}
diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp
similarity index 98%
rename from deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp
rename to deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp
index bc31db0ffe..85fced0f6e 100644
--- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout_anf.rsp
+++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp
@@ -39,7 +39,7 @@ skip_modify_sudoers=n
 use_pmem=n

 # Enable the installation or upgrade of the SAP Host Agent ( Default: y )
-install_hostagent=n
+install_hostagent=y

 # Database Isolation ( Default: low; Valid values: low | high )
 db_isolation=low
@@ -96,10 +96,10 @@ lss_trust_unsigned_server=n
 volume_encryption=n

 # Location of Data Volumes ( Default: /hana/data/${sid} )
-datapath=/hana/data/${sid}
+datapath={{ _rsp_hana_data_basepath }}

 # Location of Log Volumes ( Default: /hana/log/${sid} )
-logpath=/hana/log/${sid}
+logpath={{ _rsp_hana_log_basepath }}

 # Location of Persistent Memory Volumes ( Default: /hana/pmem/${sid} )
 pmempath=/hana/pmem/${sid}
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml
new file mode 100644
index 0000000000..a9baae5c17
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# TODO: Maybe move these to a group_vars/all/distro file so that they
+# can be shared by all playbooks/tasks automatically, and extend with
+# standardised versions of all similar patterns used in the playbooks.
+# Changed from ansible_os_family to ansible_distribution to support Oracle Linux, since os_family returns the value RedHat by default.
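+# Illustrative values (assumption, for readability): on SLES 15 SP4 these
+# evaluate to distro_name "SLES-15", distribution_id "sles15" and
+# distribution_full_id "sles15.4"; on RHEL 8.6 to "REDHAT-8", "redhat8"
+# and "redhat8.6" respectively.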
+distro_name: "{{ ansible_distribution | upper }}-{{ ansible_distribution_major_version }}"
+distribution_id: "{{ ansible_distribution | lower ~ ansible_distribution_major_version }}"
+distribution_full_id: "{{ ansible_distribution | lower ~ ansible_distribution_version }}"
+
+
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml
new file mode 100644
index 0000000000..5a71780e12
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml
@@ -0,0 +1,220 @@
+---
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Set Runtime Parameters - e.g. Sub ID, Resource group name |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+# ----------------------------------------
+# BEGIN
+# ----------------------------------------
+
+- name: "Cluster Type - parameters to be used..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "database_cluster_type: {{ database_cluster_type }}"
+
+- name: "1.18.0 Generic Pacemaker - Retrieve Subscription ID and Resource Group Name"
+ ansible.builtin.uri:
+ url: http://169.254.169.254/metadata/instance?api-version=2021-02-01
+ use_proxy: false
+ headers:
+ Metadata: true
+ register: azure_metadata
+
+- name: "1.18.0 Generic Pacemaker - Show IMDS results"
+ ansible.builtin.debug:
+ var: azure_metadata.json
+ verbosity: 2
+
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Fencing Specific |
+# | |
+# +------------------------------------4--------------------------------------*/
+- name: "Cluster Type: Fencing"
+ block:
+
+ - name: "Cluster Type: Fencing specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "Cluster Type: Fencing specific..."
+
+ - name: "1.18.0 Generic Pacemaker - Extract details"
+ ansible.builtin.set_fact:
+ fencing_spn_subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}"
+ resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}"
+
+ when:
+ - (database_cluster_type == "AFA")
+# /*---------------------------------------------------------------------------8
+# | |
+# | Fencing Specific - END |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | SBD/ISCSI Specific |
+# | |
+# +------------------------------------4--------------------------------------*/
+- name: "Cluster Type: SBD"
+ block:
+
+ - name: "Cluster Type: SBD specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "Cluster Type: SBD specific..."
+
+ when:
+ - (database_cluster_type == "ASD")
+
+- name: "Cluster Type: ISCSI"
+ block:
+
+ - name: "Cluster Type: iSCSI specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "Cluster Type: iSCSI specific..."
+ when: + (database_cluster_type == "ISCSI") + +# /*---------------------------------------------------------------------------8 +# | | +# | SBD/ISCSI Specific - END | +# | | +# +------------------------------------4--------------------------------------*/ + + +- name: "1.18.0 Generic Pacemaker - Extract details" + ansible.builtin.set_fact: + primary_ip: "{{ azure_metadata.json.network.interface[0].ipv4.ipAddress[0].privateIpAddress }}" + subnet_prefix: "{{ azure_metadata.json.network.interface[0].ipv4.subnet[0].prefix }}" + +- name: "1.18.0 Generic Pacemaker - Set unique variable fact" + ansible.builtin.set_fact: + "host_var": "{{ ansible_hostname | replace('-','_') }}" + +- name: "1.18.0 Generic Pacemaker - Extract VM Name" + ansible.builtin.set_fact: + vm_name: "{{ azure_metadata.json.compute.name }}" + +- name: "Wait for primary_ip check on current node to finish" + ansible.builtin.set_fact: + "is_primIP_defined_on_host": "{{ hostvars[ansible_hostname].primary_ip is defined }}" + retries: 30 + delay: 60 + until: is_primIP_defined_on_host + + +- name: "1.18.0 Generic Pacemaker - Extract NIC IPs" + ansible.builtin.set_fact: + "{{ host_var }}_instance_ip": "{{ hostvars[ansible_hostname]['primary_ip'] | string }}" + +- name: "1.18.0 Generic Pacemaker - Show Details" + ansible.builtin.debug: + msg: + - "FENCING :" + - " SUBSCRIPTION : {% if fencing_spn_subscription_id is defined %}{{ fencing_spn_subscription_id }}{% else %}NOT DEFINED{% endif %}" + - " RESOURCE GROUP : {% if resource_group_name is defined %}{{ resource_group_name }}{% else %}NOT DEFINED{% endif %}" + - " SUBNET PREFIX : {{ subnet_prefix }}" + verbosity: 2 + + +# /*---------------------------------------------------------------------------8 +# | | +# | Configure SSH Keys for inter-node communication as root for SUSE | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "1.18.0 Generic Pacemaker - Configure SSH Keys for inter-node communication as root for SUSE" + block: + - name: "1.18.0 Generic Pacemaker - Ensure there are SSH keys for the root user to communicate between nodes" + ansible.builtin.shell: ssh-keygen -b 4096 -t rsa -f /root/.ssh/id_rsa -q -N "" + args: + creates: /root/.ssh/id_rsa + + - name: "1.18.0 Generic Pacemaker - Ensure there is a public key for the root user SSH key" + ansible.builtin.shell: ssh-keygen -f /root/.ssh/id_rsa -y > /root/.ssh/id_rsa.pub + args: + creates: /root/.ssh/id_rsa.pub + + - name: "1.18.0 Generic Pacemaker - Ensure the Public SSH keys are available for exchanging SSH key trust between nodes" + ansible.builtin.command: cat /root/.ssh/id_rsa.pub + register: cluster_public_ssh_key + changed_when: false + + - name: "1.18.0 Generic Pacemaker - Set SSH fact" + ansible.builtin.set_fact: + cluster_public_ssh_key: "{{ cluster_public_ssh_key.stdout }}" + when: + - ansible_os_family | upper == "SUSE" + +- name: "1.18.0 Generic Pacemaker - Wait for ssh key facts to be present" + when: + - ansible_os_family | upper == "SUSE" + block: + - name: "Wait for cluster_public_ssh_key check on nodes to finish" + ansible.builtin.set_fact: + "is_ssh_defined_on_{{ host_var }}": "{{ hostvars[ansible_hostname].cluster_public_ssh_key is defined }}" + retries: 30 + delay: 60 + until: is_ssh_defined_on_{{ host_var }} + + +# /*---------------------------------------------------------------------------8 +# | | +# | Configure SSH Keys for inter-node communication as root for SUSE - END | +# | | +# 
+------------------------------------4--------------------------------------*/ + +# /*---------------------------------------------------------------------------8 +# | | +# | Validate packages for Pacemaker installation - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ +# Currently we can only validate the version numbers for packages on SUSE and RHEL +# We need to add support for comparing minor releases. +# The current version filter is not able to do that. + +- name: "1.18.0 Generic Pacemaker - Get packages information" + ansible.builtin.package_facts: + +- name: "1.18.0 Generic Pacemaker - Set packages information ({{ ansible_os_family | upper }})" + ansible.builtin.set_fact: + pacemaker_version: "{{ ansible_facts.packages['pacemaker'] | map(attribute='version') | first | default('') }}" + resource_agents_version: "{{ ansible_facts.packages['resource-agents'] | map(attribute='version') | first | default('') }}" + +- name: "1.18.0 Generic Pacemaker - Show packages information ({{ ansible_os_family | upper }})" + when: ansible_os_family | upper == "REDHAT" + ansible.builtin.debug: + msg: + - "pacemaker: {{ ansible_facts.packages['pacemaker'] }}" + - "resource-agents: {{ ansible_facts.packages['resource-agents'] }}" + - "fence-agents-azure-arm: {{ ansible_facts.packages['fence-agents-azure-arm'] }}" + +- name: "1.18.0 Generic Pacemaker - Show packages information ({{ ansible_os_family | upper }})" + when: ansible_os_family | upper == "SUSE" + ansible.builtin.debug: + msg: + - "pacemaker: {{ ansible_facts.packages['pacemaker'] }}" + - "resource-agents: {{ ansible_facts.packages['resource-agents'] }}" + +- name: "Verify that the packages are the right version" + ansible.builtin.assert: + that: + - 'ansible_facts.packages[ packages_list.name ][0].version is version( packages_list.version , ">=", version_type= packages_list.version_type )' + fail_msg: "{{ packages_list.name }} version is not greater than {{ packages_list.version }}" + success_msg: "{{ packages_list.name }} version is greater than {{ packages_list.version }}" + register: package_version_results + loop: "{{ package_versions[distribution_full_id] }}" + loop_control: + loop_var: packages_list + +... +# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml new file mode 100644 index 0000000000..5d54a1a542 --- /dev/null +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml @@ -0,0 +1,127 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | This file runs common pre-check tasks before the | +# | OS specific clustering is performed | +# | | +# +------------------------------------4--------------------------------------*/ + +# /*---------------------------------------------------------------------------8 +# | | +# | Fencing Specific | +# | | +# +------------------------------------4--------------------------------------*/ +- name: "Cluster Type: Fencing" + block: + + - name: "BEGIN: Fencing specific..." + ansible.builtin.debug: + msg: # Best method for formatting output with Azure Devops Logs + - "BEGIN: Fencing specific..." 
+
+ - name: "1.18.1 Generic Pacemaker - Check the fencing agent configuration variables are set"
+ ansible.builtin.assert:
+ that:
+ - "fencing_spn_subscription_id is defined"
+ - "fencing_spn_subscription_id | trim | length > 0"
+ - "fencing_spn_tenant_id is defined"
+ - "fencing_spn_tenant_id | trim | length > 0"
+ - "fencing_spn_client_id is defined"
+ - "fencing_spn_client_id | trim | length > 0"
+ - "fencing_spn_client_pwd is defined"
+ - "fencing_spn_client_pwd | trim | length > 0"
+ fail_msg: "A highly available deployment requires that an SPN is defined for the fencing agent"
+ when:
+ - not use_msi_for_clusters
+
+ - name: "END : Fencing specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "END : Fencing specific..."
+
+ when:
+ - (database_cluster_type == "AFA")
+# /*---------------------------------------------------------------------------8
+# | |
+# | Fencing Specific - END |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | SBD Specific |
+# | |
+# +------------------------------------4--------------------------------------*/
+- name: "Cluster Type: SBD"
+ block:
+
+ - name: "BEGIN: SBD specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "BEGIN: SBD specific..."
+
+
+ - name: "END : SBD specific..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "END : SBD specific..."
+
+ when:
+ - (database_cluster_type == "ASD") or
+ (database_cluster_type == "ISCSI")
+# /*---------------------------------------------------------------------------8
+# | |
+# | SBD Specific - END |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | SUSE Specific |
+# | |
+# +------------------------------------4--------------------------------------*/
+- name: "SUSE specific"
+ block:
+
+ - name: "1.18.1 Generic Pacemaker - Check the required Clustering scripts are available"
+ ansible.builtin.stat:
+ path: "/usr/sbin/crm"
+ register: cluster_scripts_status_results
+ failed_when: not cluster_scripts_status_results.stat.exists
+
+ when:
+ - ansible_os_family | upper == "SUSE"
+# /*---------------------------------------------------------------------------8
+# | |
+# | SUSE Specific - END |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+- name: "1.18.1 Generic Pacemaker - Check the required cluster password is set"
+ ansible.builtin.assert:
+ that:
+ - "password_ha_db_cluster is defined"
+ - "password_ha_db_cluster | trim | length > 0"
+ fail_msg: "A highly available database deployment requires that a cluster password is set"
+
+- name: "1.18.1 Generic Pacemaker - Check if a cluster has already been prepared"
+ ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}"
+ register: cluster_existence_check_result
+ changed_when: false
+ failed_when: false
+
+- name: "1.18.1 Generic Pacemaker - Save settings"
+ ansible.builtin.set_fact:
+ cluster_existence_check: "{{ cluster_existence_check_result.rc | int }}"
+
+- name: "1.18.1 Generic Pacemaker - Show if a cluster has already been prepared"
+ ansible.builtin.debug:
+ msg:
+ - "CLUSTER VALIDATION : {{ cluster_existence_check }}"
+
+...
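+# Note (illustrative assumption): cluster_status_cmd is a dictionary defined
+# in shared variables, mapping the OS family to its cluster status command,
+# typically "pcs status" on RedHat and "crm status" on SUSE. A return code of
+# 0 therefore means a cluster is already configured on this node, and
+# cluster_existence_check lets later tasks skip the initial setup.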
+# /*---------------------------------------------------------------------------8
+# | END |
+# +------------------------------------4--------------------------------------*/
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.1-iSCSI.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.1-iSCSI.yml
new file mode 100644
index 0000000000..ca47ef84d0
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.1-iSCSI.yml
@@ -0,0 +1,148 @@
+---
+# /*---------------------------------------------------------------------------8
+# | |
+# +------------------------------------4--------------------------------------*/
+# Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-suse-pacemaker#set-up-the-iscsi-target-server-sbd-device
+# Note: This needs to be done on majority maker as well if the cluster type is Scale out - shared nothing.
+
+- name: "1.18.1 iSCSI packages"
+ community.general.zypper:
+ name: "{{ item }}"
+ state: present
+ loop:
+ - open-iscsi
+ - sbd
+
+- name: "1.18.1 iSCSI packages"
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ enabled: true
+ loop:
+ - iscsid
+ - iscsi
+ - sbd
+
+ # Forcing this to 'db' as the parent task 1.18 is only applicable for HANA scale out clusters
+- name: "1.18.1 iSCSI packages - Get node type on which we will operate"
+ ansible.builtin.set_fact:
+ iscsi_node_type: 'db'
+
+- name: "1.18.1 iSCSI packages - Get initiator name"
+ ansible.builtin.set_fact:
+ iscsi_initiator_name: "{{ iscsi_servers |
+ selectattr('type', 'equalto', iscsi_node_type) |
+ map(attribute='iqn') | list | unique | first }}"
+
+- name: "1.18.1 iSCSI packages - print iSCSI node type and initiator name"
+ when: iscsi_node_type is defined and iscsi_initiator_name is defined
+ ansible.builtin.debug:
+ msg: "iSCSI node type: {{ iscsi_node_type }}, initiator name: {{ iscsi_initiator_name }}"
+ verbosity: 2
+
+- name: "1.18.1 iSCSI packages - Get hostname index"
+ ansible.builtin.set_fact:
+ hostname_index: "{{ ansible_play_hosts_all.index(ansible_hostname) }}"
+
+- name: "1.18.1 iSCSI packages - Set the acl names"
+ ansible.builtin.set_fact:
+ acl_name_db_{{ hostname_index }}: "iqn.2006-04.{{ sap_sid | lower }}-x{{ iscsi_node_type }}-0.local:{{ sap_sid | lower }}-x{{ iscsi_node_type }}-{{ hostname_index }}"
+
+- name: "1.18.1 iSCSI packages - print acl names"
+ ansible.builtin.debug:
+ msg: "acl_name_db_{{ hostname_index }}: {{ vars['acl_name_db_'+hostname_index] }}"
+ verbosity: 2
+
+- name: "1.18.1 iSCSI configuration - replace InitiatorName - DB"
+ when:
+ - iscsi_node_type == 'db'
+ block:
+ - name: "1.18.1 iSCSI configuration - replace InitiatorName - {{ ansible_hostname }}"
+ ansible.builtin.replace:
+ path: /etc/iscsi/initiatorname.iscsi
+ regexp: '^InitiatorName=.*$'
+ replace: "InitiatorName={{ vars['acl_name_db_'+hostname_index] }}"
+ backup: true
+
+- name: "1.18.1 iSCSI configuration - restart iSCSI services"
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ state: restarted
+ loop:
+ - iscsid
+ - iscsi
+
+- name: "1.18.1 iSCSI configuration block - iterate through the available iSCSI servers"
+ when:
+ - iscsi_node_type is defined
+ - iscsi_node_type in ['db']
+ - iscsi_initiator_name is defined
+ block:
+ # - name: "1.18.1 iSCSI configuration - discovery, login and update node.startup"
+ # when:
+ # - iscsi_node_type == item.type
+ # - iscsi_initiator_name == item.iqn
+ # ansible.builtin.shell:
+ # cmd: >-
+ # iscsiadm -m discovery --type=st --portal={{ item.host }}:3260;
+ # iscsiadm -m
node -T {{ item.iqn }} --login --portal={{ item.host }}:3260;
+ # iscsiadm -m node -T {{ item.iqn }} -p {{ item.host }}:3260 --op=update \
+ # --name=node.startup --value=automatic
+ # loop: "{{ iscsi_servers }}"
+
+ - name: "1.18.1 iSCSI configuration - discovery, login and update node.startup using community.general.open_iscsi"
+ community.general.open_iscsi:
+ login: true
+ portal: "{{ item.host }}"
+ auto_portal_startup: true
+ discover: true
+ target: "{{ item.iqn }}"
+ loop: "{{ iscsi_servers }}"
+ register: iscsi_configuration_result
+ when:
+ - iscsi_node_type == item.type
+ - iscsi_initiator_name == item.iqn
+
+- name: "1.18.1 reload iSCSI"
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ enabled: true
+ daemon_reload: true
+ loop:
+ - iscsid
+ - iscsi
+
+- name: "1.18.1 iSCSI packages - print iSCSI configuration result"
+ when:
+ - iscsi_node_type is defined
+ - iscsi_node_type in ['db']
+ - iscsi_initiator_name is defined
+ ansible.builtin.debug:
+ msg: "{{ iscsi_configuration_result }}"
+ verbosity: 2
+
+- name: "1.18.1 iSCSI packages - product of vms in group and iscsi_configuration_result devicenodes attribute"
+ ansible.builtin.set_fact:
+ iscsi_devices_on_client: "{{ iscsi_configuration_result.results | selectattr('devicenodes', 'defined') |
+ map(attribute='devicenodes') | select() | flatten(levels=1) |
+ default([]) | unique | list }}"
+ when:
+ - iscsi_configuration_result is defined
+
+- name: "1.18.1 iSCSI packages - product of ansible_play_hosts_all in group and iscsi_configuration_result devices on client"
+ ansible.builtin.set_fact:
+ iscsi_device_map: "{{ [ansible_hostname] | product(iscsi_devices_on_client) |
+ default([]) | unique | list }}"
+ when:
+ - iscsi_configuration_result is defined
+
+- name: "1.18.1 iSCSI packages - product of vms in group and iscsi_configuration_result devicenodes attribute"
+ ansible.builtin.debug:
+ msg:
+ - "iscsi_devices_on_client: {{ iscsi_devices_on_client }}"
+ - "iscsi_device_map: {{ iscsi_device_map }}"
+ verbosity: 2
+ when:
+ - iscsi_configuration_result is defined
+
+
+...
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.2-sbd.yaml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.2-sbd.yaml
new file mode 100644
index 0000000000..6922d04805
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.2-sbd.yaml
@@ -0,0 +1,269 @@
+---
+# /*---------------------------------------------------------------------------8
+# | |
+# +------------------------------------4--------------------------------------*/
+# https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-suse-pacemaker#overview
+
+# 1) Create and attach an Azure Shared Disk
+# 2) Set up an Azure shared disk SBD device
+# create SBD device
+# Edit SBD Config file
+# Softdog config file
+# Load Module
+
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Show important parameters |
+# | |
+# +------------------------------------4--------------------------------------*/
+- name: "Parameters to be used..."
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ - "database_cluster_type: {{ database_cluster_type }}"
+ - "sbdDevices: {{ sbdDevices }}"
+ verbosity: 2
+ when:
+ - (database_cluster_type == "ASD")
+
+- name: "Parameters to be used..."
+ ansible.builtin.debug: + msg: # Best method for formatting output with Azure Devops Logs + - "database_cluster_type: {{ database_cluster_type }}" + - "iscsi_servers: {{ iscsi_servers }}" + verbosity: 2 + when: + - (database_cluster_type == "ISCSI") + + +- name: "Parameters testing..." + ansible.builtin.debug: + msg: # Best method for formatting output with Azure Devops Logs + - "item.host: {{ item.host }}" + - "item.lun: {{ item.lun }}" + verbosity: 2 + loop: "{{ sbdDevices }}" + when: + - (database_cluster_type == "ASD") + + +# - name: "Parameters testing..." +# ansible.builtin.debug: +# var: hostvars[ansible_hostname] +# verbosity: 2 + + +# /*---------------------------------------------------------------------------8 +# | | +# | ASD | +# | | +# +------------------------------------4--------------------------------------*/ +- name: 'SBD - ASD - discover and create SBD device(s)' + block: + # +------------------------------------ + # Build List of Lists + # sbdMap: [ + # { + # host: + # lun: + # blockDev: + # diskByLun: + # diskById: + # } + # ] + - name: "ASD - Build SBD Dictionary" + ansible.builtin.set_fact: + sbdMap: "{{ sbdMap + sbdMap_update }}" + vars: + sbdMap_update: + - { host: "{{ item.host }}" , + lun: "{{ item.LUN }}" , + blockDev: '' , + diskByLun: '' , + diskById: '' } + loop: "{{ sbdDevices }}" + when: + - (database_cluster_type == "ASD") + - item.host == ansible_hostname + + - name: "ISCSI - Build SBD Dictionary" + ansible.builtin.set_fact: + sbdMap: "{{ sbdMap + sbdMap_update }}" + vars: + sbdMap_update: + - { host: "{{ item[0] }}" , + lun: '' , + blockDev: '{{ item[1] }}' , + diskByLun: '' , + diskById: '' } + loop: "{{ iscsi_device_map }}" + when: + - (database_cluster_type == "ISCSI") + + - name: "SBD Dictionary..." + ansible.builtin.debug: + var: sbdMap + verbosity: 2 + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") + + - name: "SBD Dictionary..." + ansible.builtin.debug: + msg: # Best method for formatting output with Azure Devops Logs + - "item.host: {{ item.host }}" + - "item.blockDev: {{ item.blockDev }}" + verbosity: 2 + loop: "{{ sbdMap }}" + when: + - (database_cluster_type == "ISCSI") + + # +------------------------------------ + # -------------------------------------+---------------------------------------8 + # Step: 03 + # Description: Dynamic Processing - Iterate over BOM dictionary to find Dynamic + # entries. + # + - name: "Update Block Device" + ansible.builtin.include_tasks: "1.18.1.3-sbd-deviceUpdate.yaml" + loop: "{{ sbdMap }}" + loop_control: + loop_var: sdbMap_entry + index_var: sdbMap_index + # vars: + # _step: "deviceUpdate" + # Step: 03 - END + # -------------------------------------+---------------------------------------8 + + - name: "Update Device Info: Show SBD Dictionary..." + ansible.builtin.debug: + var: sbdMap + verbosity: 2 + + + # /*------------------------------------ + # | + # | create sbd devices + # | + # +------------------------------------4 + - name: "dump SBD Device(s)..." + ansible.builtin.command: "sbd + {%- for entry in sbdMap +%} + -d {{ entry.diskById }} + {%- endfor +%} + dump + " + ignore_errors: true + register: sbdDumpCommand_results + run_once: true # run on only one host in play + + - name: "Show dump SBD device(s) results..." + ansible.builtin.debug: + var: sbdDumpCommand_results + verbosity: 2 + run_once: true # run on only one host in play + + - name: "create SBD Device(s)..." + block: + + - name: "create SBD Device(s)..." 
+ ansible.builtin.command: "sbd + {%- for entry in sbdMap +%} + -d {{ entry.diskById }} + {%- endfor +%} + -1 60 -4 120 create + " + register: sbdCreateCommand_results + + + - name: "Show block device results..." + ansible.builtin.debug: + var: sbdCreateCommand_results + verbosity: 2 + + run_once: true # run on only one host in play + when: sbdDumpCommand_results.rc == 1 + + + # /*------------------------------------ + # | + # | sbd config file + # | + # +------------------------------------4 + # Type: string + # Default: "" + # + # SBD_DEVICE specifies the devices to use for exchanging sbd messages + # and to monitor. If specifying more than one path, use ";" as + # separator. + # SBD_DEVICE="/dev/sdb1;/dev/sdc1;/dev/sdc2" + # + # SBD_DEVICE="" + - name: "SBD config file" + ansible.builtin.lineinfile: + path: /etc/sysconfig/sbd + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + owner: root + group: root + mode: '0644' + backup: true + loop: + - { regexp: "^SBD_PACEMAKER=.*", line: 'SBD_PACEMAKER=yes' } + - { regexp: "^SBD_STARTMODE=.*", line: 'SBD_STARTMODE=always' } + # Format line as so: + # SBD_DEVICE="/dev/disk/by-id/scsi-3600224804208a67da8073b2a9728af19" + # SBD_DEVICE="/dev/disk/by-id/scsi-360022480ef971a6c5759f2dc3adf4c96;/dev/disk/by-id/scsi-360022480ea4064ac4952ea8452d66317" + - { regexp: "^SBD_DEVICE=.*", line: 'SBD_DEVICE=" + {%- for entry in sbdMap %} + {%- if loop.index > 1 %};{% endif -%} + {{ entry.diskById }} + {%- endfor -%} + "'} + + + # /*------------------------------------ + # | + # | softdog config file + # | + # +------------------------------------4 + - name: "softdog - Create config file" + ansible.builtin.template: + src: softdog.conf + dest: /etc/modules-load.d/softdog.conf + owner: root + group: root + mode: '0644' + + + # /*------------------------------------ + # | + # | Load softdog kernel module + # | + # +------------------------------------4 + - name: "softdog - load module" + community.general.modprobe: + name: softdog + state: present + + +# /*------------------------------------ +# | +# | systemctl enable sbd +# | +# +------------------------------------4 +# - name: "systemctl enable sbd" +# ansible.builtin.shell: | +# systemctl enable sbd +- name: "systemctl enable sbd" + ansible.builtin.systemd: + name: sbd + enabled: true + daemon_reload: true + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") +... +# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.3-sbd-deviceUpdate.yaml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.3-sbd-deviceUpdate.yaml new file mode 100644 index 0000000000..8c32b5d501 --- /dev/null +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1.3-sbd-deviceUpdate.yaml @@ -0,0 +1,91 @@ +--- + +- name: "Update Device Info: Parameters to be used..." 
+ ansible.builtin.debug:
+ msg: # Best method for formatting output with Azure Devops Logs
+ # - "_step: {{ _step }}"
+ - "database_cluster_type: {{ database_cluster_type }}"
+ - "sbdMap: {{ sbdMap }}"
+ - "sdbMap_entry: {{ sdbMap_entry }}"
+ - "sdbMap_index: {{ sdbMap_index }}"
+ - "sbdMap[sdbMap_index].lun: {{ sbdMap[sdbMap_index].lun }}"
+ - "sdbMap_entry.lun: {{ sdbMap_entry.lun }}"
+ verbosity: 2
+ when:
+ - database_cluster_type == 'ASD'
+
+- name: "Update Device Info: Get block device"
+ ansible.builtin.command: "readlink -f /dev/disk/azure/scsi1/lun{{ sbdMap[sdbMap_index].lun }}"
+ register: blockDevice_results
+ when:
+ - database_cluster_type == 'ASD'
+
+
+- name: "Update Device Info: Show block device results..."
+ ansible.builtin.debug:
+ var: blockDevice_results.stdout
+ verbosity: 2
+ when:
+ - database_cluster_type == 'ASD'
+
+
+- name: "Update Device Info: Find scsi by id..."
+ ansible.builtin.find:
+ path: /dev/disk/by-id
+ patterns: 'scsi-3*'
+ file_type: link
+ use_regex: false
+ register: sscsiById
+
+- name: "Update Device Info: Show scsi by id paths..."
+ ansible.builtin.debug:
+ var: item.path
+ verbosity: 2
+ loop: "{{ sscsiById.files }}"
+
+
+- name: "Update Device Info: Translate block device to scsi by id..."
+ ansible.builtin.stat:
+ path: "{{ item.path }}"
+ follow: false
+ loop: "{{ sscsiById.files }}"
+ register: statById
+
+
+- name: "Update Device Info: Update SBD Dictionary - ASD"
+ ansible.builtin.set_fact:
+ sbdMap: "{{ sbdMap_update }}"
+ vars:
+ sbdMap_update: "
+ {#- -#}
+ {% set _ = sbdMap[sdbMap_index].update({'blockDev': blockDevice_results.stdout}) -%}
+ {% set _ = sbdMap[sdbMap_index].update({'diskByLun': blockDevice_results.cmd[2]}) -%}
+ {% set _ = sbdMap[sdbMap_index].update({'diskById': item.stat.path}) -%}
+ {{ sbdMap }}
+ "
+ loop: "{{ statById.results }}"
+ when:
+ - database_cluster_type == 'ASD'
+ - item.stat.lnk_source == blockDevice_results.stdout
+
+- name: "Update Device Info: Update SBD Dictionary - ISCSI"
+ ansible.builtin.set_fact:
+ sbdMap: "{{ sbdMap_update }}"
+ vars:
+ sbdMap_update: "
+ {#- -#}
+ {% set _ = sbdMap[sdbMap_index].update({'diskById': item.stat.path}) -%}
+ {{ sbdMap }}
+ "
+ loop: "{{ statById.results }}"
+ when:
+ - database_cluster_type == 'ISCSI'
+ - ansible_hostname == sbdMap[sdbMap_index].host
+ - item.stat.lnk_source == sbdMap[sdbMap_index].blockDev
+
+- name: "Update Device Info: Show SBD Dictionary..."
+ ansible.builtin.debug:
+ var: sbdMap
+ verbosity: 2
+
+...
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml
new file mode 100644
index 0000000000..c348c0b190
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml
@@ -0,0 +1,137 @@
+---
+# /*---------------------------------------------------------------------------8
+# | |
+# | This file runs common pre-provisioning tasks before the |
+# | OS specific clustering is performed |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+- name: "1.18.2 Generic Pacemaker - Ensure systemctl daemon is reloaded"
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+
+# # /*----------------------------Supplementary tasks for all HANA + majority maker nodes------------#
+# # note: we are going to install the hooks in this stage as 4.0.3 runs on all HANA nodes and Observer nodes.
+# # 5.5 runs only on HANA nodes, so majority maker will be bypassed, resulting in cluster creation failure since it can't find the resource agent/python hook
+
+
+- name: Implement the Python system replication hook SAPHanaSR-ScaleOut MultiTarget (SUSE)
+ when:
+ - node_tier in ['observer','hana']
+ - platform == 'HANA'
+ - db_scale_out
+ - database_high_availability
+ - ansible_os_family | upper == "SUSE"
+ block:
+ - name: Generate list of deployed packages on current host
+ ansible.builtin.package_facts:
+
+ # SAPHanaSR-ScaleOut conflicts with SAPHanaSR and dependencies
+ - name: "Ensure SAPHanaSR package is absent"
+ ansible.builtin.package:
+ name: SAPHanaSR
+ state: absent
+ when:
+ - ansible_facts.packages['SAPHanaSR'] is defined
+
+ - name: "Ensure SAPHanaSR-doc package is absent"
+ ansible.builtin.package:
+ name: SAPHanaSR-doc
+ state: absent
+ when:
+ - ansible_facts.packages['SAPHanaSR-doc'] is defined
+
+ - name: "Ensure yast2-sap-ha package is absent"
+ ansible.builtin.package:
+ name: yast2-sap-ha
+ state: absent
+ when:
+ - ansible_facts.packages['yast2-sap-ha'] is defined
+
+ # Ensure SAPHANA SR Scaleout package is installed
+ - name: "Ensure SAPHanaSR-ScaleOut package is installed"
+ ansible.builtin.package:
+ name: SAPHanaSR-ScaleOut
+ state: present
+ when:
+ - ansible_os_family | upper == "SUSE"
+ - ansible_facts.packages['SAPHanaSR-ScaleOut'] is not defined
+
+ - name: "Ensure SAPHanaSR-ScaleOut-doc package is installed"
+ ansible.builtin.package:
+ name: SAPHanaSR-ScaleOut-doc
+ state: present
+ when:
+ - ansible_os_family | upper == "SUSE"
+ - ansible_facts.packages['SAPHanaSR-ScaleOut-doc'] is not defined
+
+
+- name: Implement the Scale out Resource Agent hook (REDHAT)
+ when:
+ - node_tier in ['observer','hana']
+ - db_scale_out
+ - database_high_availability
+ - ansible_os_family | upper == "REDHAT"
+ block:
+ - name: Generate list of deployed packages on current host
+ ansible.builtin.package_facts:
+
+ - name: "Ensure resource-agents-sap-hana is absent (REDHAT)"
+ ansible.builtin.package:
+ name: resource-agents-sap-hana
+ state: absent
+ when:
+ - ansible_facts.packages['resource-agents-sap-hana'] is defined
+
+ - name: "Ensure resource-agents-sap-hana-scaleout is installed (REDHAT)"
+ ansible.builtin.package:
+ name: resource-agents-sap-hana-scaleout
+ state: present
+ when:
+ - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined
+
+
+
+
+# SSH access between nodes is only required on SUSE for crm_clustering
+- name: "1.18.2 Generic Pacemaker - SUSE specific network and SSH configuration"
+ when: ansible_os_family | upper == "SUSE"
+ block:
+ # Change the configuration file for the network interface to prevent the cloud
+ # network plugin from removing the virtual IP address (Pacemaker must control
+ # the VIP assignment)
+ # Refer to: https://www.suse.com/support/kb/doc/?id=7023633 for more information
+ - name: "1.18.2 Generic Pacemaker - Ensure that network interface is not managed by cloud network plugin"
+ become: true
+ ansible.builtin.lineinfile:
+ path: /etc/sysconfig/network/ifcfg-eth0
+ backup: true
+ regexp: '^CLOUD_NETCONFIG_MANAGE='
+ line: CLOUD_NETCONFIG_MANAGE='no'
+ tags:
+ - cloudnetmanage
+
+ - name: "1.18.2 Generic Pacemaker - Ensure the current Node trusts public key of participating nodes"
+ ansible.posix.authorized_key:
+ user: root
+ key: "{{ hostvars[item].cluster_public_ssh_key }}"
+ state: present
+ when: ansible_hostname != item
+ with_items:
+ - "{{ ansible_play_hosts_all }}"
+
+ - name: 1.18.2 Generic
Pacemaker - Ensure trust relationship is working from current to remaining hosts + ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ item }} "hostname -s" + register: primary_to_secondary_ssh_result + changed_when: false + failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != item + when: ansible_hostname != item + with_items: + - "{{ ansible_play_hosts_all }}" + + +# Clustering commands are based on the Host OS +- name: "1.18.2 Generic Pacemaker - Cluster based on {{ ansible_os_family }}" + ansible.builtin.include_tasks: "1.18.2.0-cluster-{{ ansible_os_family }}.yml" + # when: cluster_existence_check != 0 diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml new file mode 100644 index 0000000000..1737a03dfc --- /dev/null +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml @@ -0,0 +1,391 @@ +--- + + +# @TODO Subscribe to subscriptions/repos if required +# This code assumes the deployment is using RHEL SAP image + +# /*---------------------------------------------------------------------------8 +# | | +# | RHEL Clustering | +# | ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker +# | | +# RHEL 8.4: resource-agents-4.1.1-90.13 +# RHEL 8.6: resource-agents-4.9.0-16.9 +# RHEL 8.8 and newer: resource-agents-4.9.0-40.1 +# RHEL 9.0 and newer: resource-agents-cloud-4.10.0-34.2 + +# +------------------------------------4--------------------------------------*/ + +- name: "1.18.2.0 Generic Pacemaker - Check if we are on RHEL 9 or newer" + ansible.builtin.set_fact: + is_rhel_9_or_newer: "{{ ansible_distribution_major_version | int >= 9 }}" + +- name: "1.18.2.0 Generic Pacemaker - Check if we are on RHEL 8.4 or newer" + ansible.builtin.set_fact: + is_rhel_84_or_newer: "{{ ansible_distribution_version is version('8.4', '>=') }}" + +- name: "1.18.2.0 Generic Pacemaker - Ensure HA Cluster password is set to something secure" + ansible.builtin.user: + name: hacluster + password: "{{ password_ha_db_cluster | password_hash('sha512', 65534 | random(seed=None) | string) }}" + +- name: "1.18.2.0 Generic Pacemaker - Ensure the firewall service is configured to allow High Availability traffic" + ansible.posix.firewalld: + service: high-availability + state: enabled + permanent: true + +- name: "1.18.2.0 Generic Pacemaker - Ensure the basic cluster services are enabled and running" + ansible.builtin.systemd: + name: pcsd.service + state: started + enabled: true + +# Basic Pacemaker cluster configuration: +# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker +- name: "1.18.2.0 Generic Pacemaker - Create the cluster on the primary node: {{ primary_instance_name }}" + when: ansible_hostname == primary_instance_name + block: + - name: "1.18.2.0 Generic Pacemaker - Ensure the cluster can authenticate nodes as hacluster" + ansible.builtin.command: "pcs cluster auth {{ ansible_play_hosts_all | join (' ') }} -u hacluster -p {{ password_ha_db_cluster }}" + when: ansible_distribution_major_version == "7" + + - name: "1.18.2.0 Generic Pacemaker - Ensure the cluster can authenticate nodes as hacluster" + ansible.builtin.command: "pcs host auth {{ ansible_play_hosts_all | join (' ') }} -u hacluster -p {{ password_ha_db_cluster }}" + when: ansible_distribution_major_version in ["8", "9"] + + # Ref 
https://docs.microsoft.com/en-us/azure/virtual-machines/maintenance-and-updates#maintenance-that-doesnt-require-a-reboot
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the cluster is created with a token large enough to allow memory preserving maintenance"
+ ansible.builtin.command: "pcs cluster setup --name {{ db_sid | upper }}_cluster {{ ansible_play_hosts_all | join (' ') }} --token {{ cluster_totem.token }}"
+ when: ansible_distribution_major_version == "7" and node_tier == 'hana'
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the cluster is created with a token large enough to allow memory preserving maintenance"
+ ansible.builtin.command: "pcs cluster setup {{ db_sid | upper }}_cluster {{ ansible_play_hosts_all | join (' ') }} totem token={{ cluster_totem.token }}"
+ when: ansible_distribution_major_version in ["8", "9"] and node_tier == 'hana'
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the cluster is starting on all nodes"
+ ansible.builtin.command: pcs cluster start --all
+ changed_when: false
+
+ - name: "1.18.2.0 Generic Pacemaker - Enable cluster to auto-start after reboot"
+ ansible.builtin.command: pcs cluster enable --all
+ changed_when: false
+
+ - name: "1.18.2.0 Generic Pacemaker - Allow Concurrent Fencing"
+ ansible.builtin.command: pcs property set concurrent-fencing=true
+ changed_when: false
+
+ - name: "1.18.2.0 Generic Pacemaker - Wait until cluster has stabilized"
+ ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:'
+ register: cluster_stable_check
+ retries: 12
+ delay: 10
+ until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout"
+ when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9"
+
+ # '*' is a special character in regexp and needs to be escaped for literal matching
+ # if we are worried about character spacing across distros we can match for '\* Online:'
+ - name: "1.18.2.0 Generic Pacemaker - Wait until cluster has stabilized"
+ ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:'
+ register: cluster_stable_check
+ retries: 12
+ delay: 10
+ until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout"
+ when: ansible_distribution_major_version in ["8", "9"]
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the expected quorum votes is set for the cluster"
+ ansible.builtin.command: pcs quorum expected-votes "{{ cluster_quorum.expected_votes }}"
+ register: quorum
+ changed_when: quorum.rc == 0
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Fencing - BEGIN |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+# Currently
+- name: "1.18.2.0 Generic Pacemaker - Configure the cluster STONITH device on the primary node"
+ when:
+ - database_cluster_type == "AFA"
+ - inventory_hostname == primary_instance_name
+ block:
+ - name: "1.18.2.0 Generic Pacemaker - Ensure STONITH timeout is raised"
+ ansible.builtin.command: pcs property set stonith-timeout=900
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH device is configured"
+ ansible.builtin.command: >
+ pcs stonith create rsc_st_azure fence_azure_arm
+ login="{{ fencing_spn_client_id }}"
+ passwd="{{ fencing_spn_client_pwd }}"
+ resourceGroup="{{ resource_group_name }}"
+ 
tenantId="{{ fencing_spn_tenant_id }}"
+ subscriptionId="{{ fencing_spn_subscription_id }}"
+ power_timeout=240
+ pcmk_reboot_timeout=900
+ pcmk_monitor_timeout=120
+ pcmk_monitor_retries=4
+ pcmk_action_limit=3
+ pcmk_delay_max=15
+ pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+ when:
+ - ansible_distribution_major_version == "7"
+ - not use_msi_for_clusters
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH device is configured"
+ ansible.builtin.command: >
+ pcs stonith create rsc_st_azure fence_azure_arm
+ msi=true
+ passwd="{{ fencing_spn_client_pwd }}"
+ resourceGroup="{{ resource_group_name }}"
+ subscriptionId="{{ fencing_spn_subscription_id }}"
+ power_timeout=240
+ pcmk_reboot_timeout=900
+ pcmk_monitor_timeout=120
+ pcmk_monitor_retries=4
+ pcmk_action_limit=3
+ pcmk_delay_max=15
+ pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+ when:
+ - ansible_distribution_major_version == "7"
+ - use_msi_for_clusters
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH device is configured"
+ ansible.builtin.command: >
+ pcs stonith create rsc_st_azure fence_azure_arm
+ login="{{ fencing_spn_client_id }}"
+ passwd="{{ fencing_spn_client_pwd }}"
+ resourceGroup="{{ resource_group_name }}"
+ tenantId="{{ fencing_spn_tenant_id }}"
+ subscriptionId="{{ fencing_spn_subscription_id }}"
+ power_timeout=240
+ pcmk_reboot_timeout=900
+ pcmk_monitor_timeout=120
+ pcmk_monitor_retries=4
+ pcmk_action_limit=3
+ pcmk_delay_max=15
+ pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+ when:
+ - ansible_distribution_major_version in ["8", "9"]
+ - not use_msi_for_clusters
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH device is configured (MSI)"
+ ansible.builtin.command: >
+ pcs stonith create rsc_st_azure fence_azure_arm
+ msi=true
+ resourceGroup="{{ resource_group_name }}"
+ subscriptionId="{{ fencing_spn_subscription_id }}"
+ power_timeout=240
+ pcmk_reboot_timeout=900
+ pcmk_monitor_timeout=120
+ pcmk_monitor_retries=4
+ pcmk_action_limit=3
+ pcmk_delay_max=15
+ pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+ when:
+ - ansible_distribution_major_version in ["8", "9"]
+ - use_msi_for_clusters
+
+ - name: "1.18.2.0 Generic Pacemaker - Update Monitor interval"
+ ansible.builtin.command: pcs resource update rsc_st_azure op monitor interval=3600
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH action is set to reboot"
+ ansible.builtin.command: pcs property set stonith-action=reboot
+
+ - name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH device is enabled"
+ ansible.builtin.command: pcs property set stonith-enabled=true
+
+ - name: "1.18.2.0 Generic Pacemaker - Clear any errors during enablement of STONITH device"
+ ansible.builtin.command: pcs resource cleanup
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | Fencing - END |
+# | |
+# +------------------------------------4--------------------------------------*/
+
+# /*---------------------------------------------------------------------------8
+# | |
+# | kdump stonith - BEGIN |
+# | |
+# +------------------------------------4--------------------------------------*/
+# fix the kdump code to include all the 
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                            kdump stonith - BEGIN                           |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+# fix the kdump code to include all the DB hosts.
+
+- name: "1.18.2.0 Generic Pacemaker - Install fence-agents-kdump package"
+  when:
+    - kdump_enabled | default("disabled") == "enabled"
+  ansible.builtin.yum:
+    name: fence-agents-kdump
+    state: present
+  register: fence_agents_kdump_package
+  changed_when: fence_agents_kdump_package.changed
+
+- name: "1.18.2.0 Generic Pacemaker - configure the special fencing device fence_kdump"
+  when:
+    - inventory_hostname == primary_instance_name
+    - kdump_enabled | default("disabled") == "enabled"
+  block:
+
+    # we can assume that the stonith:fence_azure_arm is already configured
+    # if we need to check we can use either pcs stonith show or pcs stonith status
+    # create fence_kdump fencing device in the cluster
+    - name: "1.18.2.0 Generic Pacemaker - Create a fence_kdump fencing device in the cluster"
+      ansible.builtin.command: >
+        pcs stonith create rsc_st_kdump fence_kdump
+          pcmk_reboot_action="off"
+          pcmk_host_list="{% for item in ansible_play_hosts_all %}{{ item }}{{ ' ' if not loop.last }}{% endfor %}"
+          pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+          timeout=30
+
+    - name: "1.18.2.0 Generic Pacemaker - Update Monitor interval"
+      ansible.builtin.command: pcs resource update rsc_st_kdump op monitor interval=3600
+
+    # for each node in the play, set the fence_kdump fencing device as the first fencing device to be used
+    - name: "1.18.2.0 Generic Pacemaker - Set the fence_kdump fencing device as the first for cluster nodes"
+      ansible.builtin.command: pcs stonith level add 1 {{ ansible_hostname }} rsc_st_kdump
+
+    - name: "1.18.2.0 Generic Pacemaker - Set the fence_azure_arm fencing device as the second for cluster nodes"
+      ansible.builtin.command: pcs stonith level add 2 {{ ansible_hostname }} rsc_st_azure
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure that the kdump service is enabled"
+  when:
+    - kdump_enabled | default("disabled") == "enabled"
+  block:
+
+    # Perform the fence_kdump_nodes configuration in /etc/kdump.conf
+    - name: "1.18.2.0 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf"
+      ansible.builtin.replace:
+        path: /etc/kdump.conf
+        regexp: '^#fence_kdump_nodes(.*)$'
+        replace: "fence_kdump_nodes {{ secondary_instance_name }}"
+        backup: true
+      register: kdump_conf_file
+      when:
+        - kdump_enabled | default("disabled") == "enabled"
+        - inventory_hostname == primary_instance_name
+
+    # Perform the fence_kdump_nodes configuration in /etc/kdump.conf
+    - name: "1.18.2.0 Generic Pacemaker - Perform the fence_kdump_nodes configuration in /etc/kdump.conf"
+      ansible.builtin.replace:
+        path: /etc/kdump.conf
+        regexp: '^#fence_kdump_nodes(.*)$'
+        replace: "fence_kdump_nodes {{ primary_instance_name }}"
+        backup: true
+      register: kdump_conf_file
+      when:
+        - kdump_enabled | default("disabled") == "enabled"
+        - inventory_hostname == secondary_instance_name
+
+    # set the kdump path to /usr/crash in /etc/kdump.conf
+    - name: "1.18.2.0 Generic Pacemaker - Set the kdump path to /usr/crash in /etc/kdump.conf"
+      ansible.builtin.replace:
+        path: /etc/kdump.conf
+        regexp: '^path(.*)$'
+        replace: "path /usr/crash"
+        backup: true
+      register: kdump_conf_file_path
+      when:
+        - kdump_enabled | default("disabled") == "enabled"
+
+    # restart kdump service as we made changes to the configuration
+    - name: "1.18.2.0 Generic Pacemaker - Restart kdump service"
+      ansible.builtin.service:
+        name: kdump
+        state: restarted
+
+    # Ensure that the initramfs image file contains the fence_kdump and hosts files
+    - name: "1.18.2.0 Generic Pacemaker - Check that the initramfs image file contains the fence_kdump and hosts files"
+      ansible.builtin.shell: set -o pipefail && lsinitrd /boot/initramfs-$(uname -r)kdump.img | grep -E "fence|hosts"
+      register: initramfs_image_check
+      changed_when: false
+      failed_when: initramfs_image_check.rc != 0
+
+    # print debug on the validation of initramfs
+    - name: "1.18.2.0 Generic Pacemaker - debug initramfs output"
+      ansible.builtin.debug:
+        msg: "initramfs check: {{ initramfs_image_check.stdout }}"
+      when: initramfs_image_check.rc == 0
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                             kdump stonith - END                            |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                       Azure scheduled events - BEGIN                       |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+- name: "1.18.2.0 Generic Pacemaker - check if the OS version is RHEL 8.4 or newer"
+  ansible.builtin.set_fact:
+    is_rhel_84_or_newer: "{{ ansible_distribution_version is version('8.4', '>=') }}"
+  when: ansible_distribution_major_version in ["8", "9"]
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure Azure scheduled events is configured"
+  when:
+    - inventory_hostname == primary_instance_name
+    - is_rhel_84_or_newer | default(false)
+  block:
+    # After configuring the Pacemaker resources for azure-events agent,
+    # when you place the cluster in or out of maintenance mode, you may get warning messages like:
+    #   WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname'
+    #   WARNING: cib-bootstrap-options: unknown attribute 'azure-events_globalPullState'
+    #   WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname'
+    # These warning messages can be ignored.
+    - name: "1.18.2.0 Generic Pacemaker - Ensure maintenance mode is set"
+      ansible.builtin.command: pcs property set maintenance-mode=true
+
+    - name: "1.18.2.0 Generic Pacemaker - Set the node-health-strategy"
+      ansible.builtin.command: pcs property set node-health-strategy=custom
+
+    - name: "1.18.2.0 Generic Pacemaker - Set the cluster health-node-strategy constraint"
+      ansible.builtin.command: pcs constraint location 'regexp%!health-.*' rule score-attribute='#health-azure' defined '#uname'
+
+    - name: "1.18.2.0 Generic Pacemaker - Set the initial value of the cluster attributes"
+      ansible.builtin.shell:
+        cmd: crm_attribute --node {{ item }} --name '#health-azure' --update 0
+      with_items:
+        - "{{ ansible_play_hosts_all }}"
+
+    - name: "1.18.2.0 Generic Pacemaker - Configure the resources in Pacemaker"
+      ansible.builtin.command: pcs resource create health-azure-events ocf:heartbeat:azure-events-az op monitor interval=10s
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure clone resource azure-events is configured"
+      ansible.builtin.command: pcs resource clone health-azure-events allow-unhealthy-nodes=true
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure maintenance mode is disabled"
+      ansible.builtin.command: pcs property set maintenance-mode=false
+
+    - name: "1.18.2.0 Generic Pacemaker - Clear any errors during enablement of the Azure scheduled events"
+      ansible.builtin.command: pcs resource cleanup
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure the Azure scheduled events resource is started"
+      ansible.builtin.shell: |
+        set -o pipefail
+        pcs status --full | grep health-azure-events | grep Started | wc -l
+      register: azure_scheduled_events_status
+      retries: 12
+      delay: 10
+      until: azure_scheduled_events_status.stdout | int == ansible_play_hosts_all | length
+      when: inventory_hostname == primary_instance_name
+      failed_when: false
+
+    - name: "1.18.2.0 Generic Pacemaker - display output of Azure scheduled events"
+      when: inventory_hostname == primary_instance_name
+      ansible.builtin.debug:
+        msg:
+          - "output lines:      {{ azure_scheduled_events_status.stdout_lines }}"
+          - "output:            {{ azure_scheduled_events_status.stdout | int }}"
+          - "output_truthiness: {{ azure_scheduled_events_status.stdout | int == ansible_play_hosts_all | length }}"
+        verbosity: 2
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml
new file mode 100644
index 0000000000..a7c02f3bac
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml
@@ -0,0 +1,212 @@
+---
+
+# SLES Clustering
+# Ref: https://documentation.suse.com/sle-ha/12-SP4/html/SLE-HA-install-quick/index.html
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure a list of package versions is available for checking the cloud-netconfig-azure version"
+  ansible.builtin.package_facts:
+
+# Pacemaker can create a large number of processes
+- name: "1.18.2.0 Generic Pacemaker - Ensure Process limit is raised"
+  ansible.builtin.lineinfile:
+    path: /etc/systemd/system.conf
+    state: present
+    regexp: "^#?\\s*DefaultTasksMax="
+    line: "DefaultTasksMax=4096"
+  register: raise_process_limit
+
+# eth0 is the "db" NIC
+- name: "1.18.2.0 Generic Pacemaker - Ensure clustering can manage Virtual IPs on the Database Interface"
+  ansible.builtin.lineinfile:
+    path: /etc/sysconfig/network/ifcfg-eth0
+    state: present
+    regexp: "^#?\\s*CLOUD_NETCONFIG_MANAGE="
+    line: "CLOUD_NETCONFIG_MANAGE='no'"
+  when:
+    - "'cloud-netconfig-azure' in ansible_facts.packages"
+    - ansible_facts.packages['cloud-netconfig-azure'][0].version is version('1.3', '<')
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure Primary node initiates the Cluster"
+  block:
+#    - name: "1.18.2.0 Generic Pacemaker - Ensure csync2 is configured"
+#      ansible.builtin.command: crm cluster init -y csync2 --interface eth0
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure corosync is configured"
+      ansible.builtin.command: "crm cluster init -y -u corosync --interface eth0"
+
+    # This task has to run on a HANA node, preferably the first in the list.
+    - name: "1.18.2.0 Generic Pacemaker - Ensure cluster (hdb_{{ db_sid | upper }}) is configured"
+      # ha-cluster-init is not supported in SLES 15 SP4 anymore, crm syntax required
+      # ansible.builtin.command: "ha-cluster-init -y --name 'hdb_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey"
+      ansible.builtin.command: "crm cluster init -y --name 'hdb_{{ db_sid | upper }}' --interface eth0 --no-overwrite-sshkey"
+      when: node_tier == 'hana'
+  when: ansible_hostname == ansible_play_hosts_all[0]
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure additional nodes join the Cluster"
+  block:
+    - name: "1.18.2.0 Generic Pacemaker - Ensure Secondary nodes join the Cluster"
+      # ha-cluster-join is not supported in SLES 15 SP4 anymore, crm syntax required
+      ansible.builtin.command: "sudo crm cluster join -y -c {{ ansible_play_hosts_all[0] }} --interface eth0"
+      when:
+        - ansible_hostname != ansible_play_hosts_all[0]
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure HA Cluster password is set to something secure"
+  ansible.builtin.user:
+    name: hacluster
+    password: "{{ password_ha_db_cluster | password_hash('sha512', 65534 | random(seed=None) | string) }}"
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure cluster configuration contains correct details"
+  ansible.builtin.template:
+    src: corosync.conf.j2
+    dest: /etc/corosync/corosync.conf
+    mode: 0600
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure the pacemaker service is restarted on node."
+  ansible.builtin.systemd:
+    name: pacemaker
+    state: restarted
+
+- name: "1.18.2.0 Generic Pacemaker - Pause"
+  ansible.builtin.wait_for:
+    timeout: 30
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                              Fencing - BEGIN                               |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+- name: "1.18.2.0 Generic Pacemaker - Ensure the STONITH Azure fence agent is created when SBD is not used"
+  block:
+    - name: "1.18.2.0 Generic Pacemaker - Enable Stonith"
+      ansible.builtin.shell: |
+        crm configure property stonith-enabled=true
+        crm configure property concurrent-fencing=true
+
+    # templatize the pcmk_host_map from all entries in ansible_play_hosts_all and mm_hosts
+    - name: "1.18.2.0 Generic Pacemaker - Create Azure Fencing Agent"
+      ansible.builtin.command: >
+        crm configure primitive rsc_st_azure stonith:fence_azure_arm params
+          subscriptionId="{{ fencing_spn_subscription_id }}"
+          resourceGroup="{{ resource_group_name }}"
+          tenantId="{{ fencing_spn_tenant_id }}"
+          login="{{ fencing_spn_client_id }}"
+          passwd="{{ fencing_spn_client_pwd }}"
+          pcmk_monitor_retries=4
+          pcmk_action_limit=-1
+          power_timeout=240
+          pcmk_reboot_timeout=900
+          pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+      when:
+        - not use_msi_for_clusters or distribution_full_id in ["sles_sap12.4"]
+
+    - name: "1.18.2.0 Generic Pacemaker - Create Azure Fencing Agent (MSI)"
+      ansible.builtin.command: >
+        crm configure primitive rsc_st_azure stonith:fence_azure_arm params
+          subscriptionId="{{ fencing_spn_subscription_id }}"
+          resourceGroup="{{ resource_group_name }}"
+          msi=true
+          pcmk_monitor_retries=4
+          pcmk_action_limit=-1
+          power_timeout=240
+          pcmk_reboot_timeout=900
+          pcmk_host_map="{% for item in ansible_play_hosts_all %}{{ item }}:{{ hostvars[item]['vm_name'] }}{{ ';' if not loop.last }}{% endfor %}"
+      when:
+        - use_msi_for_clusters
+        - distribution_full_id in ["sles_sap12.5", "sles_sap15.1", "sles_sap15.2", "sles_sap15.3", "sles_sap15.4", "sles_sap15.5"]
+
+    - name: "1.18.2.0 Generic Pacemaker - Stonith Timeout Property"
+      become: true
+      ansible.builtin.command: crm configure property stonith-timeout=900
+
+  when:
+    - database_cluster_type == "AFA"
+    - inventory_hostname == ansible_play_hosts_all[0]
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                               Fencing - END                                |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
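`crm configure primitive` is not idempotent, so re-running this role against an existing cluster fails the create tasks. A minimal sketch of a guard that could precede them (assuming the resource name used above; this check is not part of the change):

- name: "Illustration only - check whether rsc_st_azure already exists"
  ansible.builtin.command: crm configure show rsc_st_azure
  register: fence_agent_check
  changed_when: false
  failed_when: false

# The create task could then be gated with:
#   when: fence_agent_check.rc != 0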
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                                SBD - BEGIN                                 |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+- name: "1.18.2.0 Generic Pacemaker - Ensure the SBD STONITH resource is configured when SBD is used"
+  when:
+    - (database_cluster_type == "ASD") or
+      (database_cluster_type == "ISCSI")
+    - inventory_hostname == primary_instance_name
+  block:
+    - name: "1.18.2.0 Generic Pacemaker - Check if Stonith SBD is configured in cluster"
+      ansible.builtin.shell: crm resource show stonith-sbd
+      register: stonith_sbd_configured
+      failed_when: false
+      changed_when: false
+
+    - name: "1.18.2.0 Generic Pacemaker - Delete Stonith SBD if it is already configured in cluster"
+      when: stonith_sbd_configured.rc == 0
+      ansible.builtin.command: crm configure delete stonith-sbd
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure Stonith SBD is configured in cluster"
+      when: stonith_sbd_configured.rc != 0
+      ansible.builtin.command: >-
+        crm configure primitive stonith-sbd stonith:external/sbd
+        params pcmk_delay_max="15"
+        op monitor interval="600" timeout="15"
+
+    - name: "1.18.2.0 Generic Pacemaker - Set the Stonith SBD Timeout Property"
+      ansible.builtin.command: crm configure property stonith-timeout=144
+
+    - name: "1.18.2.0 Generic Pacemaker - Enable Stonith"
+      ansible.builtin.command: crm configure property stonith-enabled=true
+
+  always:
+    - name: "1.18.2.0 Generic Pacemaker - SBD device configuration"
+      when: inventory_hostname == primary_instance_name
+      ansible.builtin.debug:
+        msg: "SBD device configuration ends"
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                                 SBD - END                                  |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                       Azure scheduled events - BEGIN                       |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+- name: "1.18.2.0 Generic Pacemaker - Ensure Azure scheduled events is configured"
+  block:
+    # After configuring the Pacemaker resources for azure-events agent,
+    # when you place the cluster in or out of maintenance mode, you may get warning messages like:
+    #   WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname'
+    #   WARNING: cib-bootstrap-options: unknown attribute 'azure-events_globalPullState'
+    #   WARNING: cib-bootstrap-options: unknown attribute 'hostName_ hostname'
+    # These warning messages can be ignored.
+    - name: "1.18.2.0 Generic Pacemaker - Ensure maintenance mode is set"
+      ansible.builtin.command: crm configure property maintenance-mode=true
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure Pacemaker resources for the Azure agent is created"
+      ansible.builtin.shell: crm configure primitive rsc_azure-events ocf:heartbeat:azure-events op monitor interval=10s
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure clone resource azure-events is configured"
+      ansible.builtin.shell: crm configure clone cln_azure-events rsc_azure-events
+
+    - name: "1.18.2.0 Generic Pacemaker - Remove false positives"
+      ansible.builtin.shell: crm_resource -C
+
+    - name: "1.18.2.0 Generic Pacemaker - Ensure maintenance mode is disabled"
+      ansible.builtin.command: crm configure property maintenance-mode=false
+  when: inventory_hostname == primary_instance_name
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                        Azure scheduled events - END                        |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+# END of Generic Pacemaker Tasks
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml
new file mode 100644
index 0000000000..3500e44977
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml
@@ -0,0 +1,36 @@
+---
+
+- name: "1.18.3 Generic Pacemaker - Pause to give cluster time to stabilize"
+  ansible.builtin.pause:
+    seconds: "{{ cluster_status_report_wait_in_s }}"
+
+- name: "1.18.3 Generic Pacemaker - Cleanup resource status"
+  ansible.builtin.shell: >
+    {% if ansible_os_family == 'RedHat' %}pcs resource cleanup{% else %}crm resource cleanup{% endif %}
+  register: cluster_cleanup
+  failed_when: cluster_cleanup.rc > 0
+
+- name: "1.18.3 Generic Pacemaker - Check the post-provisioning cluster status"
+  ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}"
+  register: cluster_status_report
+  changed_when: false
+  failed_when: false
+
+- name: "1.18.3 Generic Pacemaker - Output cluster status"
+  ansible.builtin.debug:
+    msg: "{{ cluster_status_report.stdout }}"
+    verbosity: 2
+
+# - name: Check the SBD devices status
+#   ansible.builtin.shell: >
+#     set -o pipefail
+#     crm_mon -1 | grep sbd
+#   register: sbd_status_report
+#   changed_when: false
+#   failed_when: false
+#   when: ansible_os_family | upper == "SUSE"
+
+# - name: Output SBD status
+#   ansible.builtin.debug:
+#     msg: "{{ sbd_status_report.stdout }}"
+#   when: ansible_os_family | upper == "SUSE"
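The status check above dispatches through the per-OS-family `cluster_status_cmd` map defined in `vars/main.yml` further down. A self-contained sketch of the same dispatch pattern:

- name: "Illustration only - dispatch a cluster status command by OS family"
  vars:
    cluster_status_cmd:
      RedHat: "pcs status --full"
      Suse: "crm status full"
  ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}"
  changed_when: false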
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml
new file mode 100644
index 0000000000..10a69446dc
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+- name: "1.18 Generic Pacemaker - Set Runtime Facts"
+  ansible.builtin.import_tasks: 1.18.0-set_runtime_facts.yml
+
+- name: "1.18 Generic Pacemaker - Run pre-checks"
+  ansible.builtin.import_tasks: 1.18.1-pre_checks.yml
+
+- name: "1.18 Generic Pacemaker - iSCSI Configuration"
+  ansible.builtin.import_tasks: 1.18.1.1-iSCSI.yml
+  when:
+    - (database_cluster_type == 'ISCSI')
+
+- name: "1.18 Generic Pacemaker - SBD Devices"
+  ansible.builtin.import_tasks: 1.18.1.2-sbd.yaml
+  when:
+    - (database_cluster_type == "ASD") or
+      (database_cluster_type == "ISCSI")
+
+# Import this task only if the cluster is not yet created
+- name: "1.18 Generic Pacemaker - Provision"
+  when:
+    - not cluster_existence_check == '0'
+  block:
+    - name: "1.18 Generic Pacemaker - Provision Pacemaker"
+      ansible.builtin.import_tasks: 1.18.2-provision.yml
+      tags:
+        - 1.18.2-provision
+
+- name: "1.18 Generic Pacemaker - Post provision report"
+  ansible.builtin.import_tasks: 1.18.3-post_provision_report.yml
+
+...
+# /*---------------------------------------------------------------------------8
+# |                                     END                                    |
+# +------------------------------------4--------------------------------------*/
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/corosync.conf.j2 b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/corosync.conf.j2
new file mode 100644
index 0000000000..9df5d40fbb
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/corosync.conf.j2
@@ -0,0 +1,60 @@
+# Please read the corosync.conf.5 manual page
+
+totem {
+    version:                             2
+    secauth:                             on
+    crypto_hash:                         sha1
+    crypto_cipher:                       aes256
+    cluster_name:                        hacluster
+    clear_node_high_bit:                 yes
+
+    token:                               {{ cluster_totem.token }}
+    token_retransmits_before_loss_const: {{ cluster_totem.retransmits }}
+    join:                                {{ cluster_totem.join }}
+    consensus:                           {{ cluster_totem.consensus }}
+    max_messages:                        {{ cluster_totem.max_messages }}
+
+    interface {
+        ringnumber: 0
+        mcastport:  5405
+        ttl:        1
+    }
+
+    transport:                           udpu
+}
+
+logging {
+    fileline:   off
+    to_stderr:  no
+    to_logfile: yes
+    logfile:    /var/log/cluster/corosync.log
+    to_syslog:  no
+    debug:      off
+    timestamp:  on
+    logger_subsys {
+        subsys: QUORUM
+        debug:  off
+    }
+}
+
+nodelist {
+{% for host in ansible_play_hosts_all %}
+    node {
+        ring0_addr: {{ hostvars[host]['primary_ip'] }}
+        name:       {{ host }}
+        nodeid:     {{ loop.index + 1 }}
+    }
+{% endfor %}
+}
+
+quorum {
+    # Enable and configure quorum subsystem (default: off)
+    # see also corosync.conf.5 and votequorum.5
+    provider:       corosync_votequorum
+    expected_votes: {{ cluster_quorum.expected_votes }}
+    two_node:       {{ cluster_quorum.two_node }}
+}
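For a hypothetical three-node scale-out play, the `nodelist` loop in the template above renders to something like the following (addresses illustrative; note that node IDs start at 2 because the template uses `loop.index + 1`):

# nodelist {
#     node {
#         ring0_addr: 10.1.2.4
#         name:       x00dhdb00l0
#         nodeid:     2
#     }
#     node {
#         ring0_addr: 10.1.2.5
#         name:       x00dhdb00l1
#         nodeid:     3
#     }
#     node {
#         ring0_addr: 10.1.2.6
#         name:       x00dhdb00l2
#         nodeid:     4
#     }
# }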
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/softdog.conf b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/softdog.conf
new file mode 100644
index 0000000000..6711610824
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/templates/softdog.conf
@@ -0,0 +1 @@
+softdog
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/vars/main.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/vars/main.yml
new file mode 100644
index 0000000000..e00e54ac6d
--- /dev/null
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/vars/main.yml
@@ -0,0 +1,110 @@
+---
+
+# /*---------------------------------------------------------------------------8
+# |   Variables in this file are required by the role,                         |
+# |   but are computed from default variables (e.g. sid),                      |
+# |   from the playbook/environment (e.g. hana_database),                      |
+# |   or are "SAP standard" values.                                            |
+# |                                                                            |
+# |   They may be overridden, if required, but normally are not                |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+db_hosts: "{{ query('inventory_hostnames', (sap_sid | upper) ~ '_DB') }}"
+mm_hosts: "{{ query('inventory_hostnames', (sap_sid | upper) ~ '_OBSERVER_DB') }}"
+
+cluster_totem:
+  token: 30000
+  retransmits: 10
+  join: 60
+  consensus: 36000
+  max_messages: 20
+
+cluster_quorum:
+  # expected_votes: "{{ (( ansible_play_hosts_all | length ) / 2 ) | int + 1 }}"
+  expected_votes: "{{ ansible_play_hosts_all | length }}"
+  two_node: 0
+
+# /*---------------------------------------------------------------------------8
+# |   These are the default timeouts used for the SAP HANA OS clustering.      |
+# |   Depending on the SAP HANA System, these may need to be adjusted          |
+# |   if the operation takes longer than expected.                             |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+cluster_sap_hana_timeouts:
+  start: 3600
+  stop: 3600
+  monitor_master: 700
+  monitor_slave: 700
+  promote: 3600
+  demote: 3600
+
+cluster_status_cmd:
+  RedHat: "pcs status --full"
+  Suse: "crm status full"
+
+cluster_status_report_wait_in_s: 60
+cluster_name: db{{ sid | lower }}
+
+# /*---------------------------------------------------------------------------8
+# |   HANA utility commands                                                    |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+sapcontrol_command: "/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe/sapcontrol -nr {{ db_instance_number }}"
+
+sbdMap: []
+
+# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/version_test.html
+# version_type is one of "loose" ← (default), "strict", "semver", "semantic", "pep440"
+# compare_operator is one of ">=" ← (default), "==", "!=", "<", "<=", ">"
+# RHEL
+#   RHEL 8.4:              resource-agents-4.1.1-90.13
+#   RHEL 8.6:              resource-agents-4.9.0-16.9
+#   RHEL 8.8 and newer:    resource-agents-4.9.0-40.1
+#   RHEL 9.0 and newer:    resource-agents-cloud-4.10.0-34.2
+# SLES
+#   SLES 12 SP5:           resource-agents-4.3.018.a7fb5035-3.98.1
+#   SLES 15 SP1:           resource-agents-4.3.0184.6ee15eb2-150100.4.72.1
+#   SLES 15 SP2:           resource-agents-4.4.0+git57.70549516-150200.3.56.1
+#   SLES 15 SP3:           resource-agents-4.8.0+git30.d0077df0-150300.8.31.1
+#   SLES 15 SP4 and newer: resource-agents-4.10.0+git40.0f4de473-150400.3.19.1
+# todo: Figure out a way to get the release information from the package manager
+package_versions:
+  redhat8.4:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.1.1", compare_operator: ">=", version_type: "loose"}
+  redhat8.6:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"}
+  redhat8.8:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"}
+  redhat8.9:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"}
+  redhat9.0:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents-cloud", version: "4.10.0", compare_operator: ">=", version_type: "loose"}
+  redhat9.2:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents-cloud", version: "4.10.0", compare_operator: ">=", version_type: "loose"}
+  sles_sap12.5:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.3.018", compare_operator: ">=", version_type: "loose"}
+  sles_sap15.1:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.3.018", compare_operator: ">=", version_type: "semver"}
+  sles_sap15.2:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.4.0", compare_operator: ">=", version_type: "semver"}
+  sles_sap15.3:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.8.0", compare_operator: ">=", version_type: "semver"}
+  sles_sap15.4:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.10.0", compare_operator: ">=", version_type: "semver"}
+  sles_sap15.5:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.10.0", compare_operator: ">=", version_type: "semver"}
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
index 74e85a9a51..929619e0e5 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
@@ -395,4 +395,27 @@
     - tier == 'sapos'
     - sap_trans is defined
 
+# Scale out - shared nothing configuration code to support AFS based /hana/shared mount
+- name: "AFS Mount: Scale out hana_shared"
+  ansible.builtin.include_tasks: 2.6.0.1-afs-mount.yaml
+  loop:
+    - {
+        'type':                'shared',
+        'temppath':            'shared',
+        'mount':               "{% if ansible_hostname in query('inventory_hostnames', (sap_sid | upper) ~ '_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}",
+        'opts':                'vers=4,minorversion=1,sec=sys',
+        'path':                '/hana/shared',
+        'permissions':         '0775',
+        'set_chattr_on_dir':   false,
+        'target_nodes':        ['hana'],
+        'create_temp_folders': false
+      }
+  vars:
+    primary_host: "{{ ansible_hostname }}"
+  when:
+    - db_scale_out
+    - db_high_availability
+    - hana_shared_mountpoint is defined
+    - hana_shared_mountpoint | length == 2
+
 ...
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
index c5302e8550..922f935b1d 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml
@@ -142,7 +142,7 @@
   when:
     - node_tier in item.target_nodes or item.target_nodes == ['all']
 
-- name: "Backward Compatibility - Check required Database HA variables"
+- name: "Backward Compatibility - Check required Database HA variables"
   ansible.builtin.set_fact:
     database_high_availability: "{{ db_high_availability | default(false) }}"
   when:
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml
index 9a29be2f37..831ed595d0 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml
@@ -321,4 +321,29 @@
     - tier == 'sapos'
     - sap_trans is defined
 
+
+# Scale out - shared nothing configuration code to support AFS based /hana/shared mount
+- name: "AFS Mount: Scale out hana_shared"
+  ansible.builtin.include_tasks: 2.6.0.1-afs-mount.yaml
+  loop:
+    - {
+        'type':                'shared',
+        'temppath':            'shared',
+        'mount':               "{% if ansible_hostname in query('inventory_hostnames', (sap_sid | upper) ~ '_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}",
+        'opts':                'vers=4,minorversion=1,sec=sys',
+        'path':                '/hana/shared',
+        'permissions':         '0775',
+        'set_chattr_on_dir':   false,
+        'target_nodes':        ['hana'],
+        'create_temp_folders': false
+      }
+  vars:
+    primary_host: "{{ ansible_hostname }}"
+  when:
+    - db_scale_out
+    - db_high_availability
+    - hana_shared_mountpoint is defined
+    - hana_shared_mountpoint | length == 2
+
 ...
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
index 00b3fa1403..db9918a986 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
@@ -126,7 +126,7 @@
 - name: "2.6 SAP Mounts: - Mount local file systems (hana data)"
   ansible.posix.mount:
     src: '/dev/vg_hana_data/lv_hana_data'
-    path: '/hana/data'
+    path: "{{ hana_data_basepath }}"
     fstype: 'xfs'
     opts: defaults
     state: mounted
@@ -137,7 +137,7 @@
 - name: "2.6 SAP Mounts: - Mount local file systems (hana log)"
   ansible.posix.mount:
     src: '/dev/vg_hana_log/lv_hana_log'
-    path: '/hana/log'
+    path: "{{ hana_log_basepath }}"
     fstype: 'xfs'
     opts: defaults
     state: mounted
@@ -353,6 +353,8 @@
   ansible.builtin.import_tasks: "2.6.3-oracle-observer.yaml"
   when:
     - node_tier == "observer"
+    - platform != "HANA"
+    # This is to prevent conflict between HANA scaleout-hsr and oracle DB as they both share observer, but only one platform can be deployed at a time
 
 - name: "2.6 SAP Mounts: - Import Oracle shared home tasks"
   ansible.builtin.import_tasks: "2.6.3-oracle-multi-sid.yaml"
@@ -396,6 +398,8 @@
   ansible.builtin.import_tasks: "2.6.6-oracle-nfs-mounts.yaml"
   when:
     - node_tier in ['oracle','oracle-asm','observer']
+    - platform != "HANA"
+    # ensure that this is not triggered for HANA scale out with HSR which uses an observer tier VM
 
 - name: "2.6 SAP Mounts: - Set permissions"
@@ -412,8 +416,8 @@
       path: "{{ item.path }}"
       mode: '{{ item.mode }}'
     loop:
-      - { mode: '0755', path: '/hana/data' }
-      - { mode: '0755', path: '/hana/log' }
+      - { mode: '0755', path: "{{ hana_data_basepath }}" }
+      - { mode: '0755', path: "{{ hana_log_basepath }}" }
       - { mode: '0755', path: '/hana/shared' }
   rescue:
     - name: "2.6 SAP Mounts: - Set permissions on hana folders"
       path: "{{ item.path }}"
       mode: '{{ item.mode }}'
     loop:
-      - { mode: '0755', path: '/hana/data' }
-      - { mode: '0755', path: '/hana/log' }
+      - { mode: '0755', path: "{{ hana_data_basepath }}" }
+      - { mode: '0755', path: "{{ hana_log_basepath }}" }
       - { mode: '0755', path: '/hana/shared' }
 
 # Import custom NFS mounts.
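The `[0::2]` and `[1::2]` slices used by the scale-out mount tasks above split the ordered DB host list into HSR site 1 (even positions) and site 2 (odd positions). The effect for a hypothetical four-node `X00_DB` group:

# X00_DB hosts: [x00dhdb00l0, x00dhdb00l1, x00dhdb00l2, x00dhdb00l3]
#   hosts[0::2] -> [x00dhdb00l0, x00dhdb00l2] -> hana_shared_mountpoint[0] (site 1)
#   hosts[1::2] -> [x00dhdb00l1, x00dhdb00l3] -> hana_shared_mountpoint[1] (site 2)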
diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml
index 5ae33edf74..414e8a9d19 100644
--- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml
+++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml
@@ -12,5 +12,5 @@
   ansible.builtin.import_tasks: 5.5.4-provision.yml
   when: not hana_cluster_existence_check
 
-- name: "5.5 HANADB Pacemaker - import - post_provision_repor"
+- name: "5.5 HANADB Pacemaker - import - post_provision_report"
   ansible.builtin.import_tasks: 5.5.5-post_provision_report.yml
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/defaults/main.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/defaults/main.yml
new file mode 100644
index 0000000000..e35807b734
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+
+iscsi_object: iqn.2006-04
+
+iscsi_port: 3260
+
+sapcontrol_command: "sapcontrol -nr {{ db_instance_number }}"
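The `sapcontrol_command` default above is invoked later with `-function StopSystem` / `StartSystem`. For orientation, a sketch of the equivalent direct calls as the `<dbsid>adm` user (instance number 00 is illustrative):

# sapcontrol -nr 00 -function StopSystem
# sapcontrol -nr 00 -function GetProcessList   # exit code 3 = all GREEN, 4 = all stopped
# sapcontrol -nr 00 -function StartSystem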
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml
new file mode 100644
index 0000000000..9778d293ed
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml
@@ -0,0 +1,90 @@
+---
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |        Set Runtime Parameters - e.g Sub ID , Resource group name           |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+# ----------------------------------------
+# BEGIN
+# ----------------------------------------
+
+- name: "5.8 HANA Pacemaker - Retrieve Subscription ID and Resource Group Name"
+  ansible.builtin.uri:
+    url: http://169.254.169.254/metadata/instance?api-version=2021-02-01
+    use_proxy: false
+    headers:
+      Metadata: true
+  register: hanavmmetadata
+
+- name: "5.8 HANA Pacemaker - Show IMDS results"
+  ansible.builtin.debug:
+    var: hanavmmetadata.json
+    verbosity: 2
+
+- name: "5.8 HANA Pacemaker - Extract Subscription ID"
+  ansible.builtin.set_fact:
+    fencing_spn_subscription_id: "{{ hanavmmetadata.json.compute.subscriptionId }}"
+  no_log: true
+
+- name: "5.8 HANA Pacemaker - Extract ResourceGroup Name"
+  ansible.builtin.set_fact:
+    resource_group_name: "{{ hanavmmetadata.json.compute.resourceGroupName }}"
+  no_log: true
+
+- name: "5.8 HANA Pacemaker - Set the primary instance nic and secondary instance nic IP"
+  ansible.builtin.set_fact:
+    primary_ip: "{{ hanavmmetadata.json.network.interface[0].ipv4.ipAddress[0].privateIpAddress }}"
+    subnet_prefix: "{{ hanavmmetadata.json.network.interface[0].ipv4.subnet[0].prefix }}"
+
+- name: "5.8 HANA Pacemaker - Extract NIC IPs"
+  ansible.builtin.set_fact:
+    primary_instance_ip_db: "{{ hostvars[primary_instance_name]['primary_ip'] | string }}"
+    secondary_instance_ip_db: "{{ hostvars[secondary_instance_name]['primary_ip'] | string }}"
+
+# - name: Set the primary instance db nic and admin nic IP
+#   ansible.builtin.set_fact:
+#     primary_instance_ip_db: "{{ hostvars[primary_instance_name]['ansible_eth0']['ipv4'][0]['address'] }}"
+
+# - name: Set the secondary instance db nic and admin nic IP
+#   ansible.builtin.set_fact:
+#     secondary_instance_ip_db: "{{ hostvars[secondary_instance_name]['ansible_eth0']['ipv4'][0]['address'] }}"
+
+- name: "5.8 HANA Pacemaker - Show Subscription ID"
+  ansible.builtin.debug:
+    var: fencing_spn_subscription_id
+    verbosity: 2
+
+- name: "5.8 HANA Pacemaker - Show Resource Group Name"
+  ansible.builtin.debug:
+    var: resource_group_name
+    verbosity: 2
+
+- name: "5.8 HANA Pacemaker - Ensure HANA DB version is checked and captured"
+  block:
+    - name: Check HANA DB Version and register
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: /hana/shared/{{ db_sid | upper }}/HDB{{ db_instance_number }}/HDB version
+      register: hdbversion
+      changed_when: false
+
+    - name: "5.8 HANA Pacemaker - Capture the HANA DB version"
+      ansible.builtin.set_fact:
+        hdb_version: "{{ hdbversion.stdout_lines.1.split().1 }}"
+
+    - name: "5.8 HANA Pacemaker - Show the HDB version"
+      ansible.builtin.debug:
+        var: hdb_version
+
+    - name: "5.8 HANA Pacemaker - Show the HDB version prefix"
+      ansible.builtin.debug:
+        var: hdb_version[0:2]
+
+# /*---------------------------------------------------------------------------8
+# |                                     END                                    |
+# +------------------------------------4--------------------------------------*/
+
+...
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml
new file mode 100644
index 0000000000..5188fe8e9d
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml
@@ -0,0 +1,61 @@
+---
+
+# sapcontrol EXITCODES
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                                 Pre checks                                 |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+- name: Check the fencing agent configuration variables are set
+  ansible.builtin.assert:
+    that:
+      - "fencing_spn_subscription_id is defined"
+      - "fencing_spn_subscription_id | trim | length > 0"
+      - "fencing_spn_tenant_id is defined"
+      - "fencing_spn_tenant_id | trim | length > 0"
+      - "fencing_spn_client_id is defined"
+      - "fencing_spn_client_id | trim | length > 0"
+      - "fencing_spn_client_pwd is defined"
+      - "fencing_spn_client_pwd | trim | length > 0"
+    fail_msg: Fencing SPN details are missing
+  when: not use_msi_for_clusters
+
+- name: Check the required cluster password is set
+  ansible.builtin.assert:
+    that:
+      - "password_ha_db_cluster is defined"
+      - "password_ha_db_cluster | trim | length > 0"
+    fail_msg: The cluster password is not defined
+
+- name: Check the required Clustering scripts are available
+  ansible.builtin.stat:
+    path: "/usr/sbin/crm"
+  register: cluster_scripts_status_results
+  failed_when: not cluster_scripts_status_results.stat.exists
+  when: ansible_os_family | upper == "SUSE"
+
+- name: "HANA PCM Install: Create run flag directory"
+  ansible.builtin.file:
+    path: /etc/sap_deployment_automation
+    state: directory
+    mode: 0755
+
+- name: "HANA PCM Install: reset"
+  ansible.builtin.file:
+    path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt
+    state: absent
+  when: reinstall
+
+- name: "HANA PCM Install: check if deployed"
+  ansible.builtin.stat:
+    path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt
+  register: hana_pacemaker
+
+- name: Check if a cluster has already been prepared (save)
+  ansible.builtin.set_fact:
+    hana_cluster_existence_check: "{{ hana_pacemaker.stat.exists }}"
+
+- name: Check if a cluster has already been prepared (show)
+  ansible.builtin.debug:
+    msg: "Cluster check return value: {{ hana_cluster_existence_check }}"
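The `hdb_version` fact captured in 5.8.1 above indexes into the `HDB version` output. A sketch of the parsing against hypothetical output:

# HDB version info:
#   version:    2.00.059.05.1662044871
#   ...
# stdout_lines.1 is the '  version: ...' line, so
#   hdbversion.stdout_lines.1.split().1  ->  "2.00.059.05.1662044871"
# and hdb_version[0:2] == "2." identifies a HANA 2.0 system.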
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml
new file mode 100644
index 0000000000..45d7f2e1fe
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml
@@ -0,0 +1,437 @@
+---
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |    Implement the Python system replication hook SAPHanaSR-ScaleOut         |
+# |    Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse#implement-hana-ha-hooks-saphanasrmultitarget-and-suschksrv
+# |    Begin: configuration for SAPHanaSR-ScaleOut python hook                 |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+- name: HANA 2.0 only - Implement the Python system replication hook SAPHanaSR-ScaleOut MultiTarget
+  when:
+    - hdb_version[0:2] == "2."
+    - database_high_availability
+  block:
+    - name: Generate list of deployed packages on current host
+      ansible.builtin.package_facts:
+
+    # SAPHanaSR-ScaleOut conflicts with SAPHanaSR and dependencies
+    - name: "Ensure SAPHanaSR package is absent"
+      ansible.builtin.package:
+        name: SAPHanaSR
+        state: absent
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['SAPHanaSR'] is defined
+
+    - name: "Ensure SAPHanaSR-doc package is absent"
+      ansible.builtin.package:
+        name: SAPHanaSR-doc
+        state: absent
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['SAPHanaSR-doc'] is defined
+
+    - name: "Ensure yast2-sap-ha package is absent"
+      ansible.builtin.package:
+        name: yast2-sap-ha
+        state: absent
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['yast2-sap-ha'] is defined
+
+    # Ensure SAPHanaSR-ScaleOut package is installed
+    - name: "Ensure SAPHanaSR-ScaleOut package is installed"
+      ansible.builtin.package:
+        name: SAPHanaSR-ScaleOut
+        state: present
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['SAPHanaSR-ScaleOut'] is not defined
+
+    - name: "Ensure SAPHanaSR-ScaleOut-doc package is installed"
+      ansible.builtin.package:
+        name: SAPHanaSR-ScaleOut-doc
+        state: present
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['SAPHanaSR-ScaleOut-doc'] is not defined
+
+    # for RHEL, ensure resource-agents-sap-hana-scaleout is installed
+    - name: "Ensure resource-agents-sap-hana is absent (REDHAT)"
+      ansible.builtin.package:
+        name: resource-agents-sap-hana
+        state: absent
+      when:
+        - ansible_os_family | upper == "REDHAT"
+        - ansible_facts.packages['resource-agents-sap-hana'] is defined
+
+    - name: "Ensure resource-agents-sap-hana-scaleout is installed (REDHAT)"
+      ansible.builtin.package:
+        name: resource-agents-sap-hana-scaleout
+        state: present
+      when:
+        - ansible_os_family | upper == "REDHAT"
+        - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined
+
+    # add package verification for RHEL based on link https://access.redhat.com/articles/3397471
+
+    - name: "Verify SAPHanaSR-ScaleOut package version is 0.180 or later"
+      ansible.builtin.assert:
+        that:
+          - ansible_facts.packages['SAPHanaSR-ScaleOut'][0].version is version('0.180', '>=')
+        fail_msg: "SAPHanaSR-ScaleOut version is older than 0.180"
+        success_msg: "SAPHanaSR-ScaleOut version is 0.180 or later"
+      register: saphanasr_scaleout_version
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - ansible_facts.packages['SAPHanaSR-ScaleOut'] is defined
+
+    - name: "HANA HSR: - Check HANA DB Version and register"
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: /hana/shared/{{ db_sid | upper }}/HDB{{ db_instance_number }}/HDB version
+      register: hdb_version_output
+      changed_when: false
+
+    - name: "Extract SAP HANA version number"
+      ansible.builtin.set_fact:
+        hana_version_str: "{{ hdb_version_output.stdout | regex_search('version:\\s+([\\d\\.]+)', '\\1') | first }}"
+
+    - name: "Assert SAP HANA version is SAP HANA 2.0 SP5 or later"
+      ansible.builtin.assert:
+        that:
+          - hana_version_str is version('2.00.050', '>=')
+        fail_msg: "Installed HANA version is older than SAP HANA 2.0 SP5"
+        success_msg: "Installed HANA version is SAP HANA 2.0 SP5 or later"
+      register: hana_sp_version
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - hdb_version_output.stdout is search("version")
+
+    - name: "Set fact (is_susTkOver_ready) to determine if susTkOver is ready to be configured"
+      ansible.builtin.set_fact:
+        is_susTkOver_ready: true
+      when:
+        - ansible_os_family | upper == "SUSE"
+        - hdb_version_output.stdout is search("version")
+        - saphanasr_scaleout_version is defined
+        - saphanasr_scaleout_version is success
+        - hana_sp_version is defined
+        - hana_sp_version is success
+
+    - name: "Set fact that susTkOver is ready to be configured"
+      ansible.builtin.set_fact:
+        configure_susTkOver: "{{ (is_susTkOver_ready is defined and is_susTkOver_ready) | ternary(true, false) }}"
+
+    - name: Check if "myHooks" file exists in /hana/shared directory
+      ansible.builtin.stat:
+        path: /hana/shared/myHooks
+      register: my_hooks_stat
+
+    - name: Stop HANA System on both sites
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem"
+      failed_when: false
+      changed_when: false
+      register: hana_system_stopped
+      when:
+        - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
+
+    - name: Wait 2 minutes for SAP system to stop
+      ansible.builtin.pause:
+        seconds: 120
+
+    # This is not needed any more as we are going to use the default path of the hook script installed by the package.
+    # - name: copy SAPHanaSR-ScaleOut.py (SUSE)
+    #   ansible.builtin.copy:
+    #     src: /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR-ScaleOut.py
+    #     dest: /hana/shared/myHooks/
+    #     remote_src: true
+    #     owner: root
+    #     group: root    # TODO - check the correct group once the python hook package is installed
+    #     mode: '0644'
+    #   when:
+    #     - ansible_os_family | upper == "SUSE"
+    #     - not my_hooks_stat.stat.exists
+
+    - name: Copy /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py to /hana/shared/myHooks/ (RHEL)
+      ansible.builtin.copy:
+        src: /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py
+        dest: /hana/shared/myHooks/
+        remote_src: true
+        owner: root
+        group: root    # TODO - check the correct group once the python hook package is installed
+        mode: '0644'
+      when:
+        - ansible_os_family | upper == "REDHAT"
+        - not my_hooks_stat.stat.exists
+
+    - name: Change ownership of the directory
+      ansible.builtin.file:
+        path: /hana/shared/myHooks
+        state: directory
+        recurse: true
+        owner: "{{ db_sid | lower }}adm"
+        group: sapsys
+
+    - name: "Prepare global.ini for host name resolution"
+      community.general.ini_file:
+        path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini"
+        section: "system_replication_hostname_resolution"
+        mode: 0644
+        state: present
+        option: "{{ hostvars[item].ansible_host }}"
+        value: "{{ hostvars[item].virtual_host }}"
+      with_items:
+        - "{{ groups[(sap_sid | upper)~'_DB' ] }}"
+
+    # susTkOver is not available on REDHAT
+    - name: Adjust global.ini on each cluster node ( RHEL without susChkSrv/susTkOver )
+      ansible.builtin.blockinfile:
+        path: /hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini
+        block: |
+          [ha_dr_provider_SAPHanaSR]
+          provider = SAPHanaSR
+          path = /hana/shared/myHooks
+          execution_order = 1
+
+          [trace]
+          ha_dr_saphanasr = info
+      when:
+        - not configure_susTkOver
+        - ansible_os_family | upper == "REDHAT"
+        - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
+
+    - name: Adjust global.ini on each cluster node ( with susChkSrv/susTkOver )
+      ansible.builtin.blockinfile:
+        path: /hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini
+        block: |
+          [ha_dr_provider_saphanasrmultitarget]
+          provider = SAPHanaSrMultiTarget
+          path = /usr/share/SAPHanaSR-ScaleOut/
+          execution_order = 1
+
+          [ha_dr_provider_sustkover]
+          provider = susTkOver
+          path = /usr/share/SAPHanaSR-ScaleOut/
+          execution_order = 2
+          sustkover_timeout = 30
+
+          [trace]
+          ha_dr_saphanasrmultitarget = info
+          ha_dr_sustkover = info
+      when:
+        - configure_susTkOver
+        - ansible_os_family | upper == "SUSE"
+        - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
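After the tasks above run on a SLES node, `global.ini` would carry sections along these lines (IPs and host names hypothetical; the hostname-resolution pairs come from each host's `ansible_host` and `virtual_host` values):

# [system_replication_hostname_resolution]
# 10.1.2.4 = x00dhdb00l0
# 10.1.2.5 = x00dhdb00l1
#
# [ha_dr_provider_saphanasrmultitarget]
# provider = SAPHanaSrMultiTarget
# path = /usr/share/SAPHanaSR-ScaleOut/
# execution_order = 1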
+    # Ref: https://documentation.suse.com/sbp/sap-15/html/SLES4SAP-hana-scaleOut-PerfOpt-15/index.html#id-integrating-sap-hana-with-the-cluster
+    # Note: Azure documentation is outdated w.r.t. the SAP HANA hook.
+    - name: Create sudoers file for /etc/sudoers.d/20-saphana for SLES
+      ansible.builtin.template:
+        src: "20-saphana-suse.j2"
+        dest: "/etc/sudoers.d/20-saphana"
+        mode: "0440"
+        owner: root
+        group: root
+        # validate: /usr/sbin/visudo -cf %s
+      when:
+        - ansible_os_family | upper == "SUSE"
+
+    # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-rhel?tabs=lb-portal#create-sap-hana-cluster-resources
+    - name: Create sudoers file for /etc/sudoers.d/20-saphana for RHEL
+      ansible.builtin.template:
+        src: "20-saphana-rhel.j2"
+        dest: "/etc/sudoers.d/20-saphana"
+        mode: "0440"
+        owner: root
+        group: root
+        # validate: /usr/sbin/visudo -cf %s
+      when:
+        - ansible_os_family | upper == "REDHAT"
+
+    - name: Start HANA System on both nodes
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem"
+      failed_when: false
+      changed_when: false
+      register: hana_system_started
+
+    - name: Wait 5 minutes for SAP system to start
+      ansible.builtin.pause:
+        seconds: 300
+
+    # - name: Start HANA Database
+    #   ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml
+    #   when:
+    #     - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
+
+    # old command:
+    # awk '/ha_dr_SAPHanaSR-ScaleOut.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_*
+    # Verify that the SAPHanaSR-ScaleOut hook script is working as expected.
+    - name: Pause to give HANA replication time to stabilize
+      ansible.builtin.pause:
+        seconds: "{{ hsr_status_report_wait_in_s }}"
+
+    # REDHAT only
+    # This needs to be run on all the nodes where HANA is deployed.
+    - name: Verify that the hook script is working as expected (REDHAT)
+      when:
+        - ansible_os_family | upper == "REDHAT"
+        - ansible_hostname == primary_instance_name
+      block:
+        - name: "Verify the hook Installation (REDHAT)"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            awk '/ha_dr_SAPHanaSR.*crm_attribute/ \
+            { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace
+          register: saphanasr
+          until: saphanasr.stdout is search("SOK")
+          retries: 10
+          delay: 30
+      rescue:
+        - name: "[Rescue] - Pause to give HANA replication time to stabilize"
+          ansible.builtin.pause:
+            seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+
+        - name: "[Rescue] - Verify the hook Installation (REDHAT)"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            awk '/ha_dr_SAPHanaSR.*crm_attribute/ \
+            { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace
+          register: saphanasr
+          until: saphanasr.stdout is search("SOK")
+          retries: 10
+          delay: 30
+
+    # SUSE only
+    # Check on all nodes, status of SAPHanaSrMultiTarget Hook
+    - name: Verify that the hook script is working as expected (SUSE)
+      when:
+        # - inventory_hostname == primary_instance_name
+        - ansible_os_family | upper == "SUSE"
+      block:
+        - name: "Verify the hook Installation (SUSE)"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            grep SAPHanaSr.*init nameserver_*.trc | tail -3
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
+          register: SAPHanaSR_ScaleOut
+          until: SAPHanaSR_ScaleOut.stdout is search("Running")
+          retries: 10
+          delay: 30
+          # when: inventory_hostname == primary_instance_name
+      rescue:
+        - name: "[Rescue] - Pause to give HANA replication time to stabilize"
+          ansible.builtin.pause:
+            seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+
+        - name: "[Rescue] - Verify the hook Installation (SUSE)"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            grep SAPHanaSr.*init nameserver_*.trc | tail -3
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
+          register: SAPHanaSR_ScaleOut
+          until: SAPHanaSR_ScaleOut.stdout is search("Running")
+          retries: 10
+          delay: 30
+          # when: inventory_hostname == primary_instance_name
+
+    # SUSE only
+    # Check on all nodes, status of susTkOver Hook
+    - name: Verify that the susTkOver hook script is working as expected (SUSE)
+      when:
+        # - inventory_hostname == primary_instance_name
+        - ansible_os_family | upper == "SUSE"
+        - is_susTkOver_ready is defined
+        - is_susTkOver_ready
+      block:
+        - name: "Verify the hook Installation (SUSE)"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            grep HADR.*load.*susTkOver nameserver_*.trc | tail -3
+            grep susTkOver.init nameserver_*.trc | tail -3
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
+          register: susTkOver
+          until: susTkOver.stdout is search("susTkOver.init()")
+          retries: 10
+          delay: 30
+          # when: inventory_hostname == primary_instance_name
+      rescue:
+        - name: "[Rescue] - Pause to give HANA replication time to stabilize"
+          ansible.builtin.pause:
+            seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+
+        - name: "[Rescue] - Verify the hook Installation"
+          become_user: "{{ db_sid | lower }}adm"
+          become: true
+          ansible.builtin.shell: |
+            set -o pipefail
+            grep HADR.*load.*susTkOver nameserver_*.trc | tail -3
+            grep susTkOver.init nameserver_*.trc | tail -3
+          args:
+            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
+          register: susTkOver
+          until: susTkOver.stdout is search("susTkOver.init()")
+          retries: 10
+          delay: 30
+          # when: inventory_hostname == primary_instance_name
+
+    - name: "Log that the hook script is working as expected"
+      block:
+        - name: "Debug (SAPHanaSR_ScaleOut)"
+          ansible.builtin.debug:
+            var: SAPHanaSR_ScaleOut
+            verbosity: 2
+
+        - name: "set_fact (SAPHanaSR_ScaleOut)"
+          ansible.builtin.set_fact:
+            hsr_result: "{{ SAPHanaSR_ScaleOut.stdout }}"
+
+        - name: "Debug (hsr_result)"
+          ansible.builtin.debug:
+            var: hsr_result
+            verbosity: 2
+
+        - name: "Assert HSR Hook verification is successful"
+          ansible.builtin.assert:
+            that:
+              - "'SFAIL' not in hsr_result"
+            fail_msg: "Unable to determine if HSR Hook is working"
+          # when: inventory_hostname == primary_instance_name
+
+    - name: Verify the hook Installation
+      ansible.builtin.debug:
+        var: SAPHanaSR_ScaleOut
+        verbosity: 2
+
+# Note: We do not configure the hook on the Majority Maker; only installation is needed. Unfortunately, since this task runs on the HANA VMs only, the Majority Maker is skipped.
+# Hook packages are deployed on the Majority Maker in task 1.18-scaleout-pacemaker
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml
new file mode 100644
index 0000000000..97f89e3a3b
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml
@@ -0,0 +1,17 @@
+---
+
+###########################################################################################
+# This file calls the OS specific tasks to configure HANA specific clustering resources #8
+###########################################################################################
+
+# Clustering commands are based on the Host OS
+- name: "5.8 HANADB Pacemaker - configure pre-requisites"
+  ansible.builtin.include_tasks: "5.8.4.0-clusterPrep-ScaleOut-{{ ansible_os_family }}.yml"
+
+- name: "5.8 HANADB Pacemaker - import - SAP HanaSRMultiTarget"
+  ansible.builtin.include_tasks: 5.8.3-SAPHanaSRMultiTarget.yml
+
+- name: "5.8 HANADB Pacemaker - configure cluster resources"
+  ansible.builtin.include_tasks: "5.8.4.1-cluster-ScaleOut-{{ ansible_os_family }}.yml"
+...
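Interpolating `ansible_os_family` into the included file name is what keeps this wrapper OS-neutral; only the two distribution-specific files need to exist per step. Sketch of the resolution:

# On a RedHat host: 5.8.4.0-clusterPrep-ScaleOut-RedHat.yml / 5.8.4.1-cluster-ScaleOut-RedHat.yml
# On a SUSE host:   5.8.4.0-clusterPrep-ScaleOut-Suse.yml   / 5.8.4.1-cluster-ScaleOut-Suse.yml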
mask the /hana/shared in /etc/fstab and configure filesystem role in pacemaker. Let pacemaker handle share mount. + # 4. Do not kill existing processes and attempt to unmount /hana/shared. Bad things will happen. + # 5. No Seriously !! Terrible things will happen and you will have a hard time repairing the deployment + + - name: Stop HANA System on both sites + block: + - name: Execute HANA StopSystem on both sites + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem" + failed_when: false + changed_when: false + register: hana_system_stopped + when: ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] + + - name: Wait 2 minutes for SAP system to stop + ansible.builtin.pause: + seconds: 120 + + - name: Unmount /hana/shared from all cluster participating nodes + block: + - name: "Comment out the mountpoint from '/etc/fstab' file" + ansible.builtin.replace: + path: /etc/fstab + regexp: "^{{ item }}" + replace: "# {{ item }}" + backup: true + loop: + - "{{ hana_shared_mountpoint[0] }}" + - "{{ hana_shared_mountpoint[1] }}" + + - name: "Configure pacemaker hana shared filesystem resources on {{ primary_instance_name }}" + when: ansible_hostname == primary_instance_name + block: + - name: "Configure NFS filesystem resource in Pacemaker for HSR sites" + ansible.builtin.shell: > + pcs resource create {{ item.fs_name }} --disabled ocf:heartbeat:Filesystem \ + device="{{ item.fs_mount }}" directory="{{ item.fs_dir }}" fstype="nfs" \ + options="{{ nfs_mount_options }}" \ + op monitor interval=20s on-fail=fence timeout=120s OCF_CHECK_LEVEL=20 \ + op start interval=0 timeout=120 op stop interval=0 timeout=120 \ + clone meta clone-node-max=1 interleave=true + register: nfs_mount_sites + failed_when: false + ignore_errors: true + loop: + - { fs_name: 'fs_hana_shared_s1',fs_mount: '{{ hana_shared_mountpoint[0] }}', fs_dir: '/hana/shared' } + - { fs_name: 'fs_hana_shared_s2',fs_mount: '{{ hana_shared_mountpoint[1] }}', fs_dir: '/hana/shared' } + loop_control: + loop_var: item + + - name: "Check if NFS hana mounts did not error on {{ primary_instance_name }}" + ansible.builtin.set_fact: + chk_nfs_mount_sites: "{{ nfs_mount_sites.results | selectattr('rc', 'ne', 0) | rejectattr('stderr', 'search', 'already exists') | default([]) | list }}" + + - name: "Fail when NFS hana mounts errored on {{ primary_instance_name }}" + ansible.builtin.fail: + msg: "Failed to create NFS hana mounts on {{ primary_instance_name }}" + when: + - chk_nfs_mount_sites | length > 0 + + - name: "Configure node attributes for primary site on {{ primary_instance_name }}" + ansible.builtin.shell: > + pcs node attribute {{ item }} NFS_{{ db_sid | upper }}_SITE=S1 + register: node_nfs_attribute_site1 + failed_when: false + ignore_errors: true + with_items: + - "{{ ansible_play_hosts_all[0::2] }}" + + - name: "Configure node attributes for secondary site on {{ primary_instance_name }}" + ansible.builtin.shell: > + pcs node attribute {{ item }} NFS_{{ db_sid | upper }}_SITE=S2 + register: node_nfs_attribute_site2 + failed_when: false + ignore_errors: true + with_items: + - "{{ ansible_play_hosts_all[1::2] }}" + + - name: "Configure location constraint for filesystem resource clone on {{ primary_instance_name }}" + ansible.builtin.shell: > + pcs constraint location {{ item.clone_name }} rule resource-discovery=never score=-INFINITY NFS_{{ db_sid | upper }}_SITE ne {{ item.site_code }} + register: location_nfs_attribute_sites + 
failed_when: false + ignore_errors: true + loop: + - { clone_name: 'fs_hana_shared_s1-clone', site_code: 'S1'} + - { clone_name: 'fs_hana_shared_s2-clone', site_code: 'S2'} + + - name: "Check if NFS hana mounts constraints did not error on {{ primary_instance_name }}" + ansible.builtin.set_fact: + chk_location_nfs_attribute_sites: "{{ location_nfs_attribute_sites.results | selectattr('rc', 'ne', 0) | rejectattr('stderr', 'search', 'already exists') | default([]) | list }}" + + - name: "Fail when NFS hana mounts errored on {{ primary_instance_name }}" + ansible.builtin.fail: + msg: "Failed to create NFS hana mounts on {{ primary_instance_name }}" + when: + - chk_location_nfs_attribute_sites | length > 0 + + - name: Activate filesystem resource on {{ primary_instance_name }} + ansible.builtin.shell: > + pcs resource enable {{ item.fs_name }} + register: activate_nfs_mount_sites + failed_when: false + ignore_errors: true + loop: + - { fs_name: 'fs_hana_shared_s1' } + - { fs_name: 'fs_hana_shared_s2' } + when: + - chk_location_nfs_attribute_sites | length == 0 + - chk_nfs_mount_sites | length == 0 + + - name: Configure pacemaker attribute resource on {{ primary_instance_name }} + ansible.builtin.shell: > + pcs resource create {{ item.res_name }} ocf:pacemaker:attribute active_value=true \ + inactive_value=false name={{ item.res_name }} \ + clone meta clone-node-max=1 interleave=true + register: attribute_hana_nfs_sites + failed_when: false + ignore_errors: true + loop: + - { res_name: 'hana_nfs_s1_active' } + - { res_name: 'hana_nfs_s2_active' } + + - name: Create constraints for pacemaker attribute resource on {{ primary_instance_name }} + ansible.builtin.shell: > + pcs constraint order fs_hana_shared_s1-clone then hana_nfs_s1_active-clone + register: loc_attribute_hana_nfs_sites + failed_when: false + ignore_errors: true + loop: + - { fs_clone: 'fs_hana_shared_s1-clone', res_clone: 'hana_nfs_s1_active-clone' } + - { fs_clone: 'fs_hana_shared_s2-clone', res_clone: 'hana_nfs_s2_active-clone' } + + - name: Wait for /hana/shared to become available on all participating nodes + block: + - name: Wait for /hana/shared to be mounted + ansible.builtin.wait_for: + path: /hana/shared + state: present + timeout: 300 + + - name: Check if /hana/shared is mounted + ansible.builtin.shell: > + mountpoint -q /hana/shared + register: hana_shared_mounted + changed_when: false + failed_when: false + + - name: Fail if /hana/shared is not mounted + ansible.builtin.fail: + msg: "Critical failure : /hana/shared is not mounted" + when: hana_shared_mounted.rc > 0 + + # Note: We need to manually start HANA on all participating nodes via HDB script. 
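For reference, the start sequence that the next tasks automate looks roughly like this when done by hand (SID HN1 and instance number 03 are placeholders):

    # On every participating HANA node, as the <sid>adm user
    sudo su - hn1adm
    source /usr/sap/HN1/home/.sapenv.sh
    cd /usr/sap/HN1/HDB03 && HDB start

    # Then verify per node that all services come up green
    /usr/sap/HN1/HDB03/exe/sapcontrol -nr 03 -function GetProcessList

HDB start only brings up the local instance; sapcontrol StartSystem, used further down, starts the system as a whole.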
+ - name: Start HANA database on each participating node + become_user: "{{ db_sid | lower }}adm" + args: + chdir: "/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}" + become: true + ansible.builtin.shell: > + source /usr/sap/{{ db_sid | upper }}/home/.sapenv.sh && + HDB start + failed_when: hdb_start.rc > 0 + changed_when: false + register: hdb_start + + - name: Start HANA System on both sites + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" + failed_when: hana_system_started.rc > 0 + changed_when: false + register: hana_system_started + when: + - ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] + + - name: Wait 5 minutes for SAP system to stablize + ansible.builtin.pause: + seconds: 300 +# End of HANA filesystem clustering resources + + # Ref : https://access.redhat.com/articles/3004101 - 4.3 Configure general cluster properties +- name: "Configure general cluster properties" + when: + - ansible_hostname == primary_instance_name + block: + - name: "Set resource stickiness value to 1000" + ansible.builtin.shell: > + pcs resource defaults resource-stickiness=1000 + register: res_stickiness + failed_when: res_stickiness.rc > 0 + changed_when: false + + - name: "Set migration threshold value to 5000" + ansible.builtin.shell: > + pcs resource defaults migration-threshold=5000 + register: mig_threshold + failed_when: mig_threshold.rc > 0 + changed_when: false + + +# ⠀⠀⠀⠀⠀⠀⣠⡤⠶⠒⢛⢻⠛⠛⠛⠛⠛⠛⢿⣛⡓⠶⢦⣤⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⡴⡫⠒⠊⠁⠀⣸⠀⠀⠀⠀⠀⠀⢹⠀⠀⠁⠒⡏⢳⡄⠀⠀⠀⠀ +# ⠀⠀⠀⢀⡾⡑⡇⡀⠀⠀⠀⡷⠀⠀⠤⠤⠀⠀⢸⠀⠀⠀⠀⡇⡳⢻⡄⠀⠀⠀ +# ⠀⠀⢀⡾⢱⠔⠁⡇⠀⠀⠀⣇⠀⣀⣀⣀⣀⣀⣀⡇⠀⠀⠀⠃⠱⣵⢻⡄⠀⠀ +# ⠀⠀⡾⠁⢀⡀⢤⠗⠒⠒⢺⢳⠤⠶⠶⠶⠶⠶⢖⣷⠒⠒⠒⡦⢄⡀⠀⢹⡄⠀ +# ⠀⢸⡇⠐⣁⠤⠬⠤⠤⣤⣼⣷⣵⣶⣶⣶⣶⣶⣽⣿⢤⣤⠤⠷⠤⢄⡁⠘⣇⠀ +# ⠀⠘⣧⣞⣁⣀⡮⡯⡿⠛⠛⠫⠿⠭⠭⠭⠭⠽⠿⠛⠛⢻⠽⡿⣄⣀⣑⣦⠏⠀ +# ⠀⠀⣿⢠⣿⠃⠀⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡍⠁⠈⣿⡆⢸⠀⠀ +# ⠀⠀⣿⢸⣿⡀⠀⠀⠑⠄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠀⠀⠀⣿⣇⢸⡀⠀ +# ⢀⣴⡏⠥⠹⢇⠀⠀⠀⠀⠑⠄⠀⠀⠀⠀⠀⠀⠤⠂⠁⠀⠀⠀⢠⠟⠥⠹⣧⡀ +# ⣿⡼⢶⡒⠲⡚⠳⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡴⠛⡲⠒⣲⠷⢹ +# ⣿⣗⡚⠨⠑⢌⠢⡘⠷⣤⣀⣴⣾⠻⠟⠛⠻⡻⣶⣄⣠⡴⠏⡠⢊⠔⡡⢺⢚⢸ +# ⠙⢯⣗⣀⡀⠀⠑⠂⠥⢂⠭⣛⢵⣖⣒⣒⢲⡦⢟⠭⣕⠪⠅⠊⠀⠀⣁⣘⣯⠞ +# ⠀⠀⢿⡑⢬⣑⢄⠀⠀⠀⠈⡟⡞⣯⣷⣢⢿⣇⡗⡏⠀⠀⠀⢀⢴⡁⢐⣹⠀⠀ +# ⠀⠀⠀⠙⢻⡈⠳⣗⢄⣼⠓⣟⡏⣄⠩⠭⢡⡊⣗⡗⣷⣄⣴⡹⠋⡾⠛⠁⠀⠀ +# ⠀⠀⠀⠀⠀⠳⣤⡈⣷⣿⣆⢸⡇⠛⠀⠀⠘⠃⣿⢀⣿⣷⢋⣠⠾⠃⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠉⠻⠦⣭⣽⠇⠿⠀⠀⠸⠃⣯⣭⡥⠞⠋⠁⠀⠀⠀⠀⠀⠀ +# ⢀⢀⣀⣀⣀⣀⣀⣀⣀⣀⣈⣳⣵⣶⣶⣷⣶⣾⡵⣋⣀⣀⣀⣀⢀⠀⠀⠀⠀⠀ +# ⣿⣟⣟⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣯⣽⣽⣯⣿ diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml new file mode 100644 index 0000000000..ee300c993e --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml @@ -0,0 +1,57 @@ +--- + +# SLES Clustering - Deploy HANA clustering Resources +# Ref: https://documentation.suse.com/sle-ha/12-SP4/html/SLE-HA-install-quick/index.html +# Ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability +# Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-netapp-files-suse#create-file-system-resources + +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ database_high_availability | default(false) }}" + when: + - database_high_availability is defined + - database_high_availability is not defined + +- name: "NFS Compatibility - set mount options based on NFS source" + ansible.builtin.set_fact: + nfs_mount_options: "{% if NFS_provider == 'ANF' %}bind,defaults,rw,hard,rsize=262144,wsize=262144,proto=tcp,noatime,_netdev,nfsvers=4.1,lock,sec=sys{% else %}bind,defaults,rw,hard,proto=tcp,noatime,nfsvers=4.1,lock{% endif %}" 
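Once the Filesystem resources are active, it is worth confirming that Pacemaker mounted the share with the options selected above; a quick read-only check (the standard /hana/shared mount point is assumed):

    # The ANF branch should surface rsize=262144/wsize=262144 and sec=sys in the live options
    findmnt -t nfs4 /hana/shared
    # or, less precise but universally available
    mount | grep '/hana/shared'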
+ +- name: "Scale-Out Cluster Compatibility - Fetch majority maker node name" + ansible.builtin.set_fact: + majority_maker: "{{ (query('inventory_hostnames', '{{ sap_sid | upper }}_OBSERVER_DB'))[0] }}" + +- name: "Configure the ANF specific resources when relevant" + when: + - database_high_availability + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 1 + block: + - name: "Create dummy file system cluster resource for monitoring" + ansible.builtin.file: + path: "{{ item.folderpath }}" + state: directory + mode: 0755 + loop: + - { folderpath: '/hana/shared/{{ db_sid | upper }}/check'} + - { folderpath: '/hana/check'} + + +# ⠀⠀⠀⠀⠀⠀⣠⡤⠶⠒⢛⢻⠛⠛⠛⠛⠛⠛⢿⣛⡓⠶⢦⣤⠀⠀⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⡴⡫⠒⠊⠁⠀⣸⠀⠀⠀⠀⠀⠀⢹⠀⠀⠁⠒⡏⢳⡄⠀⠀⠀⠀ +# ⠀⠀⠀⢀⡾⡑⡇⡀⠀⠀⠀⡷⠀⠀⠤⠤⠀⠀⢸⠀⠀⠀⠀⡇⡳⢻⡄⠀⠀⠀ +# ⠀⠀⢀⡾⢱⠔⠁⡇⠀⠀⠀⣇⠀⣀⣀⣀⣀⣀⣀⡇⠀⠀⠀⠃⠱⣵⢻⡄⠀⠀ +# ⠀⠀⡾⠁⢀⡀⢤⠗⠒⠒⢺⢳⠤⠶⠶⠶⠶⠶⢖⣷⠒⠒⠒⡦⢄⡀⠀⢹⡄⠀ +# ⠀⢸⡇⠐⣁⠤⠬⠤⠤⣤⣼⣷⣵⣶⣶⣶⣶⣶⣽⣿⢤⣤⠤⠷⠤⢄⡁⠘⣇⠀ +# ⠀⠘⣧⣞⣁⣀⡮⡯⡿⠛⠛⠫⠿⠭⠭⠭⠭⠽⠿⠛⠛⢻⠽⡿⣄⣀⣑⣦⠏⠀ +# ⠀⠀⣿⢠⣿⠃⠀⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡍⠁⠈⣿⡆⢸⠀⠀ +# ⠀⠀⣿⢸⣿⡀⠀⠀⠑⠄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⠔⠀⠀⠀⣿⣇⢸⡀⠀ +# ⢀⣴⡏⠥⠹⢇⠀⠀⠀⠀⠑⠄⠀⠀⠀⠀⠀⠀⠤⠂⠁⠀⠀⠀⢠⠟⠥⠹⣧⡀ +# ⣿⡼⢶⡒⠲⡚⠳⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡴⠛⡲⠒⣲⠷⢹ +# ⣿⣗⡚⠨⠑⢌⠢⡘⠷⣤⣀⣴⣾⠻⠟⠛⠻⡻⣶⣄⣠⡴⠏⡠⢊⠔⡡⢺⢚⢸ +# ⠙⢯⣗⣀⡀⠀⠑⠂⠥⢂⠭⣛⢵⣖⣒⣒⢲⡦⢟⠭⣕⠪⠅⠊⠀⠀⣁⣘⣯⠞ +# ⠀⠀⢿⡑⢬⣑⢄⠀⠀⠀⠈⡟⡞⣯⣷⣢⢿⣇⡗⡏⠀⠀⠀⢀⢴⡁⢐⣹⠀⠀ +# ⠀⠀⠀⠙⢻⡈⠳⣗⢄⣼⠓⣟⡏⣄⠩⠭⢡⡊⣗⡗⣷⣄⣴⡹⠋⡾⠛⠁⠀⠀ +# ⠀⠀⠀⠀⠀⠳⣤⡈⣷⣿⣆⢸⡇⠛⠀⠀⠘⠃⣿⢀⣿⣷⢋⣠⠾⠃⠀⠀⠀⠀ +# ⠀⠀⠀⠀⠀⠀⠀⠉⠻⠦⣭⣽⠇⠿⠀⠀⠸⠃⣯⣭⡥⠞⠋⠁⠀⠀⠀⠀⠀⠀ +# ⢀⢀⣀⣀⣀⣀⣀⣀⣀⣀⣈⣳⣵⣶⣶⣷⣶⣾⡵⣋⣀⣀⣀⣀⢀⠀⠀⠀⠀⠀ +# ⣿⣟⣟⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣯⣽⣽⣯⣿ diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml new file mode 100644 index 0000000000..965942442a --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml @@ -0,0 +1,308 @@ +--- + +# RHEL Clustering - Deploy HANA cluster resources +# Azure ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-rhel + +# @TODO Subscribe to subscriptions/repos if required +# This code assumes the deployment is using RHEL SAP image + +# SAP HANA Cluster resources +# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-rhel#create-sap-hana-cluster-resources + +# This article contains references to a term that Microsoft no longer uses. When the term is removed from the software, we’ll remove it from this article. 
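Once this file has run, the resulting RHEL resource model can be sanity-checked on any cluster node. A read-only inspection sketch (SID HN1 and instance number 03 are illustrative; resource names follow the naming scheme used below):

    # Overall state: topology clone plus promotable/master-slave SAPHanaController
    pcs status --full

    # Constraints added below: ordering, colocation, majority-maker avoidance, NFS rules
    pcs constraint order --full
    pcs constraint colocation --full
    pcs constraint location --full

None of these commands change cluster state, so they are safe to run while validating a deployment.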
+ +# +------------------------------------4--------------------------------------*/ + +# Fetch the majority maker node OS Hostname as we need to create a constraint to prevent HANA resources from running on it +- name: "Scale-Out Cluster Compatibility - Fetch majority maker node name" + ansible.builtin.set_fact: + majority_maker: "{{ (query('inventory_hostnames', '{{ sap_sid | upper }}_OBSERVER_DB'))[0] }}" + +- name: "5.5.4.1 HANA Cluster configuration - Optimise the Pacemaker cluster for SAP HANA" + block: + - name: "5.5.4.1 HANA Cluster configuration - Get the cluster maintenance mode status" + ansible.builtin.shell: pcs property show maintenance-mode + register: get_status_maintenance_mode + changed_when: false + ignore_errors: true + + - name: "5.5.4.1 HANA Cluster configuration - Set the cluster maintenance mode if not already in maintenance mode" + ansible.builtin.shell: pcs property set maintenance-mode=true + when: >- + get_status_maintenance_mode.stdout is not search('maintenance-mode') or + get_status_maintenance_mode.stdout is search('maintenance-mode: false') + + - name: "5.5.4.1 HANA cluster resource configuration - RHEL 7" + when: + - ansible_distribution_major_version == "7" + block: + - name: Ensure the SAP topology resource is configured and cloned + ansible.builtin.shell: > + pcs resource create SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaTopologyScaleOut \ + SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} \ + op start timeout=600 op stop timeout=300 op monitor interval=10 timeout=600 \ + clone meta clone-node-max=1 interleave=true + register: hana_t + failed_when: hana_t.rc > 0 + + - name: Ensure the SAP HANA instance resource is created + ansible.builtin.shell: > + pcs resource create SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaController \ + SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=false \ + op start interval=0 timeout=3600 op stop interval=0 timeout=3600 op promote interval=0 timeout=3600 \ + op monitor interval=60 role="Master" timeout=700 op monitor interval=61 role="Slave" timeout=700 + register: sap_hana + failed_when: sap_hana.rc > 0 + + - name: Ensure master-slave (msl) resource for managing an SAP HANA instance is created + ansible.builtin.shell: > + pcs resource master msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + meta master-max="1" clone-node-max=1 interleave=true + register: msl_sap_hana + failed_when: msl_sap_hana.rc > 0 + + - name: Ensure the netcat resource for the Load Balancer Healthprobe is created + ansible.builtin.shell: pcs resource create nc_{{ db_sid | upper }}_{{ db_instance_number }} azure-lb port=625{{ db_instance_number }} + register: netcat + failed_when: netcat.rc > 0 + + - name: Ensure the Virtual IP resource for the Load Balancer Front End IP is created + ansible.builtin.shell: pcs resource create vip_{{ db_sid | upper }}_{{ db_instance_number }} ocf:heartbeat:IPaddr2 ip={{ database_loadbalancer_ip }} op monitor interval="10s" timeout="20s" + register: vip + failed_when: vip.rc > 0 + + - name: Ensure the Virtual IP group resource is created + ansible.builtin.shell: pcs resource group add g_ip_{{ db_sid | upper }}_{{ db_instance_number }} nc_{{ db_sid | upper }}_{{ db_instance_number }} vip_{{ db_sid | upper }}_{{ db_instance_number }} + register: vip_g + failed_when: vip_g.rc > 0 + + - name: Ensure 
the order constraint for the SAP HANA Topology is configured + ansible.builtin.shell: pcs constraint order SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone then msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} + register: order + failed_when: order.rc > 0 + + - name: Ensure the Virtual IP group colocation constraint is configured + ansible.builtin.shell: pcs constraint colocation add g_ip_{{ db_sid | upper }}_{{ db_instance_number }} with master msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} 4000 + register: colocation + failed_when: colocation.rc > 0 + + # Ref : https://access.redhat.com/articles/6093611#5123-constraints + - name: Ensure that SAP HANA master-slave resources avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} avoids {{ majority_maker }} + register: msl_mm_location + failed_when: msl_mm_location.rc > 0 + + - name: Ensure that HANA Resource topology avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone avoids {{ majority_maker }} + register: hana_mm_location + failed_when: hana_mm_location.rc > 0 + + - name: Ensure that IP Resource avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location g_ip_{{ db_sid | upper }}_{{ db_instance_number }} avoids {{ majority_maker }} + register: vip_mm_location + failed_when: vip_mm_location.rc > 0 + + - name: Ensure the HANA resources and NFS filesystem constraint is configured + ansible.builtin.shell: pcs constraint location SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone rule resource-discovery=never score=-INFINITY hana_nfs_s1_active ne true and hana_nfs_s2_active ne true + register: nfs_constraint + failed_when: nfs_constraint.rc > 0 + + + + - name: "5.5.4.1 HANA cluster resource configuration - RHEL 8/9" + when: + - ansible_distribution_major_version in ["8", "9"] + block: + - name: Ensure the SAP topology resource is configured and cloned + ansible.builtin.shell: > + pcs resource create SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaTopology \ + SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} meta clone-node-max=1 interleave=true \ + op methods interval=0s timeout=5 \ + op start timeout=600 op stop timeout=300 op monitor interval=10 timeout=600 \ + clone meta clone-node-max=1 interleave=true + register: hana_t + failed_when: hana_t.rc > 0 + + - name: Ensure the SAP HANA instance is created + ansible.builtin.shell: > + pcs resource create SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaController \ + SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=false \ + op demote interval=0s timeout=320 op methods interval=0s timeout=5 \ + op start interval=0 timeout=3600 op stop interval=0 timeout=3600 op promote interval=0 timeout=3600 \ + op monitor interval=60 role="Master" timeout=700 op monitor interval=61 role="Slave" timeout=700 + register: sap_hana + failed_when: sap_hana.rc > 0 + + - name: Ensure that the SAP HANA instance is promotable + ansible.builtin.shell: > + pcs resource promotable SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + meta master-max="1" clone-node-max=1 interleave=true + register: promo_sap_hana + failed_when: promo_sap_hana.rc > 0 + + - name: Ensure the 
netcat resource for the Load Balancer Healthprobe is created + ansible.builtin.shell: pcs resource create nc_{{ db_sid | upper }}_{{ db_instance_number }} azure-lb port=625{{ db_instance_number }} + register: netcat + failed_when: netcat.rc > 0 + + - name: Ensure the Virtual IP resource for the Load Balancer Front End IP is created + ansible.builtin.shell: pcs resource create vip_{{ db_sid | upper }}_{{ db_instance_number }} ocf:heartbeat:IPaddr2 ip={{ database_loadbalancer_ip }} op monitor interval="10s" timeout="20s" + register: vip + failed_when: vip.rc > 0 + + - name: Ensure the Virtual IP group resource is created + ansible.builtin.shell: pcs resource group add g_ip_{{ db_sid | upper }}_{{ db_instance_number }} nc_{{ db_sid | upper }}_{{ db_instance_number }} vip_{{ db_sid | upper }}_{{ db_instance_number }} + register: vip_g + failed_when: vip_g.rc > 0 + + - name: Ensure the order constraint for the SAP HANA Topology is configured + ansible.builtin.shell: pcs constraint order SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone then SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone + register: order + failed_when: order.rc > 0 + + - name: Ensure the Virtual IP group colocation constraint is configured + ansible.builtin.shell: pcs constraint colocation add g_ip_{{ db_sid | upper }}_{{ db_instance_number }} with master SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone 4000 + register: colocation + failed_when: colocation.rc > 0 + + # Ref: https://access.redhat.com/articles/6093611#5123-constraints + - name: Ensure that SAP HANA resources avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone avoids {{ majority_maker }} + register: msl_mm_location + failed_when: msl_mm_location.rc > 0 + + - name: Ensure that HANA Resource topology avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone avoids {{ majority_maker }} + register: hana_mm_location + failed_when: hana_mm_location.rc > 0 + + - name: Ensure that IP Resource avoids running on the majority maker node + ansible.builtin.shell: pcs constraint location g_ip_{{ db_sid | upper }}_{{ db_instance_number }} avoids {{ majority_maker }} + register: vip_mm_location + failed_when: vip_mm_location.rc > 0 + + + - name: Ensure the HANA resources and NFS filesystem constraint is configured + ansible.builtin.shell: pcs constraint location SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}-clone rule resource-discovery=never score=-INFINITY hana_nfs_s1_active ne true and hana_nfs_s2_active ne true + register: nfs_constraint + failed_when: nfs_constraint.rc > 0 + + + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" + ansible.builtin.shell: pcs property set maintenance-mode=false + + - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 7" + ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' + register: cluster_stable_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" + # '*' is a special character in regexp and needs to be escaped for 
literal matching
+    # if we are worried about character spacing across distros we can match for '\* Online:'
+    - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 8 or 9"
+      ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:'
+      register: cluster_stable_check
+      retries: 12
+      delay: 10
+      until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout"
+      when: ansible_distribution_major_version in ["8", "9"]
+
+    - name: "5.5.4.1 HANA Cluster configuration - Cleanup any stale cluster resources"
+      ansible.builtin.shell: pcs resource cleanup
+
+    # the leading spaces are irrelevant here as we are looking for *Started:
+    - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 7"
+      ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:'
+      register: hana_cluster_resource_check
+      retries: 12
+      delay: 10
+      until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout"
+      when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9"
+
+    - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 8 or 9"
+      ansible.builtin.shell: set -o pipefail && pcs resource status | grep '\* Started:'
+      register: hana_cluster_resource_check
+      retries: 12
+      delay: 10
+      until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout"
+      when: ansible_distribution_major_version in ["8", "9"]
+  when: ansible_hostname == primary_instance_name
+
+# End of HANA clustering resources
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                 Systemd-Based SAP Startup Framework - BEGIN                |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+# Follow steps described in https://access.redhat.com/articles/6884531
+
+- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer"
+  ansible.builtin.set_fact:
+    is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}"
+
+- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file"
+  become: true
+  when:
+    - is_rhel_82_or_newer is defined
+    - is_rhel_82_or_newer
+  block:
+    - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file"
+      ansible.builtin.lineinfile:
+        path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        line: "[Unit]"
+
+    - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file"
+      ansible.builtin.lineinfile:
+        path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        insertafter: '^\[Unit\]$'
+        line: "Description=Pacemaker needs the SAP HANA instance service"
+
+    - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file"
+      ansible.builtin.lineinfile:
+        path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        insertafter: '^Description=Pacemaker needs the SAP HANA instance service$'
+        line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service"
+
+    - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file"
+      ansible.builtin.lineinfile:
+        path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf
+        create: true
+        backup: true
+        owner: root
+        group: root
+        mode: '0644'
+        insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$'
+        line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service"
+      register: dropinfile
+
+    - name: "5.5.4.1 HANA Cluster configuration - systemd reload"
+      ansible.builtin.systemd:
+        daemon_reload: true
+      when:
+        - dropinfile.changed
+
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                 Systemd-Based SAP Startup Framework - END                  |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+
+# Configuration for Active/Read-enabled system
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml
new file mode 100644
index 0000000000..404fb4e265
--- /dev/null
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml
@@ -0,0 +1,256 @@
+---
+
+# SLES Clustering - Deploy HANA scale out clustering Resources
+# Ref: https://documentation.suse.com/sle-ha/12-SP4/html/SLE-HA-install-quick/index.html
+# Ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability
+# Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal
+# This code contains references to terms that Microsoft no longer uses. When these terms are removed from the software, we'll remove them from this article.
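As with the RHEL variant, the outcome on SLES can be reviewed read-only once the tasks in this file have completed (SID HN1 and instance number 03 are illustrative):

    # Cluster-wide view including clone and master-slave state
    crm status full

    # Dump the configured primitives and the property/defaults sets created below
    crm configure show type:primitive
    crm configure show cib-bootstrap-options rsc-options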
+ +- name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Cluster STONITH is configured" + block: + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled" + ansible.builtin.command: crm configure property maintenance-mode=true + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + when: + - (database_cluster_type == "ASD") or + (database_cluster_type == "ISCSI") + ansible.builtin.command: > + crm configure property \$id="cib-bootstrap-options" + no-quorum-policy="ignore" + stonith-enabled="true" + stonith-action="reboot" + stonith-timeout="144s" + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + when: database_cluster_type not in ["ISCSI", "ASD"] + ansible.builtin.command: > + crm configure property \$id="cib-bootstrap-options" + no-quorum-policy="ignore" + stonith-enabled="true" + stonith-action="reboot" + stonith-timeout="900s" + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Resource Defaults are configured" + ansible.builtin.shell: > + crm configure rsc_defaults \$id="rsc-options" + resource-stickiness="1000" + migration-threshold="5000" + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure file system cluster resources are created for for cluster site" + ansible.builtin.shell: > + crm configure primitive fs_{{ db_sid | upper }}_HDB{{db_instance_number}}_fscheck Filesystem + params device="/hana/shared/{{ db_sid | upper }}/check" + directory="/hana/check" fstype=nfs4 + options="{{ nfs_mount_options }}" + op monitor interval=120 timeout=120 on-fail=fence + op_params OCF_CHECK_LEVEL=20 + op start interval=0 timeout=120 op stop interval=0 timeout=120 + + crm configure clone cln_fs_{{ db_sid | upper }}_HDB{{ db_instance_number }}_fscheck fs_{{ db_sid | upper }}_HDB{{ db_instance_number }}_fscheck + meta clone-node-max=1 interleave=true + + crm configure location loc_cln_fs_{{ db_sid | upper }}_HDB{{ db_instance_number }}_fscheck_not_on_mm \ + cln_fs_{{ db_sid | upper }}_HDB{{ db_instance_number }}_fscheck -inf: {{ majority_maker }} + register: sap_file_cluster + failed_when: sap_file_cluster.rc > 1 + when: + - ansible_hostname == primary_instance_name + + # Operation Default recommendation from section 5.3.1 in https://www.suse.com/media/white-paper/suse_linux_enterprise_server_for_sap_applications_12_sp1.pdf#page=26 + # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal#create-sap-hana-cluster-resources + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Topology resource is configured on cluster site" + ansible.builtin.shell: > + crm configure primitive rsc_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + ocf:suse:SAPHanaTopology \ + op monitor interval="10" timeout="600" \ + op start interval="0" timeout="600" \ + op stop interval="0" timeout="300" \ + params SID="{{ db_sid | upper }}" InstanceNumber="{{ db_instance_number }}" + register: sap_hana_topology + failed_when: sap_hana_topology.rc > 1 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Topology clone set resource is configured on cluster site" + ansible.builtin.shell: > + crm configure clone cln_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + rsc_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + meta clone-node-max="1" target-role="Started" interleave="true" + 
register: sap_hana_topology_cln + failed_when: sap_hana_topology_cln.rc > 1 + + # We recommend as a best practice that you only set AUTOMATED_REGISTER to no, while performing thorough fail-over tests, + # to prevent failed primary instance to automatically register as secondary. + # Once the fail-over tests have completed successfully, set AUTOMATED_REGISTER to yes, so that after takeover system replication can resume automatically. + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Controller is configured" + ansible.builtin.shell: > + crm configure primitive rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} ocf:suse:SAPHanaController \ + op start interval="0" timeout="3600" \ + op stop interval="0" timeout="3600" \ + op promote interval="0" timeout="3600" \ + op monitor interval="60" role="Master" timeout="700" \ + op monitor interval="61" role="Slave" timeout="700" \ + params SID="{{ db_sid | upper }}" InstanceNumber="{{ db_instance_number }}" PREFER_SITE_TAKEOVER="true" \ + DUPLICATE_PRIMARY_TIMEOUT="7200" AUTOMATED_REGISTER="false" + register: sap_hana_controller + failed_when: sap_hana_controller.rc > 1 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA master-slave resource is configured" + ansible.builtin.shell: > + crm configure ms msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + meta clone-node-max="1" master-max="1" interleave="true" + register: sap_hana_msl + failed_when: sap_hana_msl.rc > 1 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Virtual IP resource is configured" + ansible.builtin.shell: > + crm configure primitive rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} ocf:heartbeat:IPaddr2 + meta target-role="Started" + operations \$id="rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }}-operations" + op monitor interval="10s" timeout="20s" + params ip="{{ database_loadbalancer_ip }}" + register: sap_hana_rsc_ip + failed_when: sap_hana_rsc_ip.rc > 1 + # socat is recommended in place of netcat on Azure: https://www.suse.com/support/kb/doc/?id=000019536 + # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability + # Currently we recommend using azure-lb resource agent, which is part of package resource-agents + # - name: Ensure SAP HANA Heartbeat socat resource is configured + # shell: > + # crm configure primitive rsc_nc_{{ db_sid | upper }}_HDB{{ instance_number }} anything + # params binfile="/usr/bin/socat" cmdline_options="-U TCP-LISTEN:625{{ instance_number }},backlog=10,fork,reuseaddr /dev/null" + # op monitor timeout=20s interval=10 depth=0 + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA azure-lb resource agent is configured" + ansible.builtin.shell: > + crm configure primitive rsc_nc_{{ db_sid | upper }}_HDB{{ db_instance_number }} azure-lb port=625{{ db_instance_number }} meta resource-stickiness=0 + register: sap_hana_nc_ip + failed_when: sap_hana_nc_ip.rc > 1 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure Group IP Address resource is configured" + ansible.builtin.shell: > + crm configure group g_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} + rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} + rsc_nc_{{ db_sid | upper }}_HDB{{ db_instance_number }} + register: sap_hana_g_ip + failed_when: sap_hana_g_ip.rc > 1 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure 
Co-Location constraint is configured"
+      ansible.builtin.shell: >
+        crm configure colocation col_saphana_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }}
+        4000:
+        g_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }}:Started
+        msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}:Master
+      register: sap_hana_g_col_ip
+      failed_when: sap_hana_g_col_ip.rc > 1
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure Resource order is configured"
+      ansible.builtin.shell: >
+        crm configure order ord_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}
+        Optional:
+        cln_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }}
+        msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}
+      register: sap_hana_ord_ip
+      failed_when: sap_hana_ord_ip.rc > 1
+
+    # Ensure the first entry of the observer_db host group is excluded from running cluster resources
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure master-slave configuration does not run on the majority maker node"
+      ansible.builtin.shell: >
+        crm configure location loc_SAPHanaCon_not_on_majority_maker msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} -inf: {{ majority_maker }}
+      register: sap_hana_msl_loc_mm
+      failed_when: sap_hana_msl_loc_mm.rc > 1
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure clone set does not run on the majority maker node"
+      ansible.builtin.shell: >
+        crm configure location loc_SAPHanaTop_not_on_majority_maker cln_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} -inf: {{ majority_maker }}
+      register: sap_hana_cln_mm
+      failed_when: sap_hana_cln_mm.rc > 1
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure cluster stickiness parameter"
+      ansible.builtin.shell: >
+        crm configure rsc_defaults resource-stickiness=1000
+      register: sap_hana_stickiness
+      failed_when: sap_hana_stickiness.rc > 1
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure cluster default migration threshold"
+      ansible.builtin.shell: >
+        crm configure rsc_defaults migration-threshold=50
+      register: sap_hana_migration
+      failed_when: sap_hana_migration.rc > 1
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure any required cluster resources are cleaned up"
+      ansible.builtin.command: "crm resource cleanup rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}"
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is disabled"
+      ansible.builtin.command: crm configure property maintenance-mode=false
+  when:
+    - inventory_hostname == primary_instance_name
+
+- name: "Backward Compatibility - Check required Database HA variables"
+  ansible.builtin.set_fact:
+    database_high_availability: "{{ db_high_availability | default(false) }}"
+  when:
+    - db_high_availability is defined
+    - database_high_availability is not defined
+
+- name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure the ANF specific resources when relevant"
+  when:
+    - database_high_availability
+    - NFS_provider == "ANF"
+    - hana_shared_mountpoint is defined
+    - hana_shared_mountpoint | length > 1
+    - inventory_hostname == primary_instance_name
+  block:
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Stop HANA System on both sites"
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem"
+      failed_when: false
+      changed_when: false
+      register: hana_system_stopped
+
+    - name: "5.5.4.1 HANA Scale-Out Pacemaker 
configuration - Wait 2 minutes for SAP system to stop" + ansible.builtin.pause: + seconds: 120 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled" + ansible.builtin.command: crm configure property maintenance-mode=true + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure the cluster to add the directory structure for monitoring" + ansible.builtin.command: > + crm configure primitive rsc_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} Filesystem params \ + device="/hana/shared/{{ db_sid | upper }}/check/" \ + directory="/hana/shared/check/" fstype=nfs4 \ + options={{ nfs_mount_options }} \ + op monitor interval=120 timeout=120 on-fail=fence \ + op_params OCF_CHECK_LEVEL=20 \ + op start interval=0 timeout=120 \ + op stop interval=0 timeout=120 + register: sap_hana_fs_check + failed_when: sap_hana_fs_check.rc != 0 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Clone and check the newly configured volume in the cluster" + ansible.builtin.command: > + crm configure clone cln_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} rsc_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ + meta clone-node-max=1 interleave=true + register: sap_hana_cln_fs_check + failed_when: sap_hana_cln_fs_check.rc != 0 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Start HANA System on both nodes" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" + failed_when: false + changed_when: false + register: hana_system_started + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Wait 5 minutes for SAP system to start" + ansible.builtin.pause: + seconds: 300 + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is disabled" + ansible.builtin.command: crm configure property maintenance-mode=false + + - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Remove false positives" + ansible.builtin.shell: crm_resource -C diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml new file mode 100644 index 0000000000..622e0ac1ea --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml @@ -0,0 +1,194 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | Post processing +# | | +# +------------------------------------4--------------------------------------*/ + +- name: Pause to give cluster time to stabilize + ansible.builtin.pause: + seconds: "{{ cluster_status_report_wait_in_s }}" + +- name: Check the post-provisioning cluster status + ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}" + register: cluster_status_report + changed_when: false + failed_when: false + +- name: Output cluster status + ansible.builtin.debug: + msg: "{{ cluster_status_report.stdout }}" + verbosity: 2 + +- name: Check the SBD devices status + ansible.builtin.shell: set -o pipefail && crm_mon -1 | grep sbd + register: sbd_status_report + changed_when: false + failed_when: false + when: ansible_os_family == 'Suse' + +- name: Output SBD status + ansible.builtin.debug: + msg: "{{ sbd_status_report.stdout }}" + when: ansible_os_family == 'Suse' + +# old command: +# awk '/ha_dr_SAPHanaSR.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* +# 
Verify that the hook script is working as expected. +- name: Pause to give HANA replication time to stabilize + ansible.builtin.pause: + seconds: "{{ hsr_status_report_wait_in_s }}" + +- name: "Verify that the hook script is working as expected" + when: not db_scale_out + block: + - name: "Verify the hook Installation" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + IFS=' ' + get_saphanasr_rc=$(grep ha_dr_ nameserver_* | \ + awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ + { printf "%s ",$16 }') + read -a saphanasr_status <<< ${get_saphanasr_rc} + echo "${saphanasr_status[-1]}" + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ hostvars[primary_instance_name]['virtual_host'] }}/trace + register: saphanasr + when: inventory_hostname == primary_instance_name + rescue: + - name: "[Rescue] - Pause to give HANA replication time to stabilize" + ansible.builtin.pause: + seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + + - name: "[Rescue] - Verify the hook Installation" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + IFS=' ' + get_saphanasr_rc=$(grep ha_dr_ nameserver_* | \ + awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ + { printf "%s ",$16 }') + read -a saphanasr_status <<< ${get_saphanasr_rc} + echo "${saphanasr_status[-1]}" + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ hostvars[primary_instance_name]['virtual_host'] }}/trace + register: saphanasr + when: inventory_hostname == primary_instance_name + +# Code block is specifically for ScaleOut, SUSE and RedHat +- name: "Verify that the hook script is working as expected (Scale Out) for SUSE " + when: + - db_scale_out + - ansible_os_family | upper == 'SUSE' + - inventory_hostname == primary_instance_name + block: + - name: "Verify the hook Installation (SUSE)" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + grep SAPHanaSr.*init nameserver_*.trc | tail -3 + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace + register: saphanasr + until: saphanasr.stdout is search("Running") + retries: 10 + delay: 30 + rescue: + - name: "[Rescue] - Pause to give HANA replication time to stabilize" + ansible.builtin.pause: + seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + + - name: "[Rescue] - Verify the hook Installation (SUSE)" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + grep SAPHanaSr.*init nameserver_*.trc | tail -3 + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace + register: saphanasr + until: saphanasr.stdout is search("Running") + retries: 10 + delay: 30 + +- name: Verify that the hook script is working as expected (Scale Out) for Red Hat {{ ansible_distribution_major_version }}" + when: + - ansible_os_family | upper == "REDHAT" + - ansible_hostname == primary_instance_name + - db_scale_out + block: + - name: "Verify the hook Installation (REDHAT)" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace + register: saphanasr + until: saphanasr.stdout is search("SOK") + retries: 10 + delay: 30 + rescue: + - name: 
"[Rescue] - Pause to give HANA replication time to stabilize" + ansible.builtin.pause: + seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + + - name: "[Rescue] - Verify the hook Installation (REDHAT)" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + set -o pipefail + awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 + args: + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace + register: saphanasr + until: saphanasr.stdout is search("SOK") + retries: 10 + delay: 30 + + + +# SUSE only +# Check on all nodes, status of susTkOver Hook + + +- name: "Log that the hook script is working as expected" + block: + - name: "Debug (saphanasr)" + ansible.builtin.debug: + var: saphanasr + verbosity: 2 + + - name: "set_fact (saphanasr)" + ansible.builtin.set_fact: + hsr_result: saphanasr.stdout + + - name: "Debug (hsr_result)" + ansible.builtin.debug: + var: hsr_result + verbosity: 2 + + - name: "Assert HSR Hook verification is successful" + ansible.builtin.assert: + that: + - "'SFAIL' != hsr_result" + fail_msg: "Unable to determine if HSR Hook is working" + when: inventory_hostname == primary_instance_name + +- name: Verify the hook Installation + ansible.builtin.debug: + var: saphanasr + verbosity: 2 + +- name: "HANA PCM Install: reset" + ansible.builtin.file: + path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt + state: touch + mode: 0755 diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml new file mode 100644 index 0000000000..487fa498b2 --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: "5.8 HANADB Pacemaker - import - set_runtime_facts" + ansible.builtin.import_tasks: 5.8.1-set_runtime_facts.yml + +- name: "5.8 HANADB Pacemaker - import - pre_checks" + ansible.builtin.import_tasks: 5.8.2-pre_checks.yml + +- name: "5.8 HANADB Pacemaker - import - SAP HanaSR" + ansible.builtin.import_tasks: 5.8.3-SAPHanaSRMultiTarget.yml + when: + - node_tier in ['hana','observer'] + +# Scale-Out HSR Specific provision task with its own unique sequence +- name: "5.8 HANADB Pacemaker - import - provision Scale-Out-HSR" + ansible.builtin.import_tasks: 5.8.4-provision-ScaleOut.yml + when: + - not hana_cluster_existence_check + +- name: "5.8 HANADB Pacemaker - import - post_provision_report" + ansible.builtin.import_tasks: 5.8.5-post_provision_report.yml diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-rhel.j2 b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-rhel.j2 new file mode 100644 index 0000000000..d292468a9f --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-rhel.j2 @@ -0,0 +1,6 @@ + +Cmnd_Alias SOK = /usr/sbin/crm_attribute -n hana_{{ db_sid | lower }}_glob_srHook -v SOK -t crm_config -s SAPHanaSR +Cmnd_Alias SFAIL = /usr/sbin/crm_attribute -n hana_{{ db_sid | lower }}_glob_srHook -v SFAIL -t crm_config -s SAPHanaSR +{{ db_sid | lower }}adm ALL=(ALL) NOPASSWD: SOK, SFAIL +Defaults!SOK, SFAIL !requiretty + diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-suse.j2 b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-suse.j2 new file mode 100644 index 0000000000..4e288be225 --- /dev/null +++ 
b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/20-saphana-suse.j2 @@ -0,0 +1,5 @@ +# SAPHanaSR-ScaleOut needs for srHook +{{ db_sid | lower }}adm ALL=(ALL) NOPASSWD: /usr/sbin/crm_attribute -n hana_{{ db_sid | lower }}_site_srHook_* +{{ db_sid | lower }}adm ALL=(ALL) NOPASSWD: /usr/sbin/crm_attribute -n hana_{{ db_sid | lower }}_gsh * +{{ db_sid | lower }}adm ALL=(ALL) NOPASSWD: /usr/sbin/SAPHanaSR-hookHelper --sid={{ db_sid | lower }} * + diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/corosync.conf.j2 b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/corosync.conf.j2 new file mode 100644 index 0000000000..ab4e2faba6 --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/templates/corosync.conf.j2 @@ -0,0 +1,61 @@ +# Please read the corosync.conf.5 manual page + +totem { + version: 2 + secauth: on + crypto_hash: sha1 + crypto_cipher: aes256 + cluster_name: hacluster + clear_node_high_bit: yes + + token: {{ cluster_totem.token }} + token_retransmits_before_loss_const: {{ cluster_totem.retransmits }} + join: {{ cluster_totem.join }} + consensus: {{ cluster_totem.consensus }} + max_messages: {{ cluster_totem.max_messages }} + + interface { + ringnumber: 0 + mcastport: 5405 + ttl: 1 + } + + + transport: udpu + + + +} + +logging { + fileline: off + to_stderr: no + to_logfile: yes + logfile: /var/log/cluster/corosync.log + to_syslog: no + debug: off + timestamp: on + logger_subsys { + subsys: QUORUM + debug: off + } +} + +nodelist { + node { + ring0_addr: {{ primary_instance.ip_db }} + nodeid: 1 + } + node { + ring0_addr: {{ secondary_instance.ip_db }} + nodeid: 2 + } +} + +quorum { + # Enable and configure quorum subsystem (default: off) + # see also corosync.conf.5 and votequorum.5 + provider: corosync_votequorum + expected_votes: {{ cluster_quorum.expected_votes }} + two_node: {{ cluster_quorum.two_node }} +} diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/vars/main.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/vars/main.yml new file mode 100644 index 0000000000..035ce4e416 --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/vars/main.yml @@ -0,0 +1,47 @@ +--- + +cluster_totem: + token: 30000 + retransmits: 10 + join: 60 + consensus: 36000 + max_messages: 20 + +cluster_quorum: + expected_votes: 2 + two_node: 1 + +# These are the default timeouts used for the SAP HANA OS clustering. Depending on the +# SAP HANA System, these may need to be adjusted if the operation takes longer than expected. 
+cluster_sap_hana_timeouts: + start: 3600 + stop: 3600 + monitor_master: 700 + monitor_slave: 700 + promote: 3600 + demote: 3600 + +cluster_status_cmd: + RedHat: "pcs status --full" + Suse: "crm status full" + +cluster_status_report_wait_in_s: 60 +hsr_status_report_wait_in_s: 60 +rescue_hsr_status_report_wait_in_s: 120 + + +# The following values should be same as iSCSI configuration +# run 'sudo targetcli ls' on iSCSI target virtual machines to get all iSCSI configuration +cluster_name: db{{ sid | lower }} +# storage_object: sbd{{ cluster_name }} +# target: "{{ iscsi_object }}.{{ cluster_name }}.local:{{ cluster_name }}" + +# HANA utility commands +sapcontrol_command: "/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe/sapcontrol -nr {{ db_instance_number }}" +storage_object: sbd{{ cluster_name }} +target: "{{ iscsi_object }}.{{ cluster_name }}.local:{{ cluster_name }}" + + +hana_stop_start_timeout_in_seconds: 600 +hana_stop_start_delay_in_seconds: 10 +... diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 69b49122c8..e2f34c285f 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -37,6 +37,10 @@ always_upload_jinja_templates: true hana_use_master_password: y hana_password_copy: "" hana_backup_path: /hana/backup +# Note: the spacing and lining of Jinja2 expression is critical here, lest we end up with a stray character in the path +hana_data_basepath: "{% if db_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}" +hana_log_basepath: "{% if db_scale_out %}/hana/log/{{ db_sid | upper }}{% else %}/hana/log{% endif %}" +hana_autostart: false # When set to true, will configure autostart parameter to 1 for HANA nodes. 
Only applicable for Scale out sap_sid: "" # REQUIRED - SAP Install download_basket_dir: "{{ target_media_location }}/download_basket" db_sid: "XDB" # Default From 48442ba32420d753309feb6059606d70080963b2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 17 May 2024 13:41:31 +0300 Subject: [PATCH 557/607] chore: Refactor deployment scripts and Terraform modules --- .../tasks/main.yaml | 5 +--- deploy/scripts/New-SDAFDevopsProject.ps1 | 29 ++++++++++--------- .../modules/sap_system/hdb_node/outputs.tf | 2 +- .../sap_system/hdb_node/vm-observer.tf | 2 +- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index b8fa0ba3d0..77c9c11c94 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -413,8 +413,6 @@ register: hana_stopped environment: TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 rescue: - name: "Fail if HANA installation failed with rc > 1" ansible.builtin.fail: @@ -525,8 +523,7 @@ mode: 0755 when: - not hana_installed.stat.exists - when: - - database_high_availability + - database_high_availability # /*----------------------------End of setup----------------------------------8 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index f9ad38537b..b15ecdd5ea 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -745,25 +745,26 @@ if ($WebApp) { Write-Host "Found an existing App Registration:" $ApplicationName $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - $APP_REGISTRATION_ID = $ExistingData.appId + $APP_REGISTRATION_ID = $ExistingData.appId - $confirmation = Read-Host "Reset the app registration secret y/n?" - if ($confirmation -eq 'y') { - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + $confirmation = Read-Host "Reset the app registration secret y/n?" 
+ if ($confirmation -eq 'y') { + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + } + else { + $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" + } } else { - $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" - } -} -else { - Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green - Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' + Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green + Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' - $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "") + $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "") - if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } + if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" } - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) + } } #endregion @@ -977,7 +978,7 @@ if (!$AlreadySet -or $ResetPAT ) { accessLevel = @{ accountLicenseType = "stakeholder" } - user = @{ + user = @{ origin = "aad" originId = $MSI_objectId subjectKind = "servicePrincipal" diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index d3f2981b7d..c55b023ba8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -240,7 +240,7 @@ output "observer_ips" { output "observer_vms" { description = "Resource IDs for observer nodes" value = local.enable_deployment ? ( - azurerm_linux_virtual_machine.observer[*].id,) : ( + azurerm_linux_virtual_machine.observer[*].id) : ( [""] ) } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf index 99cf0c5f44..55b1b9d65a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -47,7 +47,7 @@ resource "azurerm_network_interface" "observer" { resource "azurerm_linux_virtual_machine" "observer" { provider = azurerm.main - count = local.deploy_observer && upper(local.anydb_ostype) == "LINUX" ? 
1 : 0 + count = local.deploy_observer depends_on = [var.anchor_vm] resource_group_name = var.resource_group[0].name location = var.resource_group[0].location From 1469ca44f44bb0df61419911dc2cc8c484cb2e6e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 17 May 2024 13:57:47 +0300 Subject: [PATCH 558/607] Refactor deployment scripts and Terraform modules --- .../run/sap_system/tfvar_variables.tf | 23 +++++++++++++++---- deploy/terraform/run/sap_system/transform.tf | 1 + .../sap_system/hdb_node/variables_local.tf | 3 ++- .../sap_system/hdb_node/vm-observer.tf | 8 +++---- 4 files changed, 25 insertions(+), 10 deletions(-) diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 151cfff01f..a85a7a55fc 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -448,11 +448,6 @@ variable "database_cluster_type" { default = "AFA" } -variable "use_observer" { - description = "If true, an observer virtual machine will be used" - default = true - } - variable "database_vm_zones" { description = "If provided, the database tier will be deployed in the specified zones" default = [] @@ -584,6 +579,24 @@ variable "database_use_premium_v2_storage" { default = false } +######################################################################################### +# # +# Observer variables # +# # +######################################################################################### + + +variable "use_observer" { + description = "If true, an observer virtual machine will be used" + default = true + } + +variable "observer_nic_ips" { + description = "If provided, the database tier observer virtual machines will be configured with the specified IPs (db subnet)" + default = [""] + } + + ######################################################################################### # # # Application tier variables # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index f69a3a53a8..8dd5378c11 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -88,6 +88,7 @@ locals { database_cluster_disk_lun = var.database_cluster_disk_lun database_cluster_disk_size = var.database_cluster_disk_size database_cluster_disk_type = var.database_cluster_disk_type + observer_vm_ips = var.observer_vm_ips platform = var.database_platform use_ANF = var.database_HANA_use_ANF_scaleout_scenario || try(var.databases[0].use_ANF, false) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index 0ff45c4e2b..dc9de7290e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -136,6 +136,8 @@ locals { hdb_admin_vm = 4 hdb_db_vm = 5 hdb_storage_vm = 4 + observer_db_vm = 8 + } // Ports used for specific HANA Versions @@ -414,7 +416,6 @@ locals { use_shared_volumes = local.use_avg || var.hana_ANF_volumes.use_for_shared && var.hana_ANF_volumes.use_existing_shared_volume #If using an existing VM for observer set use_observer to false in .tfvars - deploy_observer = var.use_observer observer_size = "Standard_D4s_v3" observer_authentication = local.authentication observer_custom_image = local.hdb_custom_image diff --git 
a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf index 55b1b9d65a..dba9f4950f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -7,7 +7,7 @@ #######################################4#######################################8 resource "azurerm_network_interface" "observer" { provider = azurerm.main - count = local.deploy_observer ? 1 : 0 + count = var.use_observer ? 1 : 0 name = format("%s%s%s%s%s", var.naming.resource_prefixes.nic, local.prefix, @@ -25,10 +25,10 @@ resource "azurerm_network_interface" "observer" { subnet_id = var.db_subnet.id private_ip_address = var.database.use_DHCP ? ( null) : ( - try(local.observer.nic_ips[count.index], + try(var.database.observer_vm_ips[count.index], cidrhost( var.db_subnet.address_prefixes[0], - tonumber(count.index) + local.anydb_ip_offsets.observer_db_vm + tonumber(count.index) + local.hdb_ip_offsets.observer_db_vm ) ) ) @@ -47,7 +47,7 @@ resource "azurerm_network_interface" "observer" { resource "azurerm_linux_virtual_machine" "observer" { provider = azurerm.main - count = local.deploy_observer + count = var.use_observer ? 1 : 0 depends_on = [var.anchor_vm] resource_group_name = var.resource_group[0].name location = var.resource_group[0].location From edd6341cc5b645fa7b629833d304ecf2ff41d130 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 17 May 2024 14:01:57 +0300 Subject: [PATCH 559/607] chore: Mount SAP TransFilesystems and handle remounting when not using external NFS --- deploy/terraform/run/sap_system/transform.tf | 2 +- .../terraform-units/modules/sap_system/hdb_node/outputs.tf | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 8dd5378c11..1e9dc360a5 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -88,7 +88,7 @@ locals { database_cluster_disk_lun = var.database_cluster_disk_lun database_cluster_disk_size = var.database_cluster_disk_size database_cluster_disk_type = var.database_cluster_disk_type - observer_vm_ips = var.observer_vm_ips + observer_vm_ips = var.observer_nic_ips platform = var.database_platform use_ANF = var.database_HANA_use_ANF_scaleout_scenario || try(var.databases[0].use_ANF, false) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index c55b023ba8..e5e6b4d7e6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -231,7 +231,7 @@ output "ANF_subnet_prefix" { output "observer_ips" { description = "IP adresses for observer nodes" - value = local.enable_deployment && local.deploy_observer ? ( + value = local.enable_deployment && var.use_observer ? ( azurerm_network_interface.observer[*].private_ip_address) : ( [] ) @@ -239,8 +239,8 @@ output "observer_ips" { output "observer_vms" { description = "Resource IDs for observer nodes" - value = local.enable_deployment ? ( - azurerm_linux_virtual_machine.observer[*].id) : ( + value = local.enable_deployment && var.use_observer ? 
( + azurerm_linux_virtual_machine.observer[*].id) : ( [""] ) } From 9c14766c1e1eb12b3808742c61cb547b4d15be4a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 17 May 2024 18:25:37 +0300 Subject: [PATCH 560/607] Fix zonal code for observer --- .../terraform-units/modules/sap_system/hdb_node/vm-observer.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf index dba9f4950f..422bc3d28a 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -65,7 +65,7 @@ resource "azurerm_linux_virtual_machine" "observer" { admin_password = local.enable_auth_key ? null : var.sid_password disable_password_authentication = !local.enable_auth_password - zone = local.zonal_deployment ? setsubtract(["1", "2", "3"], local.zones)[0] : null + zone = local.zonal_deployment ? try(setsubtract(["1", "2", "3"], local.zones)[0],local.zones[0]) : null network_interface_ids = [ azurerm_network_interface.observer[count.index].id From acc579bd997b1789ee53962db610fe489902f6d6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 17 May 2024 21:49:58 +0300 Subject: [PATCH 561/607] Don't add the client network if not used --- .../modules/sap_system/output_files/sap-parameters.tmpl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 6047f02782..b9a1b80d34 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -99,7 +99,10 @@ subnet_cidr_storage: ${subnet_cidr_storage} subnet_cidr_anf: ${subnet_cidr_anf} subnet_cidr_app: ${subnet_cidr_app} subnet_cidr_db: ${subnet_cidr_db} + +%{~ if length(subnet_cidr_client) != 0 } subnet_cidr_client: ${subnet_cidr_client} +%{~ endif } # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability From f97a4e21aab8b9ae96aa78846a37201aa0f51784 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 18 May 2024 00:11:25 +0300 Subject: [PATCH 562/607] Add become: true for reboot --- deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml | 1 + .../5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml index 0dbd71ecf0..8ad0c4d8d0 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml @@ -1,6 +1,7 @@ # --- - name: "2.10-sap-notes: Reboot after the selinux is configured" + become: true ansible.builtin.reboot: reboot_timeout: 300 # ... 
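# A minimal sketch of a role task that could notify the reboot handler above —
# the SELinux task itself is an illustrative assumption, not copied from the
# 2.10-sap-notes role; only the handler name is taken from this patch. Without
# become: true on the handler, the reboot would run as the unprivileged
# Ansible user and fail.
- name: "2.10-sap-notes: Set SELinux to permissive (illustrative)"
  become: true
  ansible.posix.selinux:
    policy: targeted
    state: permissive
  notify: "2.10-sap-notes: Reboot after the selinux is configured"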
diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 718c5c17fd..60c01d95b7 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -151,6 +151,8 @@ msg: "Reboot and wait 5 minutes" - name: "5.6 SCSERS - RHEL - Reboot the primary/secondary instance" + become: true + become_user: root ansible.builtin.reboot: reboot_timeout: 300 post_reboot_delay: 300 From 79fe09463928c66b70ddcc2afc415136a556b69f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 18 May 2024 00:48:31 +0300 Subject: [PATCH 563/607] Don't fail the reboot --- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 60c01d95b7..0b0d6b2864 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -154,8 +154,9 @@ become: true become_user: root ansible.builtin.reboot: - reboot_timeout: 300 - post_reboot_delay: 300 + reboot_timeout: 150 + post_reboot_delay: 150 + failed_when: false - name: "5.6 SCSERS - RHEL - Set the Cluster out of maintenance mode" ansible.builtin.shell: pcs property set maintenance-mode=false From 3e3645820bcc12fca9200771af2e0f026af70190 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 19 May 2024 09:25:53 +0300 Subject: [PATCH 564/607] Fix the merge conflicts --- .../tasks/main.yaml | 664 ++++++++++++++---- 1 file changed, 529 insertions(+), 135 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 77c9c11c94..d55419da45 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -90,7 +90,11 @@ - "Client Subnet CIDR: {{ subnet_client_cidr }}" - "Storage Subnet CIDR: {{ subnet_storage_cidr }}" -# Scale out ANF only runs on primary node or the first node in the SID_DB list. This is mandatory. +# Scale out - ANF with shared storage +# Scale out ANF must only run on the designated primary node from the DB server list. +# /*---------------------------------------------------------------------------8 +# | Primary site setup with Shared storage scale out | +# +------------------------------------4--------------------------------------*/ - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - ANF" block: @@ -350,7 +354,7 @@ # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. - name: "4.0.3 - SAP HANA SCALE OUT: Wait 5 minutes for SAP system to stop" ansible.builtin.wait_for: - timeout: 120 + timeout: 300 - name: "4.0.3 - SAP HANA SCALE OUT: Start HANA Database" become_user: "{{ db_sid | lower }}adm" @@ -370,160 +374,550 @@ - name: "4.0.3 - SAP HANA SCALE OUT: Wait 5 minutes for SAP system to start" ansible.builtin.wait_for: - timeout: 120 - -# TODO: add block for Scale out with HSR support here, same as regular installation. 
-- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" - block: + timeout: 300 - - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file if exists" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent + when: + - not hana_installed.stat.exists + - not (database_high_availability | default(false)) + # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm. + - ansible_hostname == db_hosts[0] + - db_scale_out is defined + - db_scale_out - - name: "4.0.3 - SAP HANA SCALE OUT: SAP HANA Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_install.rsp" - dest: "{{ dir_params }}/{{ sap_inifile }}" - mode: 0644 - force: true - # Template parameter mapping - vars: - _rsp_component_root: "../COMPONENTS" - _rsp_components: "{{ hana_components }}" - _rsp_sapmnt: "/hana/shared" # Default Value - _rsp_hostname: "{{ virtual_host }}" - _rsp_sid: "{{ db_sid | upper }}" - _rsp_number: "{{ db_instance_number }}" - _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - - name: "4.0.3 - SAP HANA SCALE OUT: Progress" - ansible.builtin.debug: - msg: "Start HANA Installation" +# Scale our HSR with multi site replication +# DB servers need to be split into two sites, each with designated primary. HANA setup will run on the primaries only. +# /*---------------------------------------------------------------------------8 +# | Primary site setup with Shared nothing scale out | +# +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: installation" - block: - - name: "4.0.3 - SAP HANA SCALE OUT: Execute hdblcm on {{ virtual_host }}" +- name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" + block: + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Primary Site )" + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[2::2] %} + {% if loop.index == ansible_play_hosts_all | length -1 %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "4.0.3 - SAP HANA SCALE 
OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA SCALE OUT-HSR: Execute hdblcm on {{ primary_instance_name }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
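# A worked example of the even/odd slice convention used in this file to split
# the play into the two replication sites — hostnames and ordering are
# illustrative assumptions (the two designated primaries first, then workers
# alternating between sites):
#   ansible_play_hosts_all        -> [hana-p1, hana-p2, hana-w1, hana-w2]
#   ansible_play_hosts_all[0::2]  -> [hana-p1, hana-w1]   site 1 hostname resolution
#   ansible_play_hosts_all[1::2]  -> [hana-p2, hana-w2]   site 2 hostname resolution
#   ansible_play_hosts_all[2::2]  -> [hana-w1]            site 1 _rsp_additional_hosts
#   ansible_play_hosts_all[3::2]  -> [hana-w2]            site 2 _rsp_additional_hosts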
+ + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." 
+ become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Primary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[0::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} changed_when: false failed_when: false register: hana_stopped environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - rescue: - - name: "Fail if HANA installation failed with rc > 1" - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." - when: hana_installation.rc > 1 - - - name: "4.0.3 - SAP HANA SCALE OUT: Progress" - ansible.builtin.debug: - msg: "Restarting the HANA Installation" - when: hana_installation.rc == 1 - - - - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }} and rescue" - block: - - name: "4.0.3 - SAP HANA SCALE OUT: Re-execute hdblcm on {{ virtual_host }}" - ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - when: hana_installation.rc == 1 - rescue: - - name: "Fail if HANA installation failed on second attempt." - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." 
- - - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" - ansible.builtin.debug: - msg: - - "HANA Installation failed" - - "HDBLCM output: {{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "Errorhandling: SAP HANA" - ansible.builtin.debug: - msg: "INSTALL:{{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "4.0.3 - SAP HANA SCALE OUT: Successful installation" - block: - - - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" - ansible.builtin.debug: - msg: "HANA Installation succeeded" + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install: flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - state: touch - mode: 0755 + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. + - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 - - name: "4.0.3 - SAP HANA SCALE OUT: Retrieve Subscription ID and Resource Group Name" - ansible.builtin.uri: - url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 - use_proxy: false - headers: - Metadata: true - register: azure_metadata + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: Extract Azure subscription details" - ansible.builtin.set_fact: - subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" - resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 + when: + - ansible_hostname == primary_instance_name + - not hana_installed.stat.exists - - name: "4.0.3 - SAP HANA SCALE OUT: Show the subscription and resource group" - ansible.builtin.debug: - msg: - - "Subscription ID: {{ subscription_id }}" - - "Resource Group Name: {{ resource_group_name }}" +# /*---------------------------------------------------------------------------8 +# | Secondary site setup with Shared nothing scale out | +# +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: Include deploy/ansible/roles-misc/0.6-ARM-Deployment" - ansible.builtin.include_role: - name: roles-misc/0.6-ARM-Deployment + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + 
state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[3::2] %} + {% if loop.index == ansible_play_hosts_all | length -1 %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA: Execute hdblcm on {{ secondary_instance_name }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ when: hana_installation.rc > 1 + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" + block: 
+ - name: "Prepare global.ini for domain name resolution." + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Secondary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[1::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" vars: allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: ARM Deployment flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" - state: touch - mode: 0755 - - - name: "4.0.3 - SAP HANA SCALE OUT: remove install response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. 
+ - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 - when: - - hana_installation.rc is defined - - hana_installation.rc < 1 + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "4.0.3 - SAP HANA SCALE OUT: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 - when: + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 + when: + - ansible_hostname == secondary_instance_name - not hana_installed.stat.exists - - database_high_availability + when: + - database_high_availability # /*----------------------------End of setup----------------------------------8 From 7c504bbbd2eea63b2ea1bef0a70ede07c677da5c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 19 May 2024 10:11:07 +0300 Subject: [PATCH 565/607] Rename variables --- .../playbook_02_os_sap_specific_config.yaml | 2 +- .../ansible/playbook_04_00_00_db_install.yaml | 24 +++---- deploy/ansible/playbook_04_00_01_db_ha.yaml | 2 +- .../tasks/main.yaml | 4 +- .../roles-os/1.10-networking/tasks/main.yaml | 2 +- .../tasks/1.18.2-provision.yml | 4 +- .../2.4-hosts-file/tasks/main.yaml | 2 +- .../tasks/2.6.0-afs-mounts.yaml | 2 +- .../tasks/2.6.1-anf-mounts.yaml | 70 ++++++------------- .../tasks/2.6.1.2-anf-mounts-scaleout.yaml | 38 +++++----- .../tasks/2.6.7-afs-mounts-simplemount.yaml | 2 +- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 8 +-- .../2.6-sap-mounts/tasks/main.yaml | 6 +- .../roles-sap/5.1-dbload/tasks/main.yaml | 2 +- .../roles-sap/5.2-pas-install/tasks/main.yaml | 2 +- .../roles-sap/5.3-app-install/tasks/main.yaml | 2 +- .../5.8.4.0-clusterPrep-ScaleOut-RedHat.yml | 2 +- .../tasks/5.8.5-post_provision_report.yml | 6 +- deploy/ansible/vars/ansible-input-api.yaml | 6 +- .../output_files/sap-parameters.tmpl | 2 +- 20 files changed, 79 insertions(+), 109 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index b6ab630a9e..3b3ff840e3 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -165,7 +165,7 @@ subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.interface[0].ipv4.subnet[0].prefix) }}" when: - platform == 'HANA' - - db_scale_out + - database_scale_out - not database_high_availability tags: - always diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 74c94dd1ec..0f55f873ad 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -81,8 +81,8 @@ ansible.builtin.set_fact: root_password: "{{ lookup('ansible.builtin.password', '/dev/null', 
seed=inventory_hostname) }}" when: - - db_scale_out is defined - - db_scale_out + - database_scale_out is defined + - database_scale_out register: root_password_generated - name: "Database Installation Playbook: - Show root password" @@ -125,7 +125,7 @@ become: true when: - node_tier == 'hana' - - not db_scale_out + - not database_scale_out block: - name: "Database Installation Playbook: - Setting the DB facts" ansible.builtin.set_fact: @@ -148,7 +148,7 @@ ansible.builtin.include_role: name: roles-db/4.0.0-hdb-install when: - - not db_scale_out + - not database_scale_out - name: "Database Installation Playbook: - Create db-install-done flag" delegate_to: localhost @@ -192,7 +192,7 @@ # - db_high_availability - database_high_availability - node_tier == 'hana' - - not db_scale_out + - not database_scale_out block: - name: "Database Installation Playbook: - Setting the facts" ansible.builtin.set_fact: @@ -317,7 +317,7 @@ name: sshd state: restarted when: - - db_scale_out + - database_scale_out - hostvars.localhost.root_password is defined - name: "Database Installation Playbook: - Install HANA Scale Out" @@ -341,8 +341,8 @@ ansible.builtin.set_fact: root_password: "{{ hostvars.localhost.root_password }}" when: - # - db_scale_out is defined - - db_scale_out + # - database_scale_out is defined + - database_scale_out # - not db_high_availability - name: "Database Installation Playbook: - Show SAP password" @@ -356,7 +356,7 @@ ansible.builtin.include_role: name: roles-db/4.0.3-hdb-install-scaleout when: - - db_scale_out + - database_scale_out - name: "Database Installation Playbook: - Create db-install-done flag" delegate_to: localhost @@ -421,7 +421,7 @@ name: sshd state: restarted when: - - db_scale_out + - database_scale_out - hostvars.localhost.root_password is defined @@ -441,7 +441,7 @@ # - db_high_availability - db_high_availability - node_tier in ['hana','observer'] - - db_scale_out + - database_scale_out block: - name: "Database Installation Playbook: - Setting the facts" ansible.builtin.set_fact: @@ -506,7 +506,7 @@ become_user: root when: - node_tier in ['hana','observer'] - - db_scale_out + - database_scale_out tags: - 1.18-scaleout-pacemaker diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 3d409b71ed..3696c70c9d 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -194,7 +194,7 @@ name: roles-sap/5.8-hanadb-scaleout-pacemaker when: - db_high_availability - - db_scale_out + - database_scale_out tags: - 5.8-hanadb-scaleout-pacemaker diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index d55419da45..1d6737504a 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -381,8 +381,8 @@ - not (database_high_availability | default(false)) # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm. 
- ansible_hostname == db_hosts[0] - - db_scale_out is defined - - db_scale_out + - database_scale_out is defined + - database_scale_out # Scale our HSR with multi site replication diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index 99661b2e35..fb01875bc1 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -174,7 +174,7 @@ - name: "1.10 Networking - Add routes and restart VM for HANA scaleout" when: - - db_scale_out + - database_scale_out - node_tier == 'hana' block: # display the list of azure network interfaces and the expected ethX interface names diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml index c348c0b190..0afccfbf40 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml @@ -20,7 +20,7 @@ when: - node_tier in ['observer','hana'] - platform == 'HANA' - - db_scale_out + - database_scale_out - database_high_availability - ansible_os_family | upper == "SUSE" block: @@ -70,7 +70,7 @@ - name: Implement the Scale out Resource Agent hook (REDHAT) when: - node_tier in ['observer','hana'] - - db_scale_out + - database_scale_out - database_high_availability - ansible_os_family | upper == "REDHAT" block: diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index 163e3e44c5..bce1917e52 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -99,7 +99,7 @@ when: - platform == 'HANA' - not database_high_availability - - db_scale_out + - database_scale_out block: - name: "2.4 Hosts: - Set the DB Virtual Instance hostname - scale out" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 929619e0e5..f4a1059dad 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -413,7 +413,7 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - db_scale_out + - database_scale_out - db_high_availability - hana_shared_mountpoint is defined - hana_shared_mountpoint | length == 2 diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index e012dcfb88..d23396ff85 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -570,16 +570,17 @@ # +------------------------------------4--------------------------------------*/ # Standard block tasks for non scale out setups - name: "ANF Mount: Run tasks for non-scale out setups" + when: + - not database_scale_out + - tier == 'sapos' + - node_tier == 'hana' block: - - name: "ANF Mount: Create /hana folder" + - name: "ANF Mount: Create /hana folder" ansible.builtin.file: - path: /hana - mode: 0755 - state: directory - group: sapsys - when: - - tier == 'sapos' - - node_tier == 'hana' + path: /hana + mode: 0755 + state: directory + group: sapsys - name: "ANF Mount: HANA data" ansible.builtin.include_tasks: 
2.6.1.1-anf-mount.yaml @@ -599,8 +600,6 @@ vars: primary_host: "{{ db_hosts[0] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_data_mountpoint is defined - hana_data_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -623,8 +622,6 @@ vars: primary_host: "{{ db_hosts[0] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_log_mountpoint is defined - hana_log_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -647,8 +644,6 @@ vars: primary_host: "{{ db_hosts[0] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 0 - ansible_hostname == db_hosts[0] @@ -671,8 +666,6 @@ vars: primary_host: "{{ db_hosts[1] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_data_mountpoint is defined - hana_data_mountpoint | length > 1 - db_hosts | length == 2 @@ -696,8 +689,6 @@ vars: primary_host: "{{ db_hosts[1] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_log_mountpoint is defined - hana_log_mountpoint | length > 1 - db_hosts | length ==2 @@ -721,8 +712,6 @@ vars: primary_host: "{{ db_hosts[1] }}" when: - - tier == 'sapos' - - node_tier == 'hana' - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 - db_hosts | length == 2 @@ -739,11 +728,6 @@ - { 'path': '/hana/data' } - { 'path': '/hana/log' } - { 'path': '/hana/shared' } - when: - - tier == 'sapos' - - node_tier == 'hana' - when: - - not db_scale_out # /*---------------------------------------------------------------------------8 # | | @@ -752,16 +736,19 @@ # +------------------------------------4--------------------------------------*/ # Run this block set when db_Scale_out is true but db_high_availability is false - name: "ANF Mount: Run tasks for scale out setups" + when: + - database_scale_out + - not database_high_availability + - tier == 'sapos' + - node_tier == 'hana' block: - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/usr/sap/{{ db_sid | upper }}" - state: directory - when: - - tier == 'hana' + owner: "{{ db_sid | lower }}adm" + group: sapsys + mode: 0755 + path: "/usr/sap/{{ db_sid | upper }}" + state: directory - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" ansible.builtin.file: @@ -770,8 +757,6 @@ mode: 0755 path: "/hana/data/{{ db_sid | upper }}" state: directory - when: - - tier == 'hana' - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" ansible.builtin.file: @@ -780,8 +765,6 @@ mode: 0755 path: "/hana/log/{{ db_sid | upper }}" state: directory - when: - - tier == 'hana' - name: "ANF Mount: HANA shared - Scale out" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml @@ -802,7 +785,6 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - node_tier == 'hana' - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 0 @@ -825,12 +807,10 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - node_tier == 'hana' - hana_shared_mountpoint is defined - hana_shared_mountpoint | length == 1 # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. 
- - db_scale_out is defined - - db_scale_out + - database_scale_out | default(false) - name: "ANF Mount: HANA Data - Scale out - Create mount list" block: @@ -859,7 +839,6 @@ create_temp_folders: 'true' } when: - - node_tier == 'hana' - hana_data_mountpoint is defined - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" @@ -874,7 +853,6 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - node_tier == 'hana' - hana_data_mountpoint is defined - name: "ANF Mount: HANA Log - Scale out - Create mount list" @@ -904,7 +882,6 @@ create_temp_folders: 'true' } when: - - node_tier == 'hana' - hana_log_mountpoint is defined - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" @@ -919,7 +896,6 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - node_tier == 'hana' - hana_log_mountpoint is defined - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" @@ -934,12 +910,6 @@ - "{{ hana_data_scaleout_mountpoint }}" - { 'path': '/hana/shared' } - { 'path': '/usr/sap/{{ db_sid | upper }}' } - when: - - tier == 'sapos' - - node_tier == 'hana' - when: - - db_scale_out - - not db_high_availability ... diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml index f9d7046401..3b1e3d2d31 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml @@ -760,8 +760,8 @@ # when: # - tier == 'hana' # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" # ansible.builtin.file: @@ -773,8 +773,8 @@ # when: # - tier == 'hana' # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" @@ -787,8 +787,8 @@ # when: # - tier == 'hana' # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: HANA shared - Scale out" # ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml @@ -814,8 +814,8 @@ # - hana_shared_mountpoint is defined # - hana_shared_mountpoint | length > 0 # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # # This runs for unique share per node # - name: "ANF Mount: usrsap - Scale out" @@ -840,8 +840,8 @@ # - hana_shared_mountpoint is defined # - hana_shared_mountpoint | length == 1 # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: HANA Data - Scale out - Create mount list" # block: @@ -874,8 +874,8 @@ # - hana_data_mountpoint is defined # # - hana_data_mountpoint | length == db_hosts | length # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. 
-# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" # ansible.builtin.debug: @@ -893,8 +893,8 @@ # - hana_data_mountpoint is defined # # - hana_data_mountpoint | length == db_hosts | length # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: HANA Log - Scale out - Create mount list" @@ -928,8 +928,8 @@ # - hana_log_mountpoint is defined # # - hana_log_mountpoint | length == db_hosts | length # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" # ansible.builtin.debug: @@ -947,8 +947,8 @@ # - hana_log_mountpoint is defined # # - hana_log_mountpoint | length == db_hosts | length # # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - name: "ANF Mount: Set Permissons on HANA (HSR) Directories ({{ item.path }})" @@ -985,6 +985,6 @@ # - tier == 'sapos' # - node_tier == 'hana' # - not (db_high_availability | default(false)) -# - db_scale_out +# - database_scale_out # ... diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml index 464e1adc76..cd77dd9587 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml @@ -340,7 +340,7 @@ vars: primary_host: "{{ ansible_hostname }}" when: - - db_scale_out + - database_scale_out - db_high_availability - hana_shared_mountpoint is defined - hana_shared_mountpoint | length == 2 diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index fa41192925..662187c3f7 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -678,7 +678,7 @@ - tier == 'sapos' - node_tier == 'hana' when: - - not db_scale_out + - not database_scale_out # Run this block set when db_Scale_out is true but db_high_availability is false - name: "ANF Mount: Run tasks for scale out setups" @@ -760,8 +760,8 @@ - hana_shared_mountpoint is defined - hana_shared_mountpoint | length == 1 # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. 
- - db_scale_out is defined - - db_scale_out + - database_scale_out is defined + - database_scale_out - name: "ANF Mount: HANA Data - Scale out - Create mount list" block: @@ -869,7 +869,7 @@ - tier == 'sapos' - node_tier == 'hana' when: - - db_scale_out + - database_scale_out - not db_high_availability diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 41504986f1..60f54b7a18 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -423,13 +423,13 @@ # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 # This will be removed in the next release, left here for tracing and documentation -# Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used +# Import this task only if database_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used # - name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" # ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml # when: # - NFS_provider == 'ANF' -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - name: "2.6 SAP Mounts: - Import ANF tasks" diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 49dc42b09a..5a62483772 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -147,7 +147,7 @@ ansible.builtin.set_fact: db_virtualhost_temp: >- {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} - {%- if db_scale_out and not database_high_availability -%} + {%- if database_scale_out and not database_high_availability -%} {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} {%- endif -%} {{- _host_name -}} diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 9e80ca045c..2e60699bb0 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -32,7 +32,7 @@ ansible.builtin.set_fact: db_virtualhost_temp: >- {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} - {%- if db_scale_out and not database_high_availability -%} + {%- if database_scale_out and not database_high_availability -%} {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} {%- endif -%} {{- _host_name -}} diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 7335f19f49..4dd83a952e 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -30,7 +30,7 @@ ansible.builtin.set_fact: db_virtualhost_temp: >- {%- set _host_name = hostvars[db_server_temp | first]['virtual_host'] -%} - {%- if db_scale_out and not database_high_availability -%} + {%- if database_scale_out and not database_high_availability -%} {%- set _host_name = sap_sid | lower ~ db_sid | lower ~ 'db' ~ db_instance_number ~ 'so' -%} {%- endif -%} {{- _host_name -}} diff --git 
a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml index 011fbf7424..b2a3e12132 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml @@ -27,7 +27,7 @@ - name: "Configure the ANF/AFS file system resources" when: - database_high_availability - - db_scale_out + - database_scale_out - NFS_provider in ["ANF","AFS"] - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml index 622e0ac1ea..8787528fac 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml @@ -40,7 +40,7 @@ seconds: "{{ hsr_status_report_wait_in_s }}" - name: "Verify that the hook script is working as expected" - when: not db_scale_out + when: not database_scale_out block: - name: "Verify the hook Installation" become_user: "{{ db_sid | lower }}adm" @@ -81,7 +81,7 @@ # Code block is specifically for ScaleOut, SUSE and RedHat - name: "Verify that the hook script is working as expected (Scale Out) for SUSE " when: - - db_scale_out + - database_scale_out - ansible_os_family | upper == 'SUSE' - inventory_hostname == primary_instance_name block: @@ -119,7 +119,7 @@ when: - ansible_os_family | upper == "REDHAT" - ansible_hostname == primary_instance_name - - db_scale_out + - database_scale_out block: - name: "Verify the hook Installation (REDHAT)" become_user: "{{ db_sid | lower }}adm" diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 45350e0b9c..d430cff454 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -39,8 +39,8 @@ hana_use_master_password: y hana_password_copy: "" hana_backup_path: /hana/backup # Note: the spacing and lining of Jinja2 expression is critical here, lest we end up with a stray character in the path -hana_data_basepath: "{% if db_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}" -hana_log_basepath: "{% if db_scale_out %}/hana/log/{{ db_sid | upper }}{% else %}/hana/log{% endif %}" +hana_data_basepath: "{% if database_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}" +hana_log_basepath: "{% if database_scale_out %}/hana/log/{{ db_sid | upper }}{% else %}/hana/log{% endif %}" hana_autostart: false # When set to true, will configure autostart parameter to 1 for HANA nodes. Only applicable for Scale out sap_sid: "" # REQUIRED - SAP Install download_basket_dir: "{{ target_media_location }}/download_basket" @@ -220,7 +220,7 @@ use_simple_mount: false # Cluster - Defaults # database_high_availability: false -db_scale_out: false +database_scale_out: false database_cluster_type: "AFA" db_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. 
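# The "spacing and lining" note above is worth illustrating: Jinja2 keeps any
# literal whitespace between tags, so the expression must stay on one line.
# A hedged sketch, with a hypothetical db_sid of HDB:
#
#   "{% if database_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}"
#   renders as "/hana/data/HDB" (scale-out) or "/hana/data" (scale-up),
#
# while splitting the tags across indented lines would leak spaces into the
# rendered path (e.g. "/hana/data /HDB") unless whitespace-control markers
# such as {%- ... -%} are added.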
# scs_high_availability: false diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index b9a1b80d34..8bba28ee53 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -91,7 +91,7 @@ platform: ${platform} ############################################################################# # Scale out defines if the database is to be deployed in a scale out configuration -db_scale_out: ${scale_out} +database_scale_out: ${scale_out} db_no_standby: ${scale_out_no_standby_role} subnet_cidr_storage: ${subnet_cidr_storage} From 9c44a035c7395fb55642abaa2e001da1474b7024 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 19 May 2024 20:01:39 +0300 Subject: [PATCH 566/607] chore: Remove commented out code for database_high_availability in ANF Mount task --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index d23396ff85..896b0712a0 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -738,7 +738,7 @@ - name: "ANF Mount: Run tasks for scale out setups" when: - database_scale_out - - not database_high_availability + # - not database_high_availability - tier == 'sapos' - node_tier == 'hana' block: From e566b8ebed9eace993f33d91331da7e5d2aa8e89 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 19 May 2024 20:37:32 +0300 Subject: [PATCH 567/607] chore: Update variable names and fix merge conflicts --- .../4.0.3-hdb-install-scaleout/tasks/main.yaml | 4 ++-- .../roles-sap-os/2.4-hosts-file/templates/hosts.j2 | 2 +- deploy/ansible/vars/ansible-input-api.yaml | 2 +- .../sap_system/output_files/sap-parameters.tmpl | 12 ++++++------ 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 1d6737504a..9f70a76d53 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -124,11 +124,11 @@ # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. _rsp_root_password: "{{ root_password }}" # Note : Default configuration involves placing the last node in DB List as standby. 
- # Note : This behavior can be overridden via property 'db_no_standby' to force all remaining nodes as workers + # Note : This behavior can be overridden via property 'database_no_standby' to force all remaining nodes as workers # Note : This configuration is not recommended as it leaves your distributed system without a standby _rsp_additional_hosts: "{% for item in db_hosts[1:] %} {% if loop.index == db_hosts | length -1 %} - {% if db_no_standby %} + {% if database_no_standby %} {{ item }}:role=worker:group=default:workergroup=default {% else %} {{ item }}:role=standby:group=default:workergroup=default diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index d6ecb7324f..6917662e0f 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -129,7 +129,7 @@ ansible_facts. {% else %} {# Loop through remaining IPs for the virtual host #} {% for ip in host_ips[1:] %} -{% if (db_scale_out) %} +{% if (database_scale_out) %} {% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} {% elif (subnet_client_cidr | ansible.utils.network_in_usable(ip)) %} diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index d430cff454..523043b220 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -222,7 +222,7 @@ use_simple_mount: false # database_high_availability: false database_scale_out: false database_cluster_type: "AFA" -db_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. +database_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. 
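# A worked example of what this flag changes in the _rsp_additional_hosts
# template above (hypothetical host names, not from the source): with
# db_hosts = [hana-0, hana-1, hana-2], hana-1 is always rendered as a worker
# and hana-2 becomes the standby by default, or a second worker when
# database_no_standby is true:
#
#   hana-1:role=worker:group=default:workergroup=default
#   hana-2:role=worker:group=default:workergroup=default   (no-standby case)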
# scs_high_availability: false scs_cluster_type: "AFA" # Configure pacemaker for Azure scheduled events diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 8bba28ee53..d1c8043e61 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -91,14 +91,14 @@ platform: ${platform} ############################################################################# # Scale out defines if the database is to be deployed in a scale out configuration -database_scale_out: ${scale_out} -db_no_standby: ${scale_out_no_standby_role} +database_scale_out: ${scale_out} +database_no_standby: ${scale_out_no_standby_role} -subnet_cidr_storage: ${subnet_cidr_storage} +subnet_cidr_storage: ${subnet_cidr_storage} %{~ endif } -subnet_cidr_anf: ${subnet_cidr_anf} -subnet_cidr_app: ${subnet_cidr_app} -subnet_cidr_db: ${subnet_cidr_db} +subnet_cidr_anf: ${subnet_cidr_anf} +subnet_cidr_app: ${subnet_cidr_app} +subnet_cidr_db: ${subnet_cidr_db} %{~ if length(subnet_cidr_client) != 0 } subnet_cidr_client: ${subnet_cidr_client} From 94c744616ee4ba98bc11324d12981ec2f0435868 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 19 May 2024 22:42:32 +0300 Subject: [PATCH 568/607] Use UID and not the actual account --- .../tasks/2.6.1-anf-mounts.yaml | 6 ++-- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 34 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 896b0712a0..5c8481781e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -744,7 +744,7 @@ block: - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" + owner: '{{ hdbadm_uid }}' group: sapsys mode: 0755 path: "/usr/sap/{{ db_sid | upper }}" @@ -752,7 +752,7 @@ - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" + owner: '{{ hdbadm_uid }}' group: sapsys mode: 0755 path: "/hana/data/{{ db_sid | upper }}" @@ -760,7 +760,7 @@ - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" + owner: '{{ hdbadm_uid }}' group: sapsys mode: 0755 path: "/hana/log/{{ db_sid | upper }}" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index 662187c3f7..8c1989c242 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -683,33 +683,33 @@ # Run this block set when db_Scale_out is true but db_high_availability is false - name: "ANF Mount: Run tasks for scale out setups" block: - - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" + - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" ansible.builtin.file: - owner: "{{ db_sid | lower }}adm" - group: sapsys - mode: 0755 - path: "/usr/sap/{{ db_sid | upper }}" - state: directory + owner: '{{ hdbadm_uid }}' 
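# Using the numeric UID here rather than the "{{ db_sid | lower }}adm" account
# name lets the directories be prepared before the <sid>adm user exists on the
# node; ansible.builtin.file passes the owner through to chown semantics, so a
# UID and a user name resolve to the same ownership once the user is created
# with that UID.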
+ group: sapsys
+ mode: 0755
+ path: "/usr/sap/{{ db_sid | upper }}"
+ state: directory
 when:
 - tier == 'hana'

- - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)"
+ - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)"
 ansible.builtin.file:
- owner: "{{ db_sid | lower }}adm"
- group: sapsys
- mode: 0755
- path: "/hana/data/{{ db_sid | upper }}"
- state: directory
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ mode: 0755
+ path: "/hana/data/{{ db_sid | upper }}"
+ state: directory
 when:
 - tier == 'hana'

 - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)"
 ansible.builtin.file:
- owner: "{{ db_sid | lower }}adm"
- group: sapsys
- mode: 0755
- path: "/hana/log/{{ db_sid | upper }}"
- state: directory
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ mode: 0755
+ path: "/hana/log/{{ db_sid | upper }}"
+ state: directory
 when:
 - tier == 'hana'

From 5935860388d1fea4968f12752c8584588437433b Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sun, 26 May 2024 02:24:16 +0300
Subject: [PATCH 569/607] Remove SAS tokens from storage account

---
 deploy/ansible/playbook_02_os_sap_specific_config.yaml | 4 ++--
 .../roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml | 2 +-
 deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml | 2 +-
 deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 | 2 +-
 .../terraform-units/modules/sap_landscape/storage_accounts.tf | 3 +++
 .../sap_system/common_infrastructure/storage_accounts.tf | 1 +
 6 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml
index 3b3ff840e3..69ade3b845 100644
--- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml
+++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml
@@ -160,9 +160,9 @@

 # we do not need to add the same assertion for subnet_cidr_client as it is
 # calculated for specific deployment scenarios.
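# The rename below also normalises the fact with the usual self-default
# idiom; a minimal sketch, assuming the IMDS response is registered as
# azure_network_metadata earlier in the play (address and prefix abbreviate
# the metadata fields shown in the diff below):
#
#   - name: "Example: prefer a supplied value, fall back to instance metadata"
#     ansible.builtin.set_fact:
#       subnet_cidr_client: "{{ subnet_cidr_client | default(address + '/' + prefix) }}"
#
# so a value from sap-parameters.yaml wins and the metadata-derived CIDR is
# only used when nothing was provided.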
- - name: "SAP OS configuration playbook: - Set the subnet_client_cidr fact" + - name: "SAP OS configuration playbook: - Set the subnet_cidr_client fact" ansible.builtin.set_fact: - subnet_client_cidr: "{{ subnet_cidr_client | default(azure_network_metadata.json.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.interface[0].ipv4.subnet[0].prefix) }}" + subnet_cidr_client: "{{ subnet_cidr_client | default(azure_network_metadata.json.interface[0].ipv4.subnet[0].address + '/' + azure_network_metadata.json.interface[0].ipv4.subnet[0].prefix) }}" when: - platform == 'HANA' - database_scale_out diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 9f70a76d53..4545bd8a5c 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -300,7 +300,7 @@ state: present mode: 0644 option: "map_{{ hostvars[item].virtual_host }}" - value: "{{ (hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_client_cidr)) | first | default(hostvars[item].ansible_host) }}" + value: "{{ (hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_client)) | first | default(hostvars[item].ansible_host) }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml index bce1917e52..f244a84ece 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/tasks/main.yaml @@ -108,7 +108,7 @@ - name: "2.4 Hosts: - Calculate host name - DB - Scale Out - Standby" ansible.builtin.set_fact: - db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_client_cidr']) | first | default(hostvars[item].ansible_host) }}" + db_so_virtualhost_ip: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(hostvars[item]['subnet_cidr_client']) | first | default(hostvars[item].ansible_host) }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ][0] }}" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index 6917662e0f..549eaa1478 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -132,7 +132,7 @@ ansible_facts. {% if (database_scale_out) %} {% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} -{% elif (subnet_client_cidr | ansible.utils.network_in_usable(ip)) %} +{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-client.' 
+ sap_fqdn) }}{{ '%-21s' | format(host + '-client') }} {% endif %} {% else %} diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index d1c2296a9e..ab3d13e2a4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -292,6 +292,7 @@ resource "azurerm_storage_account" "transport" { enable_https_traffic_only = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false + shared_access_key_enabled = false public_network_access_enabled = var.public_network_access_enabled @@ -513,6 +514,8 @@ resource "azurerm_storage_account" "install" { min_tls_version = "TLS1_2" public_network_access_enabled = var.public_network_access_enabled tags = var.tags + shared_access_key_enabled = false + network_rules { default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" virtual_network_subnet_ids = compact([ diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 4e4e001608..bd18cf9712 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -39,6 +39,7 @@ resource "azurerm_storage_account" "sapmnt" { allow_nested_items_to_be_public = false public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) + shared_access_key_enabled = false tags = var.tags network_rules { From 1d63049dd7749f58ec5218b311c1b474e6f2c2af Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 26 May 2024 02:57:40 +0300 Subject: [PATCH 570/607] chore: Refactor ANF Mount task to improve readability and remove commented out code --- .../2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 5c8481781e..5743ef9552 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -841,12 +841,15 @@ when: - hana_data_mountpoint is defined - - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" + - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" ansible.builtin.debug: - var: hana_data_scaleout_mountpoint + var: hana_data_scaleout_mountpoint + verbosity: 2 + when: + - hana_data_mountpoint is defined - - name: "ANF Mount: HANA Data - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + - name: "ANF Mount: HANA Data - Scale out" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. 
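# A plausible explanation, not confirmed in the source: with_items implicitly
# flattens one level of nesting, while loop: passes the value through
# unchanged, so a list-valued variable behaves differently under the two
# keywords; the closest loop equivalent would be something like
#
#   loop: "{{ hana_data_scaleout_mountpoint | list | flatten(levels=1) }}"
#
# which is why the with_items form is kept here.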
with_items:
 - "{{ hana_data_scaleout_mountpoint | list }}"
@@ -887,6 +890,9 @@
 - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list"
 ansible.builtin.debug:
 var: hana_log_scaleout_mountpoint
+ verbosity: 2
+ when:
+ - hana_log_mountpoint is defined

 - name: "ANF Mount: HANA Log - Scale out"
 ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml

From c626bed3845c3d6b9cd25aef0352867af96bd15b Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sun, 26 May 2024 02:59:46 +0300
Subject: [PATCH 571/607] Split the task

---
 .../tasks/2.6.1-anf-mounts.yaml | 25 +++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index 5743ef9552..fb2bedb8cd 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -912,10 +912,31 @@
 state: directory
 recurse: true
 with_items:
- - "{{ hana_log_scaleout_mountpoint }}"
- - "{{ hana_data_scaleout_mountpoint }}"
 - { 'path': '/hana/shared' }
 - { 'path': '/usr/sap/{{ db_sid | upper }}' }

+ - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})"
+ ansible.builtin.file:
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ path: "{{ item.path }}"
+ state: directory
+ recurse: true
+ with_items:
+ - "{{ hana_log_scaleout_mountpoint }}"
+ when:
+ - hana_log_mountpoint is defined
+
+ - name: "ANF Mount: Set Permissions on HANA Directories ({{ item.path }})"
+ ansible.builtin.file:
+ owner: '{{ hdbadm_uid }}'
+ group: sapsys
+ path: "{{ item.path }}"
+ state: directory
+ recurse: true
+ with_items:
+ - "{{ hana_data_scaleout_mountpoint }}"
+ when:
+ - hana_data_mountpoint is defined
 ...

From 0a0558cf95c1555816e6e3939335d0499674ee71 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Sun, 26 May 2024 13:39:56 +0300
Subject: [PATCH 572/607] Use AzureAD provider for storage

---
 deploy/terraform/run/sap_landscape/providers.tf | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf
index 549a2abc0f..ef3f031b6a 100644
--- a/deploy/terraform/run/sap_landscape/providers.tf
+++ b/deploy/terraform/run/sap_landscape/providers.tf
@@ -14,9 +14,10 @@

 provider "azurerm" {
 features {}

- subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null
- use_msi = var.use_spn ? false : true
+ subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null
+ use_msi = var.use_spn ? false : true
 skip_provider_registration = true
+ storage_use_azuread = true
 }

 provider "azurerm" {
@@ -36,6 +37,7 @@
 client_secret = var.use_spn ? local.spn.client_secret : null
 tenant_id = var.use_spn ? local.spn.tenant_id : null
 use_msi = var.use_spn ?
false : true + storage_use_azuread = true partner_id = "25c87b5f-716a-4067-bcd8-116956916dd6" alias = "workload" From c222d17888b164f8970aee9333a65d5e67f61aec Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 26 May 2024 14:20:12 +0300 Subject: [PATCH 573/607] Move network rules outside the storage account --- .../modules/sap_landscape/providers.tf | 4 +-- .../modules/sap_landscape/storage_accounts.tf | 33 ++++++++++++++----- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index ed1db2f2b8..6f6f007541 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -3,9 +3,9 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering] - version = ">= 3.23" + version = ">= 3.3" } - + azapi = { source = "Azure/azapi" configuration_aliases = [azapi.api] diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index ab3d13e2a4..096191549d 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -516,9 +516,26 @@ resource "azurerm_storage_account" "install" { tags = var.tags shared_access_key_enabled = false - network_rules { - default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" - virtual_network_subnet_ids = compact([ +} + +resource "azurerm_storage_account_network_rules" "install" { + provider = azurerm.main + count = local.use_AFS_for_shared && length(var.install_storage_account_id) == 0 ? 1 : 0 + depends_on = [ + azurerm_storage_account.install, + azurerm_storage_share.install, + azurerm_storage_share.install_smb + ] + + storage_account_id = azurerm_storage_account.install[0].id + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" + + ip_rules = compact([ + length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", + length(var.Agent_IP) > 0 ? var.Agent_IP : "" + ]) + + virtual_network_subnet_ids = compact([ local.database_subnet_defined ? ( local.database_subnet_existing ? var.infrastructure.vnets.sap.subnet_db.arm_id : azurerm_subnet.db[0].id) : ( null @@ -529,16 +546,14 @@ resource "azurerm_storage_account" "install" { length(local.deployer_subnet_management_id) > 0 ? local.deployer_subnet_management_id : null ] ) - ip_rules = compact([ - length(local.deployer_public_ip_address) > 0 ? local.deployer_public_ip_address : "", - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ]) - } - + lifecycle { + ignore_changes = [virtual_network_subnet_ids] + } } + resource "azurerm_private_dns_a_record" "install" { provider = azurerm.dnsmanagement count = var.use_private_endpoint && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.install_private_endpoint_id) == 0 ? 
1 : 0 From eeb231f2b0bc22e8fe4ea44c5b275faaa7a4a0b8 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 26 May 2024 14:40:54 +0300 Subject: [PATCH 574/607] Support SAS less deployments --- deploy/pipelines/02-sap-workload-zone.yaml | 545 +++++++++++---------- deploy/scripts/installer.sh | 7 - 2 files changed, 273 insertions(+), 279 deletions(-) diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 572fc2e9f1..ba5ef4385f 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -119,84 +119,84 @@ stages: echo "##vso[build.updatebuildnumber]Deploying the SAP Workload zone defined in $(workload_zone_folder)" - # Check if running on deployer - if [ ! -f /etc/profile.d/deploy_server.sh ]; then + # Check if running on deployer + if [ ! -f /etc/profile.d/deploy_server.sh ]; then echo -e "$green --- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix - else + else source /etc/profile.d/deploy_server.sh - fi + fi - if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then + if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then echo -e "$boldred--- $(workload_zone_configuration_file) was not found ---$reset" echo "##vso[task.logissue type=error]File $(workload_zone_configuration_file) was not found." exit 2 - fi + fi - echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" + echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" - cd $CONFIG_REPO_PATH - mkdir -p .sap_deployment_automation - git checkout -q $(Build.SourceBranchName) + cd $CONFIG_REPO_PATH + mkdir -p .sap_deployment_automation + git checkout -q $(Build.SourceBranchName) - echo -e "$green--- Validations ---$reset" + echo -e "$green--- Validations ---$reset" - if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then + if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." exit 2 - fi - if [ $USE_MSI != "true" ]; then + fi + if [ $USE_MSI != "true" ]; then if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." 
+ exit 2 fi if [ -z $CP_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi - fi + fi echo -e "$green--- Convert config file to UX format ---$reset" - dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) + dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) echo -e "$green--- Read details ---$reset" - ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} - echo Network: ${NETWORK} + ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') + NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + echo Environment: ${ENVIRONMENT} + echo Location: ${LOCATION} + echo Network: ${NETWORK} - ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) - LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) - case "$LOCATION_CODE" in + ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) + LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) + case "$LOCATION_CODE" in "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; @@ -253,417 +253,418 @@ stages: "WUS2") LOCATION_IN_FILENAME="westus2" ;; "WUS3") LOCATION_IN_FILENAME="westus3" ;; *) LOCATION_IN_FILENAME="westeurope" ;; - esac - - NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - echo "Network(filename): $NETWORK_IN_FILENAME" - echo "Deployer Environment $(deployer_environment)" - echo "Deployer Region $(deployer_region)" - echo "Workload TFvars $workload_zone_configuration_file" - echo "" - - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: 
$(System.TeamProject)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version - - - if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then + esac + + NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Network(filename): $NETWORK_IN_FILENAME" + echo "Deployer Environment $(deployer_environment)" + echo "Deployer Region $(deployer_region)" + echo "Workload TFvars $workload_zone_configuration_file" + echo "" + + echo "Agent: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "" + echo "Azure CLI version:" + echo "-------------------------------------------------" + az --version + + + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi - if [ $LOCATION != $LOCATION_IN_FILENAME ]; then + if [ $LOCATION != $LOCATION_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi - if [ $NETWORK != $NETWORK_IN_FILENAME ]; then + if [ $NETWORK != $NETWORK_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The network_logical_name setting in $(workload_zone_configuration_file) '$NETWORK' does not match the $(workload_zone_configuration_file) file name '$NETWORK_IN_FILENAME-. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt --output none + az config set extension.use_dynamic_install=yes_without_prompt --output none - az extension add --name azure-devops --output none + az extension add --name azure-devops --output none - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none - export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") - echo '$(parent_variable_group) id: ' $PARENT_VARIABLE_GROUP_ID - if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then + export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") + echo '$(parent_variable_group) id: ' $PARENT_VARIABLE_GROUP_ID + if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." 
exit 2 - fi + fi - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID - if [ -z ${VARIABLE_GROUP_ID} ]; then + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") + echo '$(variable_group) id: ' $VARIABLE_GROUP_ID + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 - fi + fi - echo "Agent Pool: " $(this_agent) + echo "Agent Pool: " $(this_agent) echo -e "$green--- Set CONFIG_REPO_PATH variable ---$reset" - deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) ; echo 'Deployer Environment File' $deployer_environment_file_name - workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} ; echo 'Workload Environment File' $workload_environment_file_name - dos2unix -q ${deployer_environment_file_name} - dos2unix -q ${workload_environment_file_name} + deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) ; echo 'Deployer Environment File' $deployer_environment_file_name + workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} ; echo 'Workload Environment File' $workload_environment_file_name + dos2unix -q ${deployer_environment_file_name} + dos2unix -q ${workload_environment_file_name} - if [ ! -f ${deployer_environment_file_name} ]; then + if [ ! -f ${deployer_environment_file_name} ]; then echo -e "$boldred--- $(deployer_environment)$(deployer_region) was not found ---$reset" echo "##vso[task.logissue type=error]Control plane configuration file $(deployer_environment)$(deployer_region) was not found." 
exit 2 - fi + fi echo -e "$green--- Read parameter values ---$reset" - if [ "true" == $(inherit) ]; then + if [ "true" == $(inherit) ]; then az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" | tr -d \") if [ -z ${az_var} ]; then - deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key else - deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") if [ -z ${az_var} ]; then - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} else - key_vault=${az_var}; echo 'Deployer Key Vault' ${key_vault} + key_vault=${az_var}; echo 'Deployer Key Vault' ${key_vault} fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") if [ -z ${az_var} ]; then - REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA else - REMOTE_STATE_SA=${az_var}; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=${az_var}; echo 'Terraform state file storage account' $REMOTE_STATE_SA fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \") if [ -z ${az_var} ]; then - STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION else - STATE_SUBSCRIPTION=${az_var}; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=${az_var}; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ARM_SUBSCRIPTION_ID.value" | tr -d \") if [ -z ${az_var} ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." 
+ exit 2 else - echo 'Target subscription' $WL_ARM_SUBSCRIPTION_ID + echo 'Target subscription' $WL_ARM_SUBSCRIPTION_ID fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Workload_Key_Vault.value" | tr -d \") if [ -z ${az_var} ]; then - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - fi + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + fi else - export workload_key_vault=$(Workload_Key_Vault) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_key_vault=$(Workload_Key_Vault) ; echo 'Workload Key Vault' ${workload_key_vault} fi - else + else deployer_tfstate_key=$(cat ${workload_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} REMOTE_STATE_SA=$(cat ${workload_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA STATE_SUBSCRIPTION=$(cat ${workload_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - fi + fi secrets_set=1 if [ ! -f /etc/profile.d/deploy_server.sh ]; then - echo -e "$green --- Install terraform ---$reset" + echo -e "$green --- Install terraform ---$reset" - wget -q $(tf_url) - return_code=$? - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 - fi - unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ - rm -f terraform_$(tf_version)_linux_amd64.zip - - if [ $USE_MSI != "true" ]; then - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false + wget -q $(tf_url) + return_code=$? + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." + exit 2 + fi + unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ + rm -f terraform_$(tf_version)_linux_amd64.zip - echo -e "$green--- az login ---$reset" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + if [ $USE_MSI != "true" ]; then + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + + echo -e "$green--- az login ---$reset" + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + return_code=$? 
+ if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi else - echo -e "$green--- az login ---$reset" + echo -e "$green--- az login ---$reset" if [ $LOGON_USING_SPN == "true" ]; then - echo "Using SPN" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + echo "Using SPN" + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none else - az login --identity --allow-no-subscriptions --output none + az login --identity --allow-no-subscriptions --output none fi return_code=$? if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code fi - if [ $USE_MSI != "true" ]; then - echo -e "$green --- Set secrets ---$reset" + if [ $USE_MSI != "true" ]; then + echo -e "$green --- Set secrets ---$reset" - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ - --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ - --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION - secrets_set=$? ; echo -e "$cyan Set Secrets returned $secrets_set $reset" - az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none - fi + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ + --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ + --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION + secrets_set=$? 
; echo -e "$cyan Set Secrets returned $secrets_set $reset" + az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none + fi fi debug_variable='--output none' debug_variable='' if [ $USE_MSI != "true" ]; then - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) + isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) - tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) + tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) - if [ -n "${isUserAccessAdmin}" ]; then + if [ -n "${isUserAccessAdmin}" ]; then - echo -e "$green--- Set permissions ---$reset" - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo -e "$green --- Assign subscription permissions to $perms ---$reset" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none - fi + echo -e "$green--- Set permissions ---$reset" + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo -e "$green --- Assign subscription permissions to $perms ---$reset" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none + fi - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --output none - fi + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope 
"${tfstate_resource_id}" --output none + fi - resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) + resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) - if [ -n ${resource_group_name} ]; then - for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none + if [ -n ${resource_group_name} ]; then + for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none + fi + done fi - done - fi - resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) + resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) - if [ -n ${resource_group_name} ]; then - resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) + if [ -n ${resource_group_name} ]; then + resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) - vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") - if [ -n "${vnet_resource_id}" ]; then - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) + vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") + if [ -n "${vnet_resource_id}" ]; then + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope 
$vnet_resource_id --output none - fi + if [ -z "$perms" ]; then + echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope $vnet_resource_id --output none + fi + fi fi + else + echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" fi - else - echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" - fi fi echo -e "$green--- Deploy the workload zone ---$reset" - cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) - if [ -f /etc/profile.d/deploy_server.sh ]; then + cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) + if [ -f /etc/profile.d/deploy_server.sh ]; then if [ $LOGON_USING_SPN == "true" ]; then - echo "Logon Using SPN" - - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + echo "Logon Using SPN" + + az logout --output none + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi else - export ARM_USE_MSI=true - az login --identity --allow-no-subscriptions --output none + export ARM_USE_MSI=true + az login --identity --allow-no-subscriptions --output none fi - else + else + export ARM_USE_AZUREAD=true if [ $USE_MSI != "true" ]; then - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." 
- exit $return_code - fi + az logout --output none + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi - fi + fi - if [ $USE_MSI != "true" ]; then + if [ $USE_MSI != "true" ]; then $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado - else + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado + else $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi - fi - return_code=$? - - echo "Return code: ${return_code}" - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi + fi + return_code=$? 
+ + echo "Return code: ${return_code}" + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} export landscape_tfstate_key=$(cat ${workload_environment_file_name} | grep landscape_tfstate_key= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Zone State File' $landscape_tfstate_key - fi + fi - az logout --output none + az logout --output none - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") - if [ -z ${az_var} ]; then + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") + if [ -z ${az_var} ]; then echo "##vso[task.logissue type=warning]Variable FENCING_SPN_ID is not set. Required for highly available deployments" - else + else export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) if [ -z "$fencing_id" ]; then - az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --output none fi - fi + fi echo -e "$green--- Add & update files in the DevOps Repository ---$reset" - cd $(Build.Repository.LocalPath) - git pull + cd $(Build.Repository.LocalPath) + git pull - echo -e "$green--- Pull latest ---$reset" - cd $CONFIG_REPO_PATH - git pull + echo -e "$green--- Pull latest ---$reset" + cd $CONFIG_REPO_PATH + git pull - added=0 - if [ -f ${workload_environment_file_name} ]; then + added=0 + if [ -f ${workload_environment_file_name} ]; then git add ${workload_environment_file_name} added=1 - fi - if [ -f ${workload_environment_file_name}.md ]; then + fi + if [ -f ${workload_environment_file_name}.md ]; then git add ${workload_environment_file_name}.md added=1 - fi - if [ -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then + fi + if [ -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then git add -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate added=1 - fi - if [ 1 == $added ]; then + fi + if [ 1 == $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin 
$(Build.SourceBranchName) - fi + fi - if [ -f ${workload_environment_file_name}.md ]; then + if [ -f ${workload_environment_file_name}.md ]; then echo "##vso[task.uploadsummary]${workload_environment_file_name}.md" - fi + fi echo -e "$green--- Adding variables to the variable group" $(variable_group) "---$reset" - if [ -n $VARIABLE_GROUP_ID ]; then + if [ -n $VARIABLE_GROUP_ID ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Account_Name.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Subscription.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault 
--value ${key_vault} --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Zone_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors fi - fi + fi - if [ 0 != $return_code ]; then + if [ 0 != $return_code ]; then echo "##vso[task.logissue type=error]Return code from install_workloadzone $return_code." if [ -f ${workload_environment_file_name}.err ]; then - error_message=$(cat ${workload_environment_file_name}.err) - echo "##vso[task.logissue type=error]Error message: $error_message." 
+ error_message=$(cat ${workload_environment_file_name}.err) + echo "##vso[task.logissue type=error]Error message: $error_message." fi - fi + fi exit $return_code diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 778a8f0ef5..8c48ba69eb 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -259,13 +259,6 @@ fi useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) -if [ "$useSAS" = "true" ] ; then - export ARM_USE_AZUREAD=false -else - export ARM_USE_AZUREAD=true -fi - - landscape_tfstate_key_parameter='' if [[ -z $landscape_tfstate_key ]]; From 5e43add850363fe5734a6116edd880241ec83180 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 26 May 2024 23:09:07 +0300 Subject: [PATCH 575/607] SAS less deployer --- deploy/pipelines/01-deploy-control-plane.yaml | 1 + .../terraform-units/modules/sap_landscape/storage_accounts.tf | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 7defba8091..052a8b72e2 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -678,6 +678,7 @@ stages: export TF_LOG_PATH=${CONFIG_REPO_PATH}/.sap_deployment_automation/terraform.log sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh + export ARM_USE_AZUREAD=true if [ "$USE_MSI" = "true" ]; then echo -e "$cyan--- Using MSI ---$reset" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 096191549d..26c7dfa9e5 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -292,7 +292,7 @@ resource "azurerm_storage_account" "transport" { enable_https_traffic_only = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - shared_access_key_enabled = false + # shared_access_key_enabled = false public_network_access_enabled = var.public_network_access_enabled @@ -514,7 +514,7 @@ resource "azurerm_storage_account" "install" { min_tls_version = "TLS1_2" public_network_access_enabled = var.public_network_access_enabled tags = var.tags - shared_access_key_enabled = false + # shared_access_key_enabled = false } From 607c729ff911387bf9ef79cd7da3a92316e4c348 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 00:38:07 +0300 Subject: [PATCH 576/607] Refactor backend.tf files to use Azure AD authentication for storage --- deploy/scripts/installer.sh | 7 +++++++ deploy/terraform/run/sap_deployer/backend.tf | 1 + deploy/terraform/run/sap_library/backend.tf | 1 + 3 files changed, 9 insertions(+) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 8c48ba69eb..778a8f0ef5 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -259,6 +259,13 @@ fi useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) +if [ "$useSAS" = "true" ] ; then + export ARM_USE_AZUREAD=false +else + export ARM_USE_AZUREAD=true +fi + + landscape_tfstate_key_parameter='' if [[ -z $landscape_tfstate_key ]]; diff --git a/deploy/terraform/run/sap_deployer/backend.tf b/deploy/terraform/run/sap_deployer/backend.tf index ba7c160165..121a60e573 100644 --- 
a/deploy/terraform/run/sap_deployer/backend.tf +++ b/deploy/terraform/run/sap_deployer/backend.tf @@ -5,5 +5,6 @@ Description: */ terraform { backend "azurerm" { + use_azuread_auth = true # Use Azure AD authentication } } diff --git a/deploy/terraform/run/sap_library/backend.tf b/deploy/terraform/run/sap_library/backend.tf index bb0c7be191..e1f2595ea4 100644 --- a/deploy/terraform/run/sap_library/backend.tf +++ b/deploy/terraform/run/sap_library/backend.tf @@ -5,5 +5,6 @@ terraform { backend "azurerm" { + use_azuread_auth = true # Use Azure AD authentication } } From cdbbc07789edf49abd58df0a29fb27106b4cd02e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 00:40:50 +0300 Subject: [PATCH 577/607] chore: Enable Azure AD authentication for Terraform remote state imports --- deploy/terraform/run/sap_landscape/imports.tf | 1 + deploy/terraform/run/sap_library/imports.tf | 1 + deploy/terraform/run/sap_system/imports.tf | 2 ++ 3 files changed, 4 insertions(+) diff --git a/deploy/terraform/run/sap_landscape/imports.tf b/deploy/terraform/run/sap_landscape/imports.tf index 74284361c4..6b2010c8f1 100644 --- a/deploy/terraform/run/sap_landscape/imports.tf +++ b/deploy/terraform/run/sap_landscape/imports.tf @@ -16,6 +16,7 @@ data "terraform_remote_state" "deployer" { key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true + use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_library/imports.tf b/deploy/terraform/run/sap_library/imports.tf index 14630d7983..2c67219195 100644 --- a/deploy/terraform/run/sap_library/imports.tf +++ b/deploy/terraform/run/sap_library/imports.tf @@ -13,6 +13,7 @@ data "terraform_remote_state" "deployer" { key = local.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true + use_azuread_auth = true } } diff --git a/deploy/terraform/run/sap_system/imports.tf b/deploy/terraform/run/sap_system/imports.tf index 09f0753132..0171babd7e 100644 --- a/deploy/terraform/run/sap_system/imports.tf +++ b/deploy/terraform/run/sap_system/imports.tf @@ -17,6 +17,7 @@ data "terraform_remote_state" "deployer" { key = var.deployer_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? false : true + use_azuread_auth = true } } @@ -29,6 +30,7 @@ data "terraform_remote_state" "landscape" { key = var.landscape_tfstate_key subscription_id = local.saplib_subscription_id use_msi = var.use_spn ? 
false : true + use_azuread_auth = true } } From 4fa2213a57e95ec173d9a3b9dc7a386b5240d997 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 13:19:02 +0300 Subject: [PATCH 578/607] chore: Refactor ANF Mount task to improve readability and remove commented out code --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index fb2bedb8cd..31cc9cb20f 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -738,7 +738,7 @@ - name: "ANF Mount: Run tasks for scale out setups" when: - database_scale_out - # - not database_high_availability + - not database_high_availability - tier == 'sapos' - node_tier == 'hana' block: From a347a4a37a3cf4b071b75dad5482207ddc8de937 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 27 May 2024 15:56:52 +0530 Subject: [PATCH 579/607] Refactor ANF Mount task to improve readability and remove unnecessary code --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 31cc9cb20f..11e8fea071 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -738,7 +738,6 @@ - name: "ANF Mount: Run tasks for scale out setups" when: - database_scale_out - - not database_high_availability - tier == 'sapos' - node_tier == 'hana' block: @@ -840,6 +839,7 @@ } when: - hana_data_mountpoint is defined + - not database_high_availability - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" ansible.builtin.debug: @@ -886,6 +886,7 @@ } when: - hana_log_mountpoint is defined + - not database_high_availability - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" ansible.builtin.debug: From 8cc1b6269b9a947fa33d4363a19a5a93c81f0a45 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 22:28:10 +0300 Subject: [PATCH 580/607] Fix shared volume --- .../tasks/main.yaml | 1003 ++++++++--------- .../modules/sap_system/hdb_node/outputs.tf | 29 +- .../sap_system/hdb_node/variables_local.tf | 6 + 3 files changed, 522 insertions(+), 516 deletions(-) diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 4545bd8a5c..4c8ce68fb3 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -387,41 +387,46 @@ # Scale our HSR with multi site replication # DB servers need to be split into two sites, each with designated primary. HANA setup will run on the primaries only. 
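# As a hedged illustration of the split described above (the host names db0-db3 are
# hypothetical, not part of this patch): with ansible_play_hosts_all = [db0, db1, db2, db3],
# the slicing used throughout this role resolves to
#   ansible_play_hosts_all[0::2] -> [db0, db2]   (site 1; db0 acts as its designated primary)
#   ansible_play_hosts_all[1::2] -> [db1, db3]   (site 2; db1 acts as its designated primary)
# so the hdblcm tasks below run only on the two site primaries, while db2 and db3 are
# passed in as additional worker hosts via the [2::2] and [3::2] slices.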
-# /*---------------------------------------------------------------------------8 -# | Primary site setup with Shared nothing scale out | -# +------------------------------------4--------------------------------------*/ - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR" + when: + - database_high_availability block: - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Primary Site )" - block: - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_install_scaleout.rsp" - dest: "{{ dir_params }}/{{ sap_inifile }}" - mode: 0644 - force: true - # Template parameter mapping - vars: - _rsp_component_root: "../COMPONENTS" - _rsp_components: "{{ hana_components }}" - _rsp_sapmnt: "/hana/shared" # Default Value - _rsp_hostname: "{{ virtual_host }}" - _rsp_sid: "{{ db_sid | upper }}" - _rsp_number: "{{ db_instance_number }}" - _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - _rsp_hana_data_basepath: "{{ hana_data_basepath }}" - _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" - _rsp_root_password: "{{ root_password }}" - _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[2::2] %} + # /*---------------------------------------------------------------------------8 + # | Primary site setup with Shared nothing scale out | + # +------------------------------------4--------------------------------------*/ + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Primary Site )" + when: + - ansible_hostname == primary_instance_name + - not hana_installed.stat.exists + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping + vars: + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[2::2] %} {% if loop.index == ansible_play_hosts_all | length -1 %} {{ item }}:role=worker:group=default:workergroup=default {% else %} @@ -429,495 +434,489 @@ {% endif %} {% endfor %}" - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ 
HANA_2_00_customconfig.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_customconfig.rsp" - dest: "{{ dir_params }}/{{ sap_custom_config }}" - mode: 0644 - force: true - vars: - _rsp_basepath_shared: "no" - _rsp_hana_data_basepath: "{{ hana_data_basepath }}" - _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" - ansible.builtin.debug: - msg: "Start HANA Installation" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" - block: - - name: "SAP HANA SCALE OUT-HSR: Execute hdblcm on {{ primary_instance_name }}" - ansible.builtin.shell: | + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA SCALE OUT-HSR: Execute hdblcm on {{ primary_instance_name }}" + ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." 
+ become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Primary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[0::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - rescue: - - name: "Fail if HANA installation failed with rc > 1" - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." - when: hana_installation.rc > 1 - - - name: "SAP HANA SCALE OUT-HSR: Progress" - ansible.builtin.debug: - msg: "Restarting the HANA Installation" - when: hana_installation.rc == 1 - - - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }} and rescue" - block: - - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }}" - ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - when: hana_installation.rc == 1 - rescue: - - name: "Fail if HANA installation failed on second attempt." - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." 
- - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" - ansible.builtin.debug: - msg: - - "HANA Installation failed" - - "HDBLCM output: {{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "Errorhandling: SAP HANA" - ansible.builtin.debug: - msg: "INSTALL:{{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" - block: - - - name: "SAP HANA SCALE OUT-HSR: Installation results" - ansible.builtin.debug: - msg: "HANA Installation succeeded" - - - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - state: touch - mode: 0755 - - - name: "Retrieve Subscription ID and Resource Group Name" - ansible.builtin.uri: - url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 - use_proxy: false - headers: - Metadata: true - register: azure_metadata - - - name: "Extract details" - ansible.builtin.set_fact: - subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" - resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" - - - name: "Show the subscription and resource group" - ansible.builtin.debug: - msg: - - "Subscription ID: {{ subscription_id }}" - - "Resource Group Name: {{ resource_group_name }}" - - - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" - ansible.builtin.include_role: - name: roles-misc/0.6-ARM-Deployment + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" vars: - subscriptionId: "{{ subscription_id }}" - resourceGroupName: "{{ resource_group_name }}" - - - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" - state: touch - mode: 0755 - - - name: "SAP HANA SCALE OUT-HSR: remove install response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent - - - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_custom_config }}" - state: absent - - when: - - hana_installation.rc is defined - - hana_installation.rc < 1 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" - block: - - name: "Prepare global.ini for domain name resolution." - become_user: root - become: true - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: communication - state: present - mode: 0644 - option: listeninterface - value: .internal - - - name: "Prepare global.ini for installation in non-shared environment" - become_user: root + allow_world_readable_tmpfiles: true + + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. 
+ - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 + + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" become: true - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: persistence - state: present - mode: 0644 - option: basepath_shared - value: no - - - name: "Prepare global.ini for site hosts name resolution (Primary Site)" - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: "internal_hostname_resolution" - mode: 0644 - state: present - option: "{{ hostvars[item].ansible_host }}" - value: "{{ hostvars[item].virtual_host }}" - with_items: - - "{{ ansible_play_hosts_all[0::2] }}" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" - block: - - name: "Stop HANA Database" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} - changed_when: false - failed_when: false - register: hana_stopped - environment: - PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - SAPSYSTEMNAME: "{{ db_sid | upper }}" - vars: - allow_world_readable_tmpfiles: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. 
- - name: "Wait 2 minutes for SAP system to stop" - ansible.builtin.wait_for: - timeout: 120 + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 - - name: "Start HANA Database" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} - changed_when: false - failed_when: false - register: hana_started - environment: - PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - SAPSYSTEMNAME: "{{ db_sid | upper }}" + # /*---------------------------------------------------------------------------8 + # | Secondary site setup with Shared nothing scale out | + # +------------------------------------4--------------------------------------*/ + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" + block: + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_install_scaleout.rsp" + dest: "{{ dir_params }}/{{ sap_inifile }}" + mode: 0644 + force: true + # Template parameter mapping vars: - allow_world_readable_tmpfiles: true + _rsp_component_root: "../COMPONENTS" + _rsp_components: "{{ hana_components }}" + _rsp_sapmnt: "/hana/shared" # Default Value + _rsp_hostname: "{{ virtual_host }}" + _rsp_sid: "{{ db_sid | upper }}" + _rsp_number: "{{ db_instance_number }}" + _rsp_system_usage: "custom" + use_master_password: "{{ hana_use_master_password }}" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" + password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" + _rsp_root_password: "{{ root_password }}" + _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[3::2] %} + {% if loop.index == ansible_play_hosts_all | length -1 %} + {{ item }}:role=worker:group=default:workergroup=default + {% else %} + {{ item }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" + ansible.builtin.template: + src: "HANA_2_00_customconfig.rsp" + dest: "{{ dir_params }}/{{ sap_custom_config }}" + mode: 0644 + force: true + vars: + _rsp_basepath_shared: "no" + _rsp_hana_data_basepath: "{{ hana_data_basepath }}" + _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - - name: "Wait 2 minutes for SAP system to start" - ansible.builtin.wait_for: - timeout: 120 - when: - - ansible_hostname == primary_instance_name - - not hana_installed.stat.exists + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" + ansible.builtin.debug: + msg: "Start HANA Installation" -# /*---------------------------------------------------------------------------8 -# | Secondary site setup with Shared nothing scale out | -# +------------------------------------4--------------------------------------*/ + - name: "4.0.3 - 
SAP HANA SCALE OUT-HSR: installation" + block: + - name: "SAP HANA: Execute hdblcm on {{ secondary_instance_name }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + rescue: + - name: "Fail if HANA installation failed with rc > 1" + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." + when: hana_installation.rc > 1 + + - name: "SAP HANA: Progress" + ansible.builtin.debug: + msg: "Restarting the HANA Installation" + when: hana_installation.rc == 1 + + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" + block: + - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" + ansible.builtin.shell: | + umask {{ custom_umask | default('022') }} ; + chmod 755 /usr/sap; + ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + args: + chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" + creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + environment: + TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" + register: hana_installation + failed_when: hana_installation.rc > 0 + when: hana_installation.rc == 1 + rescue: + - name: "Fail if HANA installation failed on second attempt." + ansible.builtin.fail: + msg: "INSTALL:0022:Execute hdblcm failed." 
+ + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: + - "HANA Installation failed" + - "HDBLCM output: {{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" - block: - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create ini file {{ sap_inifile }} from {{ HANA_2_00_install.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_install_scaleout.rsp" - dest: "{{ dir_params }}/{{ sap_inifile }}" - mode: 0644 - force: true - # Template parameter mapping - vars: - _rsp_component_root: "../COMPONENTS" - _rsp_components: "{{ hana_components }}" - _rsp_sapmnt: "/hana/shared" # Default Value - _rsp_hostname: "{{ virtual_host }}" - _rsp_sid: "{{ db_sid | upper }}" - _rsp_number: "{{ db_instance_number }}" - _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - _rsp_hana_data_basepath: "{{ hana_data_basepath }}" - _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" - _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" - _rsp_root_password: "{{ root_password }}" - _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[3::2] %} - {% if loop.index == ansible_play_hosts_all | length -1 %} - {{ item }}:role=worker:group=default:workergroup=default - {% else %} - {{ item }}:role=worker:group=default:workergroup=default, - {% endif %} - {% endfor %}" + - name: "Errorhandling: SAP HANA" + ansible.builtin.debug: + msg: "INSTALL:{{ hana_installation }}" + when: + - hana_installation.rc is defined + - hana_installation.rc > 0 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" + block: + + - name: "SAP HANA SCALE OUT-HSR: Installation results" + ansible.builtin.debug: + msg: "HANA Installation succeeded" + + - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + state: touch + mode: 0755 + + - name: "Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Extract details" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + + - name: "Show the subscription and resource group" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ subscription_id }}" + - "Resource Group Name: {{ resource_group_name }}" + + - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" + ansible.builtin.include_role: + name: roles-misc/0.6-ARM-Deployment + vars: + subscriptionId: "{{ subscription_id }}" + resourceGroupName: "{{ resource_group_name }}" + + - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" + ansible.builtin.file: + path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" + state: touch + mode: 0755 + + - name: "SAP HANA SCALE OUT-HSR: remove install response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_inifile }}" + state: absent + + 
- name: "SAP HANA SCALE OUT-HSR: remove custom config response file" + ansible.builtin.file: + path: "{{ dir_params }}/{{ sap_custom_config }}" + state: absent + + when: + - hana_installation.rc is defined + - hana_installation.rc < 1 + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" + ansible.builtin.file: + path: "{{ hana_backup_path }}" + state: directory + group: sapsys + owner: "{{ db_sid | lower }}adm" + mode: 0755 - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" - ansible.builtin.template: - src: "HANA_2_00_customconfig.rsp" - dest: "{{ dir_params }}/{{ sap_custom_config }}" - mode: 0644 - force: true - vars: - _rsp_basepath_shared: "no" - _rsp_hana_data_basepath: "{{ hana_data_basepath }}" - _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" - ansible.builtin.debug: - msg: "Start HANA Installation" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" - block: - - name: "SAP HANA: Execute hdblcm on {{ secondary_instance_name }}" + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" + block: + - name: "Prepare global.ini for domain name resolution." + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: communication + state: present + mode: 0644 + option: listeninterface + value: .internal + + - name: "Prepare global.ini for installation in non-shared environment" + become_user: root + become: true + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: persistence + state: present + mode: 0644 + option: basepath_shared + value: no + + - name: "Prepare global.ini for site hosts name resolution (Secondary Site)" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "internal_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ ansible_play_hosts_all[1::2] }}" + + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" + block: + - name: "Stop HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" + sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_stopped environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - rescue: - - name: "Fail if HANA installation failed with rc > 1" - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." 
- when: hana_installation.rc > 1 - - - name: "SAP HANA: Progress" - ansible.builtin.debug: - msg: "Restarting the HANA Installation" - when: hana_installation.rc == 1 - - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" - block: - - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }}" - ansible.builtin.shell: | - umask {{ custom_umask | default('022') }} ; - chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' - args: - chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" - creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - environment: - TMPDIR: "{{ tmp_directory }}/{{ db_sid | upper }}" - register: hana_installation - failed_when: hana_installation.rc > 0 - when: hana_installation.rc == 1 - rescue: - - name: "Fail if HANA installation failed on second attempt." - ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" - ansible.builtin.debug: - msg: - - "HANA Installation failed" - - "HDBLCM output: {{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "Errorhandling: SAP HANA" - ansible.builtin.debug: - msg: "INSTALL:{{ hana_installation }}" - when: - - hana_installation.rc is defined - - hana_installation.rc > 0 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Successful installation" - block: - - - name: "SAP HANA SCALE OUT-HSR: Installation results" - ansible.builtin.debug: - msg: "HANA Installation succeeded" - - - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" - state: touch - mode: 0755 - - - name: "Retrieve Subscription ID and Resource Group Name" - ansible.builtin.uri: - url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 - use_proxy: false - headers: - Metadata: true - register: azure_metadata - - - name: "Extract details" - ansible.builtin.set_fact: - subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" - resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" - - - name: "Show the subscription and resource group" - ansible.builtin.debug: - msg: - - "Subscription ID: {{ subscription_id }}" - - "Resource Group Name: {{ resource_group_name }}" - - - name: "Include deploy/ansible/roles-misc/0.6-ARM-Deployment" - ansible.builtin.include_role: - name: roles-misc/0.6-ARM-Deployment + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" vars: - subscriptionId: "{{ subscription_id }}" - resourceGroupName: "{{ resource_group_name }}" - - - name: "SAP HANA SCALE OUT-HSR: ARM Deployment flag" - ansible.builtin.file: - path: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb_arm.txt" - state: touch - mode: 0755 - - - name: "SAP HANA SCALE OUT-HSR: remove install response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_inifile }}" - state: absent - - - name: "SAP HANA SCALE OUT-HSR: remove custom config response file" - ansible.builtin.file: - path: "{{ dir_params }}/{{ sap_custom_config }}" - state: absent - - when: - - hana_installation.rc 
is defined - - hana_installation.rc < 1 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Create backup folder" - ansible.builtin.file: - path: "{{ hana_backup_path }}" - state: directory - group: sapsys - owner: "{{ db_sid | lower }}adm" - mode: 0755 - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Configure global.ini" - block: - - name: "Prepare global.ini for domain name resolution." - become_user: root - become: true - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: communication - state: present - mode: 0644 - option: listeninterface - value: .internal - - - name: "Prepare global.ini for installation in non-shared environment" - become_user: root - become: true - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: persistence - state: present - mode: 0644 - option: basepath_shared - value: no - - - name: "Prepare global.ini for site hosts name resolution (Secondary Site)" - community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: "internal_hostname_resolution" - mode: 0644 - state: present - option: "{{ hostvars[item].ansible_host }}" - value: "{{ hostvars[item].virtual_host }}" - with_items: - - "{{ ansible_play_hosts_all[1::2] }}" - - - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" - block: - - name: "Stop HANA Database" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - sapcontrol -nr {{ db_instance_number }} -function StopSystem {{ db_sid | upper }} - changed_when: false - failed_when: false - register: hana_stopped - environment: - PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - SAPSYSTEMNAME: "{{ db_sid | upper }}" - vars: - allow_world_readable_tmpfiles: true + allow_world_readable_tmpfiles: true - # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. - - name: "Wait 2 minutes for SAP system to stop" - ansible.builtin.wait_for: - timeout: 120 + # ToDo: Check if we can interrogate the HANA DB to see if it is stopped. 
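# (Hedged sketch for the ToDo above, not part of the original patch: instead of a
# fixed wait, the stop could be polled with the sapcontrol binary already used in
# this block, e.g.
#   sapcontrol -nr {{ db_instance_number }} -function GetProcessList
# which by sapcontrol convention returns exit code 4 once all instance processes
# are stopped and 3 while they are still running; an ansible.builtin.shell task
# with retries/until on that return code would replace the fixed timeout below.)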
+ - name: "Wait 2 minutes for SAP system to stop" + ansible.builtin.wait_for: + timeout: 120 - - name: "Start HANA Database" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} - changed_when: false - failed_when: false - register: hana_started - environment: - PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe - SAPSYSTEMNAME: "{{ db_sid | upper }}" - vars: - allow_world_readable_tmpfiles: true + - name: "Start HANA Database" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.shell: | + sapcontrol -nr {{ db_instance_number }} -function StartSystem {{ db_sid | upper }} + changed_when: false + failed_when: false + register: hana_started + environment: + PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}:/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + DIR_LIBRARY: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + LD_LIBRARY_PATH: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/exe + SAPSYSTEMNAME: "{{ db_sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - - name: "Wait 2 minutes for SAP system to start" - ansible.builtin.wait_for: - timeout: 120 - when: - - ansible_hostname == secondary_instance_name - - not hana_installed.stat.exists - when: - - database_high_availability + - name: "Wait 2 minutes for SAP system to start" + ansible.builtin.wait_for: + timeout: 120 + when: + - ansible_hostname == secondary_instance_name + - not hana_installed.stat.exists # /*----------------------------End of setup----------------------------------8 diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index e5e6b4d7e6..c9f31af7ff 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -170,20 +170,21 @@ output "hana_log_ANF_volumes" { output "hana_shared" { description = "HANA Shared primary volume" - value = try(var.hana_ANF_volumes.use_for_shared ? ( - format("%s:/%s", - var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]) : ( - try(azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0], "") - ), - var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanashared[0].volume_path) : ( - try(azurerm_netapp_volume.hanashared[0].volume_path, "") - ) - ) - ) : ( - "" - ), "") + value = local.shared_volume_count > 0 ? flatten([ + for idx in range(local.shared_volume_count) : [ + format("%s:/%s", + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? 
( + data.azurerm_netapp_volume.hanashared[idx].volume_path) : ( + azurerm_netapp_volume.hanashared[idx].volume_path + ) + ) + + ] + ]) : [] } output "application_volume_group" { diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index dc9de7290e..0238e44b38 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -393,6 +393,12 @@ locals { (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count) : ( 0 ) + shared_volume_count = (local.create_shared_volumes) ? ( + length(var.ppg)) : ( + 0 + ) + + extension_settings = length(var.database.user_assigned_identity_id) > 0 ? [{ "key" = "msi_res_id" "value" = var.database.user_assigned_identity_id From 25353c8baeade591f8ca1071d8e6bd6e9f9a403b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 23:15:31 +0300 Subject: [PATCH 581/607] Fix the outputs --- .../output_files/ansible_inventory.tmpl | 18 ++++++++-- .../output_files/sap-parameters.tmpl | 36 +++++++++---------- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl index d9e615cf0e..e97293aa13 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl @@ -10,10 +10,12 @@ ${sid}_DB: become_user : ${db_become_user} os_type : ${db_os_type} vm_name : ${db_vmnodes[idx]} +%{~ if db_connectiontype | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -32,10 +34,12 @@ ${sid}_SCS: become_user : ${scs_become_user} os_type : ${scs_os_type} vm_name : ${scs_vmnodes[idx]} +%{~ if scs_connectiontype | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -54,10 +58,12 @@ ${sid}_ERS: become_user : ${scs_become_user} os_type : ${scs_os_type} vm_name : ${ers_vmnodes[idx]} +%{~ if ers_connectiontype | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -71,16 +77,18 @@ ${sid}_PAS: ${pas_servers[idx]}: ansible_host : ${ip_pas} ansible_user : ${ansible_user} - ansible_connection : ${scs_connection} - connection_type : ${scs_connectiontype} + ansible_connection : ${app_connection} + connection_type : ${app_connectiontype} virtual_host : ${virt_pas_servers[idx]} become_user : ${app_become_user} os_type : ${app_os_type} vm_name : ${pas_vmnodes[idx]} +%{~ if app_connectiontype | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -99,10 +107,12 @@ ${sid}_APP: become_user : ${app_become_user} os_type : ${app_os_type} vm_name : ${app_vmnodes[idx]} +%{~ if app_connectiontype | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -121,10 +131,12 @@ ${sid}_WEB: become_user : ${web_become_user} os_type : ${web_os_type} vm_name : ${web_vmnodes[idx]} +%{~ if web_connectiontype | lower == 
"winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: @@ -142,10 +154,12 @@ ${sid}_OBSERVER_DB: virtual_host : ${observers[idx]} become_user : ${db_become_user} os_type : ${db_os_type} +%{~ if db_connection | lower == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} ${winrm_transport} +%{~ endif } %{~ endfor } vars: diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index d1c8043e61..186f0d41b1 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -24,12 +24,12 @@ sap_sid: ${sid} # web_sid is the Web Dispatcher SID (if applicable) web_sid: ${web_sid} -wd_instance_number: "${web_instance_number}" +wd_instance_number: "${web_instance_number}" # scs_high_availability is a boolean flag indicating # if the SAP Central Services are deployed using high availability -scs_high_availability: ${scs_high_availability} # {scs_high_availability} -scs_cluster_type: ${scs_cluster_type} +scs_high_availability: ${scs_high_availability} # {scs_high_availability} +scs_cluster_type: ${scs_cluster_type} # SCS Instance Number scs_instance_number: "${scs_instance_number}" @@ -47,7 +47,7 @@ ers_lb_ip: ${ers_server_loadbalancer_ip} %{~ if platform == "SQLSERVER" } # IP address of CNO in Windows and takes the form IPAddress/CIDR -scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} +scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} %{~ endif } @@ -91,38 +91,38 @@ platform: ${platform} ############################################################################# # Scale out defines if the database is to be deployed in a scale out configuration -database_scale_out: ${scale_out} -database_no_standby: ${scale_out_no_standby_role} +database_scale_out: ${scale_out} +database_no_standby: ${scale_out_no_standby_role} -subnet_cidr_storage: ${subnet_cidr_storage} +subnet_cidr_storage: ${subnet_cidr_storage} %{~ endif } -subnet_cidr_anf: ${subnet_cidr_anf} -subnet_cidr_app: ${subnet_cidr_app} -subnet_cidr_db: ${subnet_cidr_db} +subnet_cidr_anf: ${subnet_cidr_anf} +subnet_cidr_app: ${subnet_cidr_app} +subnet_cidr_db: ${subnet_cidr_db} %{~ if length(subnet_cidr_client) != 0 } -subnet_cidr_client: ${subnet_cidr_client} +subnet_cidr_client: ${subnet_cidr_client} %{~ endif } # db_high_availability is a boolean flag indicating if the # SAP database servers are deployed using high availability -db_high_availability: ${database_high_availability} -database_high_availability: ${database_high_availability} -database_cluster_type: ${database_cluster_type} +db_high_availability: ${database_high_availability} +database_high_availability: ${database_high_availability} +database_cluster_type: ${database_cluster_type} # database_loadbalancer_ip is the IP address of the load balancer for the database virtual machines -database_loadbalancer_ip: ${database_loadbalancer_ip} +database_loadbalancer_ip: ${database_loadbalancer_ip} # Backwards compatibility -db_lb_ip: ${database_loadbalancer_ip} +db_lb_ip: ${database_loadbalancer_ip} # database_cluster_ip is the IP address of the load balancer for the database cluster in Windows -database_cluster_ip: ${database_cluster_ip} +database_cluster_ip: ${database_cluster_ip} # use_simple_mount defines if simple mount is to be used use_simple_mount: 
${is_use_simple_mount} # use_fence_kdump defines if optional kdump stonith device needs to be added for RHEL clusters. -use_fence_kdump: ${is_use_fence_kdump} +use_fence_kdump: ${is_use_fence_kdump} ############################################################################# From d7ffe7d4782160a7bb4790c8017111841cc87c98 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 27 May 2024 23:19:50 +0300 Subject: [PATCH 582/607] Change the if statement --- .../sap_system/output_files/ansible_inventory.tmpl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl index e97293aa13..d5232c1cbc 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl @@ -10,7 +10,7 @@ ${sid}_DB: become_user : ${db_become_user} os_type : ${db_os_type} vm_name : ${db_vmnodes[idx]} -%{~ if db_connectiontype | lower == "winrm" } +%{~ if db_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -34,7 +34,7 @@ ${sid}_SCS: become_user : ${scs_become_user} os_type : ${scs_os_type} vm_name : ${scs_vmnodes[idx]} -%{~ if scs_connectiontype | lower == "winrm" } +%{~ if scs_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -58,7 +58,7 @@ ${sid}_ERS: become_user : ${scs_become_user} os_type : ${scs_os_type} vm_name : ${ers_vmnodes[idx]} -%{~ if ers_connectiontype | lower == "winrm" } +%{~ if ers_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -83,7 +83,7 @@ ${sid}_PAS: become_user : ${app_become_user} os_type : ${app_os_type} vm_name : ${pas_vmnodes[idx]} -%{~ if app_connectiontype | lower == "winrm" } +%{~ if app_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -107,7 +107,7 @@ ${sid}_APP: become_user : ${app_become_user} os_type : ${app_os_type} vm_name : ${app_vmnodes[idx]} -%{~ if app_connectiontype | lower == "winrm" } +%{~ if app_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -131,7 +131,7 @@ ${sid}_WEB: become_user : ${web_become_user} os_type : ${web_os_type} vm_name : ${web_vmnodes[idx]} -%{~ if web_connectiontype | lower == "winrm" } +%{~ if web_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} @@ -154,7 +154,7 @@ ${sid}_OBSERVER_DB: virtual_host : ${observers[idx]} become_user : ${db_become_user} os_type : ${db_os_type} -%{~ if db_connection | lower == "winrm" } +%{~ if db_connection == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} ${winrm_read_timeout} From d63c4d3b710f6d9ea3f100400b80511afaf9fcfb Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 00:12:58 +0300 Subject: [PATCH 583/607] Fix Mount output --- deploy/terraform/run/sap_system/module.tf | 2 +- .../modules/sap_system/output_files/sap-parameters.tmpl | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index c885b7e99d..d579ef8d38 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -423,7 +423,7 @@ module "output_files" { shared_home = var.shared_home hana_data = 
module.hdb_node.hana_data_ANF_volumes hana_log = module.hdb_node.hana_log_ANF_volumes - hana_shared = [module.hdb_node.hana_shared] + hana_shared = module.hdb_node.hana_shared usr_sap = module.common_infrastructure.usrsap_path ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 186f0d41b1..1b503f016b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -50,7 +50,6 @@ ers_lb_ip: ${ers_server_loadbalancer_ip} scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} %{~ endif } - # PAS Instance Number pas_instance_number: "${pas_instance_number}" @@ -115,8 +114,10 @@ database_loadbalancer_ip: ${database_loadbalancer_ip} # Backwards compatibility db_lb_ip: ${database_loadbalancer_ip} +%{~ if platform == "SQLSERVER" } # database_cluster_ip is the IP address of the load balancer for the database cluster in Windows database_cluster_ip: ${database_cluster_ip} +%{~ endif } # use_simple_mount defines if simple mount is to be used use_simple_mount: ${is_use_simple_mount} From a9f710639c93fc2b8abf4a73ac5e8c10b50797ca Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 13:08:31 +0300 Subject: [PATCH 584/607] Fixes for secondary --- .../tasks/2.6.1-anf-mounts.yaml | 140 +++++++++--------- 1 file changed, 74 insertions(+), 66 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 11e8fea071..23d5389338 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -569,7 +569,7 @@ # | | # +------------------------------------4--------------------------------------*/ # Standard block tasks for non scale out setups -- name: "ANF Mount: Run tasks for non-scale out setups" +- name: "ANF Mount: Run tasks for non-scale out HANA setups" when: - not database_scale_out - tier == 'sapos' @@ -734,38 +734,33 @@ # | ANF Mount: Run tasks for scale out setups | # | | # +------------------------------------4--------------------------------------*/ -# Run this block set when db_Scale_out is true but db_high_availability is false -- name: "ANF Mount: Run tasks for scale out setups" +# Run this block set when database_scale_out is true +# when database_high_availability is true, we need two values for hana_shared_mountpoint which will be mounted on alternative nodes denoting two sites. +# When database_high_availability is false/undefined, only one value of hana_shared_mountpoint is used and mounted on all HANA hosts. 
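+# Illustrative example (assumed host names): with DB hosts [hdb00, hdb01, hdb02, hdb03],
+# hdb00/hdb02 (site 1) mount hana_shared_mountpoint[0] and hdb01/hdb03 (site 2) mount
+# hana_shared_mountpoint[1], matching the odd/even host indexing used further below.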
+ +- name: "ANF Mount: Run tasks for scale out HANA setups" when: - database_scale_out - tier == 'sapos' - node_tier == 'hana' block: - - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" + - name: "ANF Mount: Scale Out - Create SAP Directories" ansible.builtin.file: owner: '{{ hdbadm_uid }}' group: sapsys mode: 0755 - path: "/usr/sap/{{ db_sid | upper }}" + path: "{{ item.path }}" state: directory + with_items: # Variables are defined in /deploy/ansible/var/ansible-input-api.yaml file + - { path: '{{ hana_log_basepath }}' } + - { path: '{{ hana_shared_basepath }}' } + - { path: '{{ hana_usrsap_basepath }}' } + - { path: '{{ hana_data_baasepath }}'} - - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - mode: 0755 - path: "/hana/data/{{ db_sid | upper }}" - state: directory - - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - mode: 0755 - path: "/hana/log/{{ db_sid | upper }}" - state: directory - - - name: "ANF Mount: HANA shared - Scale out" + # This only runs for HANA Scale out with standby node configuration + # Only first HANA shared volume is used. + - name: "ANF Mount: HANA shared - Scale out - Shared storage with stand-by" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml loop: - { @@ -775,7 +770,7 @@ 'folder': 'shared', 'mount': '{{ hana_shared_mountpoint[0] }}', 'opts': '{{ mnt_options }}', - 'path': '/hana/shared', + 'path': '{{ hana_shared_basepath }}', 'permissions': '0775', 'set_chattr_on_dir': false, 'target_nodes' : ['hana'], @@ -786,9 +781,39 @@ when: - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 0 + - not database_high_availability + + # This runs for HANA scale out with HSR & Pacemaker. + # We need two HANA shared volumes, one for each site + - name: "ANF Mount: HANA shared - Scale out - HSR & Pacemaker" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + loop: + - { + 'type': 'shared', + 'temppath': 'shared', + # change folder to match the mount folder within the share + 'folder': 'shared', + # Logic : hana_shared_mountpoint[0] goes on odd numbered HANA hosts and hana_shared_mountpoint[1] goes on even numbered HANA hosts. + 'mount': "{% if ansible_hostname in query('inventory_hostnames', '{{ sap_sid | upper }}_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}", + 'opts': '{{ mnt_options }}', + 'path': '{{ hana_shared_basepath }}', + 'permissions': '0775', + 'set_chattr_on_dir': false, + 'target_nodes' : ['hana'], + 'create_temp_folders': true + } + vars: + primary_host: "{{ ansible_hostname }}" + when: + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 1 + - database_high_availability + - # This runs for unique share per node - - name: "ANF Mount: usrsap - Scale out" + # This runs for unique folder path within corresponding /hana/shared per node for Scale out with Standby. + # Scale out with Shared storage and stand-by will always use /usr/sap/ derived from /hana/shared directory. /usr/sap base can be on root file system or a small disk to store supporting SAP binaries. + # Note: For Scale out - HSR & Pacemaker, we recommend Premium SSD/V2 at the moment. 
Support for ANF shares will be added in the near future + - name: "ANF Mount: usrsap - Scale out - Shared storage with stand-by" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml loop: - { @@ -797,7 +822,7 @@ 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", 'mount': '{{ hana_shared_mountpoint[0] }}', 'opts': '{{ mnt_options }}', - 'path': '/usr/sap/{{ db_sid | upper }}', + 'path': '{{ hana_usrsap_basepath }}', 'permissions': '0775', 'set_chattr_on_dir': false, 'target_nodes' : ['hana'], @@ -809,14 +834,16 @@ - hana_shared_mountpoint is defined - hana_shared_mountpoint | length == 1 # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. - - database_scale_out | default(false) + - not database_high_availability - - name: "ANF Mount: HANA Data - Scale out - Create mount list" + # This runs for HANA Scale out - Shared storage with Stand by. Since the /hana/data & /hana/log is shared across HANA nodes + # This builds a list of unique hana data mountpoints for each of the HANA hosts + - name: "ANF Mount: HANA Data - Scale out - Shared storage with stand-by - Create mount list" block: - name: "Initialize HANA Data mountpoints" ansible.builtin.set_fact: hana_data_scaleout_mountpoint: [] - - name: "Build HANA Data mountpoints" + - name: "Build HANA Data mountpoints" ansible.builtin.set_fact: # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" @@ -831,7 +858,7 @@ folder: 'hanadata', mount: "{{ item }}", opts: "{{ mnt_options }}", - path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", + path: "{{ hana_data_basepath + '/mnt0000' + ( my_index + 1 )| string }}", permissions: '0775', set_chattr_on_dir: false, target_nodes: ['hana'], @@ -841,15 +868,17 @@ - hana_data_mountpoint is defined - not database_high_availability - - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" + + - name: "DEBUG:ANF Mount: HANA Data - Scale out - Shared storage with stand-by - Create mount list" ansible.builtin.debug: - var: hana_data_scaleout_mountpoint - verbosity: 2 + var: hana_data_scaleout_mountpoint + verbosity: 2 when: - - hana_data_mountpoint is defined + - hana_data_scaleout_mountpoint is defined - - name: "ANF Mount: HANA Data - Scale out" - ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml + + - name: "ANF Mount: HANA Data - Scale out - Shared storage with stand-by" + ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. with_items: - "{{ hana_data_scaleout_mountpoint | list }}" @@ -857,8 +886,11 @@ primary_host: "{{ ansible_hostname }}" when: - hana_data_mountpoint is defined + - not database_high_availability - - name: "ANF Mount: HANA Log - Scale out - Create mount list" + # This runs for HANA Scale out - Shared storage with Stand by. 
Since the /hana/data & /hana/log is shared across HANA nodes + # This builds a list of unique hana data mountpoints for each of the HANA hosts + - name: "ANF Mount: HANA Log - Scale out - Shared storage with stand-by - Create mount list" block: - name: "Initialize HANA Log mountpoints" ansible.builtin.set_fact: @@ -886,16 +918,13 @@ } when: - hana_log_mountpoint is defined - - not database_high_availability - - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" + - name: "DEBUG:ANF Mount: HANA Log - Scale out - Shared storage with stand-by - Create mount list" ansible.builtin.debug: var: hana_log_scaleout_mountpoint - verbosity: 2 - when: - - hana_log_mountpoint is defined + verbosity: 2 - - name: "ANF Mount: HANA Log - Scale out" + - name: "ANF Mount: HANA Log - Scale out - Shared storage with stand-by" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. with_items: @@ -905,17 +934,6 @@ when: - hana_log_mountpoint is defined - - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - path: "{{ item.path }}" - state: directory - recurse: true - with_items: - - { 'path': '/hana/shared' } - - { 'path': '/usr/sap/{{ db_sid | upper }}' } - - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" ansible.builtin.file: owner: '{{ hdbadm_uid }}' @@ -925,19 +943,9 @@ recurse: true with_items: - "{{ hana_log_scaleout_mountpoint }}" - when: - - hana_log_mountpoint is defined - - - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" - ansible.builtin.file: - owner: '{{ hdbadm_uid }}' - group: sapsys - path: "{{ item.path }}" - state: directory - recurse: true - with_items: - "{{ hana_data_scaleout_mountpoint }}" - when: - - hana_data_mountpoint is defined + - { 'path': '{{ hana_shared_basepath }}' } + - { 'path': '{{ hana_usrsap_basepath }}' } + ... From 3d18b2500d563b184c01ebd4c5f1ae3e1ea02f69 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 13:45:23 +0300 Subject: [PATCH 585/607] Add hana_shared_basepath --- deploy/ansible/vars/ansible-input-api.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 523043b220..98a82b780c 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -41,6 +41,7 @@ hana_backup_path: /hana/backup # Note: the spacing and lining of Jinja2 expression is critical here, lest we end up with a stray character in the path hana_data_basepath: "{% if database_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}" hana_log_basepath: "{% if database_scale_out %}/hana/log/{{ db_sid | upper }}{% else %}/hana/log{% endif %}" +hana_shared_basepath: "/hana/shared" hana_autostart: false # When set to true, will configure autostart parameter to 1 for HANA nodes. 
Only applicable for Scale out
 sap_sid: "" # REQUIRED - SAP Install
 download_basket_dir: "{{ target_media_location }}/download_basket"

From 046623ea32886533ccc1b5b742ef1320fbd6e639 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Tue, 28 May 2024 14:36:53 +0300
Subject: [PATCH 586/607] feat: Add hana_usrsap_basepath variable to
 ansible-input-api.yaml

---
 deploy/ansible/vars/ansible-input-api.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml
index 98a82b780c..00eecfcdfc 100644
--- a/deploy/ansible/vars/ansible-input-api.yaml
+++ b/deploy/ansible/vars/ansible-input-api.yaml
@@ -42,6 +42,7 @@ hana_backup_path: /hana/backup
 hana_data_basepath: "{% if database_scale_out %}/hana/data/{{ db_sid | upper }}{% else %}/hana/data{% endif %}"
 hana_log_basepath: "{% if database_scale_out %}/hana/log/{{ db_sid | upper }}{% else %}/hana/log{% endif %}"
 hana_shared_basepath: "/hana/shared"
+hana_usrsap_basepath: "/usr/sap"
 hana_autostart: false # When set to true, will configure autostart parameter to 1 for HANA nodes. Only applicable for Scale out
 sap_sid: "" # REQUIRED - SAP Install
 download_basket_dir: "{{ target_media_location }}/download_basket"

From bc4c3f631ae98d6cf5f5a01d628b530f69037361 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Tue, 28 May 2024 15:03:47 +0300
Subject: [PATCH 587/607] fixed typo

---
 .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index 23d5389338..27c9c9ddd3 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -755,7 +755,7 @@
       - { path: '{{ hana_log_basepath }}' }
       - { path: '{{ hana_shared_basepath }}' }
       - { path: '{{ hana_usrsap_basepath }}' }
-      - { path: '{{ hana_data_baasepath }}'}
+      - { path: '{{ hana_data_basepath }}'}

From 292bc0f29bff721356c0cf872b14fa3409a192de Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Tue, 28 May 2024 17:58:58 +0300
Subject: [PATCH 588/607] Refactor ANF Mount task for SHA scaleout

---
 .../tasks/2.6.1-anf-mounts.yaml               | 25 +++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index 27c9c9ddd3..5352446108 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -942,10 +942,31 @@
       state: directory
       recurse: true
     with_items:
-      - "{{ hana_log_scaleout_mountpoint }}"
-      - "{{ hana_data_scaleout_mountpoint }}"
       - { 'path': '{{ hana_shared_basepath }}' }
       - { 'path': '{{ hana_usrsap_basepath }}' }
+
+  - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})"
+    ansible.builtin.file:
+      owner: '{{ hdbadm_uid }}'
+      group: sapsys
+      path: "{{ item.path }}"
+      state: directory
+ 
recurse: true + with_items: + - "{{ hana_data_scaleout_mountpoint }}" + when: + - hana_data_scaleout_mountpoint is defined ... From 086dd49b02815dad68c77293a130196bc25cf23e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 19:22:46 +0300 Subject: [PATCH 589/607] Change the until condition --- .../1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml index 1737a03dfc..3291a3a474 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml @@ -90,7 +90,7 @@ register: cluster_stable_check retries: 12 delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + until: "primary_instance_name in cluster_stable_check.stdout and secondary_instance_name in cluster_stable_check.stdout" when: ansible_distribution_major_version in ["8", "9"] - name: "1.18.2.0 Generic Pacemaker - Ensure the expected quorum votes is set for the cluster" From c1ae3195721af48f5c94c9b24882f01be8860205 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 19:47:16 +0300 Subject: [PATCH 590/607] Make the HANA packages --- .../tasks/1.17.2-provision.yml | 26 +++++++++++++++++++ .../1.4-packages/vars/os-packages.yaml | 1 - 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index 7b9272d75d..110726b4fe 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -57,6 +57,32 @@ failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name when: ansible_hostname == secondary_instance_name +- name: Implement the HA Resource Agent (REDHAT) + when: + - node_tier in ['hana'] + - not database_scale_out + - database_high_availability + - ansible_os_family | upper == "REDHAT" + block: + - name: Generate list of deployed packages on current host + ansible.builtin.package_facts: + + - name: "Ensure resource-agents-sap-hana is installed (REDHAT)" + ansible.builtin.package: + name: resource-agents-sap-hana + state: present + when: + - ansible_facts.packages['resource-agents-sap-hana'] is defined + + - name: "Ensure resource-agents-sap-hana-scaleout is absent (REDHAT)" + ansible.builtin.package: + name: resource-agents-sap-hana-scaleout + state: absent + when: + - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined + + + # Clustering commands are based on the Host OS - name: "1.17 Generic Pacemaker - Cluster based on {{ ansible_os_family }} on VM {{ ansible_hostname }}" ansible.builtin.include_tasks: "1.17.2.0-cluster-{{ ansible_os_family }}.yml" diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 9669c97d94..8d87096154 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -232,7 +232,6 @@ packages: - { tier: 'sapos', package: 'uuidd', 
node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'csh', node_tier: 'all', state: 'present' } # ------------------------- Begin - Packages required for Clustering ---------------------------------------8 - - { tier: 'ha', package: 'resource-agents-sap-hana', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'resource-agents-cloud', node_tier: 'hana', state: 'present' } From 8fd497bc40b34f9e50247dc35e0995e69416fdcc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 28 May 2024 20:21:26 +0300 Subject: [PATCH 591/607] remove the package --- deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 8d87096154..33f0c4ede4 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -174,7 +174,6 @@ packages: - { tier: 'ha', package: 'pacemaker', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'nmap', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'all', state: 'present' } - - { tier: 'ha', package: 'resource-agents-sap-hana', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'ers', state: 'present' } # ------------------------- End - Packages required for Clustering -----------------------------------------8 From 41c5f252be8d984af7f2c6f734471484b99d4fe4 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 29 May 2024 10:09:56 +0300 Subject: [PATCH 592/607] Fix path --- deploy/pipelines/01-deploy-control-plane.yaml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 814ec623c5..69ef1149ee 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -572,19 +572,14 @@ stages: echo SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH >.sap_deployment_automation/config export SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH - cd DEPLOYER - ls -lart - cd $(deployerfolder) - ls -lart - echo -e "$green--- File Validations ---$reset" - if [ ! -f DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then + if [ ! -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then echo -e "$boldred--- File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found ---$reset" echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found." exit 2 fi - if [ ! -f LIBRARY/$(libraryfolder)/$(libraryconfig) ]; then + if [ ! -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) ]; then echo -e "$boldred--- File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found ---$reset" echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found." 
exit 2 @@ -701,7 +696,6 @@ stages: export TF_LOG_PATH=${CONFIG_REPO_PATH}/.sap_deployment_automation/terraform.log sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh - export ARM_USE_AZUREAD=true if [ "$USE_MSI" = "true" ]; then echo -e "$cyan--- Using MSI ---$reset" From c553965c88f949e43675600271cb78620e68885b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 29 May 2024 10:16:52 +0300 Subject: [PATCH 593/607] remove duplicate --- .../modules/sap_deployer/infrastructure.tf | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index d301822699..03ef7bca79 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -145,19 +145,3 @@ resource "azurerm_role_assignment" "resource_group_acsservice_msi" { principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id } -resource "azurerm_role_assignment" "resource_group_acsservice" { - provider = azurerm.main - count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 - scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions administrator" - principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id -} - -resource "azurerm_role_assignment" "resource_group_acsservice_msi" { - provider = azurerm.main - count = var.assign_subscription_permissions ? 1 : 0 - scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions administrator" - principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id -} - From 9376a1c6fb2f6adcee3bb849add8ff1672f24279 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 30 May 2024 04:03:20 +0300 Subject: [PATCH 594/607] chore: Update count condition in dns.tf for local private DNS usage --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index c0b41cda3d..d9196a16e1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id),"")> 0 ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id,""))> 0 ? 
1 : 0
   depends_on = [
                  azurerm_resource_group.library
                ]

From 64f294e0acdb41fffcb88b79bb89b16b4dc8cc93 Mon Sep 17 00:00:00 2001
From: hdamecharla
Date: Thu, 30 May 2024 16:54:25 +0530
Subject: [PATCH 595/607] chore: Update and remove duplicate resource
 definitions for kdump disks output in anydb_node, kdump disks and extensions
 in app_tier module

---
 .../modules/sap_system/anydb_node/outputs.tf  |  13 +-
 .../modules/sap_system/app_tier/vm-scs.tf     | 139 ------------------
 2 files changed, 1 insertion(+), 151 deletions(-)

diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf
index a1bdeece3e..b633165785 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf
@@ -165,18 +165,7 @@ output "database_shared_disks" {
                             )
                           )
             }
-output "database_kdump_disks" {
-  description = "List of Azure kdump disks"
-  value = distinct(
-    flatten(
-      [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME :
-        [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump :
-          format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun)
-        ]
-      ]
-    )
-  )
-  }
+
 output "database_kdump_disks" {
   description = "List of Azure kdump disks"
   value = distinct(
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
index b50a5a13ab..1e172ef4fb 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf
@@ -726,145 +726,6 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" {
 }


-resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" {
-  provider = azurerm.main
-  count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? (
-    local.scs_server_count) : (
-    0 )
-  virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id
-  name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent"
-  publisher = "Microsoft.Azure.Security.Monitoring"
-  type = "AzureSecurityLinuxAgent"
-  type_handler_version = "2.0"
-  auto_upgrade_minor_version = true
-
-  settings = jsonencode(
-    {
-      "enableGenevaUpload" = true,
-      "enableAutoConfig" = true,
-      "reportSuccessOnUnsupportedDistro" = true,
-    }
-  )
-}
-
-resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" {
-  provider = azurerm.main
-  count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 
( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityWindowsAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -######################################################################################### -# # -# Azure Data Disk for Kdump # -# # -#######################################+################################################# -resource "azurerm_managed_disk" "kdump" { - provider = azurerm.main - count = ( - local.enable_deployment && - var.application_tier.scs_high_availability && - ( - upper(var.application_tier.scs_os.os_type) == "LINUX" && - ( var.application_tier.fence_kdump_disk_size > 0 ) - ) - ) ? local.scs_server_count : 0 - - name = format("%s%s%s%s%s", - try( var.naming.resource_prefixes.fence_kdump_disk, ""), - local.prefix, - var.naming.separator, - var.naming.virtualmachine_names.SCS_VMNAME[count.index], - try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) - ) - location = var.resource_group[0].location - resource_group_name = var.resource_group[0].name - create_option = "Empty" - storage_account_type = "Premium_LRS" - disk_size_gb = try(var.application_tier.fence_kdump_disk_size,64) - disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) - tags = var.tags - - zone = local.scs_zonal_deployment ? ( - upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - azurerm_linux_virtual_machine.scs[count.index].zone) : - null - ) : ( - null - ) - lifecycle { - ignore_changes = [ - create_option, - hyper_v_generation, - source_resource_id, - tags - ] - } - -} - -resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { - provider = azurerm.main - count = ( - local.enable_deployment && - var.application_tier.scs_high_availability && - ( - upper(var.application_tier.scs_os.os_type) == "LINUX" && - ( var.application_tier.fence_kdump_disk_size > 0 ) - ) - ) ? local.scs_server_count : 0 - - managed_disk_id = azurerm_managed_disk.kdump[count.index].id - virtual_machine_id = (upper(var.application_tier.scs_os.os_type) == "LINUX" # If Linux - ) ? ( - azurerm_linux_virtual_machine.scs[count.index].id - ) : null - caching = "None" - lun = var.application_tier.fence_kdump_lun_number -} - -resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { - provider = azurerm.main - count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorLinuxAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true -} - - -resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { - provider = azurerm.main - count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? 
( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorWindowsAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true -} - - resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { provider = azurerm.main count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( From 88cf409ec454af6cbcd28d54ec6c8c5f71153c3f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 1 Jun 2024 00:51:49 +0300 Subject: [PATCH 596/607] Add tags for deployer --- .../terraform/bootstrap/sap_deployer/tfvar_variables.tf | 5 ++++- deploy/terraform/bootstrap/sap_deployer/transform.tf | 9 +++------ deploy/terraform/run/sap_deployer/tfvar_variables.tf | 5 ++++- deploy/terraform/run/sap_deployer/transform.tf | 9 +++------ 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index e1d0e830c7..594e40b6f6 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -411,7 +411,10 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } - +variable "tags" { + description = "If provided, tags for all resources" + default = {} + } ######################################################################################### # # # DNS settings # diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 84f49ec231..49ca8fa23b 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -26,12 +26,8 @@ locals { "" ) } - tags = try( - coalesce( - var.resourcegroup_tags, - try(var.infrastructure.tags, {}) - ), - {} + tags = merge( + var.tags, var.resourcegroup_tags ) vnets = { @@ -133,6 +129,7 @@ locals { deploy_monitoring_extension = var.deploy_monitoring_extension deploy_defender_extension = var.deploy_defender_extension + } deployer = { size = try( diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 1c159fb3cb..cfed10f8a4 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -414,7 +414,10 @@ variable "shared_access_key_enabled" { type = bool } - +variable "tags" { + description = "If provided, tags for all resources" + default = {} + } ######################################################################################### # # # DNS settings # diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index 88936dcb28..a59d4634b7 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -23,12 +23,9 @@ locals { "" ) } - tags = try( - coalesce( - var.resourcegroup_tags, - try(var.infrastructure.tags, {}) - ), - {} + tags = merge( + var.tags, var.resourcegroup_tags + ) vnets = { From 22262a1cec9512f9325a55335b0dfa378268a449 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 1 Jun 2024 23:46:50 +0300 Subject: [PATCH 597/607] refactor: Update tags assignment in sap_deployer module --- .../terraform-units/modules/sap_deployer/variables_local.tf | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf index 7fa8473b4f..63dffd5b5e 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_local.tf @@ -262,6 +262,6 @@ locals { # automation_keyvault_resourcegroup_name = local.automation_keyvault_exist ? split("/", local.prvt_key_vault_id)[4] : "" // Tags - tags = try(var.deployer.tags, { "Role" = "Deployer" }) + tags = merge(var.infrastructure.tags,try(var.deployer.tags, { "Role" = "Deployer" })) } From 85a8c556551e6070e98dff4783e3f970fae055c0 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 1 Jun 2024 23:55:15 +0300 Subject: [PATCH 598/607] Use the built in token to authenticate to ADO --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 7d584be18e..a452d7d9a8 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -893,7 +893,7 @@ stages: TF_VAR_agent_pool: $(POOL) TF_VAR_agent_ado_url: $(System.CollectionUri) TF_VAR_tf_version: $(tf_version) - AZURE_DEVOPS_EXT_PAT: $(PAT) + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) IS_PIPELINE_DEPLOYMENT: true WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID) From 22b57c1ae40237ff196f81e07388b3e26ec800a0 Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Wed, 11 Sep 2024 12:59:44 +0530 Subject: [PATCH 599/607] Updates to scale out HSR code (#598) * Fix the deployment using SPN * Ensure that custom_logical_volumes can be striped + have sensible stripesize default if a lvm consists of more than one pv. (#587) Use already established pattern from framework specific LVMs to define stripesize on custom logical volumes. 
* Perform az login before the az access token in ams provider creation steps
* Revert "Perform az login before the az access token in ams provider creation"
* Update AMS provider creation tasks in ansible playbook
* Update deploy control plane pipeline to use service principal for authentication
* chore: Temporarily set identity type to "SystemAssigned, UserAssigned" in app_service.tf
* test for new /etc/hosts configuration for HSR scale out
* Update hosts.j2
* Update hosts.j2
* Update hosts.j2
* Various Ansible fixes (#591)
* Add Red Hat 8.10 repo to 1.3-repository vars
* Create entries for Red Hat 8.10 in 1.4-packages vars
* Add 'pam' to OS packages list for DB2 with state 'latest'
Ensures that x86_64 package is updated, avoiding conflict with libpam.so.0 install, which requires i686 version of pam
* Add "state: 'latest'" to loops
* Correct cluster version check in 1.17-generic-pacemaker
* Correct cluster version check in 5.6-scsers-pacemaker
* Correct cluster version check in 5.5-hanadb-pacemaker
* Create entries for Red Hat 8.10 in 1.17-generic-pacemaker
---------
Co-authored-by: Csaba Daradics
* add code for scaleout - hook
* fix to HSR code branch
* Update main.yaml
* Update main.yaml
* Update main.yaml
* Update main.yaml
* Update 1.4.3-update-packages-RedHat.yaml
* Update 1.4.3-update-packages-RedHat.yaml
* chore: Adjust Azure fence agent packages and remove unused Azure Python packages from list when deploying on SLES 15 SP5
* chore: Override changed status for saptune_check and active_saptune_solution tasks in 2.10.3.yaml
* chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml
* chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml
* chore: Update New-SDAFDevopsWorkloadZone.ps1 script to fix variable group creation issue
* Update main.yaml
* Update playbook_04_00_01_db_ha.yaml
* Update main.yaml
* Update main.yaml
* chore: Refactor saptune_solution_to_apply logic in 2.10.3.yaml
* Set HDB schema name for ABAP and JAVA systems (#593)
* Set HDB Schema Name task
* fix command error and remove ignore_errors
* Fix parsing error
* Update main.yaml
* Update main.yaml
* Update main.yaml
* Update main.yaml
* Update main.yaml
* chore: Comment out unnecessary role assignments in New-SDAFDevopsProject.ps1 script
* Update main.yaml
* exclude sapmnt from observer
* bug fix, Scale out tasks not running on Majority maker node in playbook 4_01
* Update playbook_04_00_01_db_ha.yaml
* replace pause with wait-for
* replace pause with wait_for
* enable AUTOMATED_REGISTER to true
* Release testing (#597)
* script from main branch
* chore: Add "nat_gateway" variable to global variables in sap_namegenerator module
* chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive
* chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive
* Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline
* chore: Update SDAF version to 3.11.0.3
* Refactor az logout command in sap-workload-zone.yaml
* Refactor az logout command in sap-workload-zone.yaml
* Refactor SSH command in 1.17 Generic Pacemaker provision playbook
* Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline.
* remove duplicate block
* remove blank line
* add mode to get_url downloaded file.
* remove blank line from start of file.
* Rename quality check to quality assurance (#600)
* script from main branch
* chore: Add "nat_gateway" variable to global variables in sap_namegenerator module
* chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive
* chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive
* Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline
* chore: Update SDAF version to 3.11.0.3
* Refactor az logout command in sap-workload-zone.yaml
* Refactor az logout command in sap-workload-zone.yaml
* Refactor SSH command in 1.17 Generic Pacemaker provision playbook
* chore: Remove cacheable flag from 3.3 BoM Processing task
* Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive
* Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline.
* remove duplicate block
* remove blank line
* add mode to get_url downloaded file.
* remove blank line from start of file.
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* Refactor sap-system-deployment.yaml to configure devops CLI extension
* chore: Update SDAF version to 3.11.0.3
* Refactor az logout command in sap-workload-zone.yaml
* chore: move SAP on Azure quality checks after post configuration
* chore: Update quality check paths to quality_assurance
* chore: Update quality assurance file paths
* chore: Refactor YAML files to improve code organization and readability
* chore: Add cacheable flag to 3.3 BoM Processing task
---------
Co-authored-by: dkSteBTh
* Release v3.11.0.3 (#599)
* Bring in the manual updates
* Fixing Merge conflicts
* Fix Linting
* Pacemaker changes, saptune and NAT Gateway (#583)

## Summary of Changes

### Authentication and Identity Management
- **Web Application Authentication Configuration**: Repeated updates to refine and simplify the authentication configuration and identity management scripts.

### Repository and Package Management
- **SLES Repositories**: Added repositories for SLES 15.3, 15.4, and 15.5.
- **WAAgent Updates**: Updated WAAgent package, configuration checks, and systemd service reloads across multiple roles.

### Deployment Configuration
- **NAT Gateway**: Added support for provisioning a NAT gateway, including configuration variables in Terraform files.
- **AutoUpdate Configuration**: Updated AutoUpdate.Enabled settings and added Extensions.WaitForCloudInit across various roles.
- **Oracle Simplification**: Simplified Oracle-related configurations, including ASM backup process and Data Guard tasks.
- **SAP Deployment Playbooks**: Various updates to SAP deployment playbooks, including fixing conditions, resource flag settings, and systemd service paths.

### Infrastructure and Pipeline Enhancements
- **Control Plane Pipeline**: Multiple fixes and improvements to error handling, logging, environment variables, and Azure AD authentication.
- **Terraform and Ansible Versions**: Updated versions in deployment scripts to 1.7.5.
- **Dotnet SDK**: Bumped dotnet SDK installation to the latest version. ### Miscellaneous - **Error Handling and Logging**: Improved error handling and logging across various deployment scripts and playbooks. - **Validation Fixes**: Fixed validation conditions for disk space, OS version checks, and cluster initialization commands. --------- Co-authored-by: Kimmo Forss Co-authored-by: devanshjain * Fix regex necessary to comment lines in /usr/sap/sapservices (#584) Co-authored-by: Csaba Daradics * remove duplicate resource * Fix path * chore: Fix the count for the table resource * Misc fixes * Fix systemd service reload in 1.4 Packages role * Various Terraform code fixes (#586) * Fix typo in terraform-units/modules/sap_landscape/providers.tf * Remove duplicate of resource azurerm_network_security_rule/nsr_controlplane_storage In terraform-units/modules/sap_landscape/nsg.tf * Remove fourth argument from nat_gateway_name definition In terraform-units/modules/sap_landscape/variables_local.tf * Remove duplicate for database_kdump_disks In terraform-units/modules/sap_system/anydb_node/outputs.tf * Remove all duplicates from terraform-units/modules/sap_system/app_tier/vm-scs.tf * Remove duplicates in terraform-units/modules/sap_system/output_files/sap-parameters.tmpl --------- Co-authored-by: Csaba Daradics * chore: Update count condition in dns.tf for local private DNS usage * chore: Update NAT Gateway public IP name format * chore: Update NAT Gateway public IP lifecycle configuration * chore: Update NAT Gateway provider to azureng * chore: Fix typo in azureng provider configuration alias in sap_landscape module * chore: Update NAT Gateway provider to azurerm.main * Update 01-deploy-control-plane.yaml * chore: Update app_service_plan name format in sap_deployer module * Update ARM_CLIENT_SECRET assignment in deploy control plane pipeline * Add the compliance extensions also to the deployers * Ensure that custom_logical_volumes can be striped + have sensible stripesize default if a lvm consists of more than one pv. (#587) Use already established pattern from framework specific LVMs to define stripesize on custom logical volumes. 
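To make the stripesize change in #587 concrete: a minimal sketch of a striped custom logical volume task, assuming hypothetical item fields (`vg`, `lv`, `size`, `pv_count`) on the existing `custom_logical_volumes` list, with the `-i`/`-I` lvcreate options passed through `community.general.lvol`'s free-form `opts` parameter:

```yaml
- name: "Create custom logical volume, striped across the volume group's PVs"
  community.general.lvol:
    vg: "{{ item.vg }}"
    lv: "{{ item.lv }}"
    size: "{{ item.size }}"
    # Stripe across every PV backing the VG; 128 KiB is an assumed
    # default stripe size, not necessarily the framework's value.
    opts: "-i {{ item.pv_count | default(2) }} -I 128"
  loop: "{{ custom_logical_volumes }}"
```

Striping only pays off when the VG really has more than one PV, which is why the change derives the defaults from the PV count instead of hard-coding them.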
* Update AMS provider creation tasks in ansible playbook * Update deploy control plane pipeline to use service principal for authentication * chore: Temporarily set identity type to "SystemAssigned, UserAssigned" in app_service.tf * Various Ansible fixes (#591) * Add Red Hat 8.10 repo to 1.3-repository vars * Create entries for Red Hat 8.10 in 1.4-packages vars * Add 'pam' to OS packages list for DB2 with state 'latest' Ensures that x86_64 package is updated, avoiding conflict with libpam.so.0 install, which requires i686 version of pam * Add "state: 'latest'" to loops * Correct cluster version check in 1.17-generic-pacemaker * Correct cluster version check in 5.6-scsers-pacemaker * Correct cluster version check in 5.5-hanadb-pacemaker * Create entries for Red Hat 8.10 in 1.17-generic-pacemaker --------- Co-authored-by: Csaba Daradics * chore: Adjust Azure fence agent packages and remove unused Azure Python packages from list when deploying on SLES 15 SP5 * chore: Override changed status for saptune_check and active_saptune_solution tasks in 2.10.3.yaml * chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml * chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml * chore: Update New-SDAFDevopsWorkloadZone.ps1 script to fix variable group creation issue * chore: Refactor saptune_solution_to_apply logic in 2.10.3.yaml * Set HDB schema name for ABAP and JAVA systems (#593) * Set HDB Schema Name task * fix command error and remove ignore_errors * Fix parsing error * chore: Comment out unnecessary role assignments in New-SDAFDevopsProject.ps1 script * Release testing (#597) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook * Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline. * remove duplicate block * remove blank line * add mode to get_url downloaded file. * remove blank line from start of file.
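The 2.10.3.yaml saptune commits above boil down to making the status probes idempotent and guarding the apply step; a sketch under the assumption that `saptune status` and `saptune solution apply` are the commands being wrapped (task names are illustrative):

```yaml
- name: "2.10.3 - Check saptune status"
  ansible.builtin.command: saptune status
  register: saptune_check
  changed_when: false      # a read-only probe must never report a change
  failed_when: false       # evaluate the exit code in later tasks instead

- name: "2.10.3 - Apply the solution only when the facts exist"
  ansible.builtin.command: "saptune solution apply {{ saptune_solution_to_apply }}"
  when:
    - saptune_solution_enabled is defined
    - saptune_solution_to_apply is defined
```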
* Rename quality check to quality assurance (#600) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook * chore: Remove cacheable flag from 3.3 BoM Processing task * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline. * remove duplicate block * remove blank line * add mode to get_url downloaded file. * remove blank line from start of file. * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * chore: move SAP on Azure quality checks after post configuration * chore: Update quality check paths to quality_assurance * chore: Update quality assurance file paths * chore: Refactor YAML files to improve code organization and readability * chore: Add cacheable flag to 3.3 BoM Processing task --------- Co-authored-by: dkSteBTh --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Co-authored-by: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Co-authored-by: Csaba Daradics Co-authored-by: hdamecharla Co-authored-by: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Co-authored-by: Steffen Bo Thomsen Co-authored-by: Jesper Severinsen <30658160+jesperseverinsen@users.noreply.github.com> * Bump Azure.Identity from 1.11.3 to 1.11.4 in /Webapp/SDAF (#594) Bumps [Azure.Identity](https://github.com/Azure/azure-sdk-for-net) from 1.11.3 to 1.11.4. - [Release notes](https://github.com/Azure/azure-sdk-for-net/releases) - [Commits](https://github.com/Azure/azure-sdk-for-net/compare/Azure.Identity_1.11.3...Azure.Identity_1.11.4) --- updated-dependencies: - dependency-name: Azure.Identity dependency-type: direct:production ...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: Update os-packages.yaml for redhat8.6 * enable EPEL repositories for RedHat, enable unrar package to allow for newer database schemas * Update repos.yaml * restrict unrar to SCS tier only * Update bom_processor.yaml * Update os-packages.yaml * test "unar" no directory option to force files unpack to CD_EXPORT in flatten structure * experimental change to check for nested directories when running unar for Redhat on schema files * Update bom_processor.yaml * port RAR exe fix from windows to Linux 3.3 bom for testing * Update process_exe_archives.yaml * Update process_exe_archives.yaml * made a minor typo in path due to habitual CMD.exe user * Update process_exe_archives.yaml * Update process_exe_archives.yaml * Update process_exe_archives.yaml * Update process_exe_archives.yaml * chore: Create directories for SAP deployment automation * chore: Update authentication prompt for App Registration configuration * Update configuration_menu.sh * chore: Update PostBuildCleanup task to version 4 in 01-deploy-control-plane.yaml * chore: Update PostBuildCleanup task to version 4 for all stages in 01-deploy-control-plane.yaml * chore: Update PostBuildCleanup task to version 4 in deploy pipelines * chore: Update clusterPrep-RedHat.yml to avoid resource discovery during location constraints * Do not fail on saptune solution verify (#602) Set failed_when to false, so that saptune does not fail on N/A parameters. * chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists * Revert "chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists" This reverts commit e86dff14a149d8c866b2ce5b4570f2212959c062. * chore: Update accelerated networking configuration in Terraform modules, as enable_accelerated_networking is deprecated; new parameter is accelerated_networking_enabled * Ensure we are in the right context when getting access tokens and subsequently running the ps1 script, where we already have the trust setup for the SSH key. Not doing it this way leads to either needing to manually create an SSH session inside pwsh with POSH-SSH to ensure the known_hosts entry is updated or having to update the quality check script upstream, to allow the -Force flag for the SSH session. (#603) * Fix for catching AHCO_INA_SERVICE delivery Unit import failure (#605) Co-authored-by: jasksingh * Web App Component updates * Several (bug)fixes for RHEL deployments and deployments in general (#604) * Add fast_stop=no to pacemaker filesystem resources Pacemaker isn't respecting the stop timeout on filesystem resources due to the default setting fast_stop=yes. Without this setting, fencing will occur because, if SAP (A)SCS / ERS isn't stopped in time, processes will be terminated which are restarted by sapstartsrv, and the node will be fenced because the filesystem can't be unmounted. https://www.suse.com/support/kb/doc/?id=000020860 https://access.redhat.com/solutions/4801371 * Distribute systemd services between SCS / ERS nodes and stop services Both (A)SCS and ERS systemd services should be present on SCS and ERS nodes, otherwise pacemaker only handles SCS on the SCS node and ERS on the ERS node with the systemd integration.
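For context, `fast_stop` is a plain resource parameter on the ocf:heartbeat:Filesystem agent, so the fix is a one-line update per filesystem resource. A hedged Ansible sketch for the RHEL/pcs case, with an illustrative resource name (`fs_{{ sap_sid | upper }}_ascs` is not the framework's actual naming):

```yaml
- name: "5.6 SCS/ERS: Make the filesystem resource honor the stop timeout"
  ansible.builtin.command: >-
    pcs resource update fs_{{ sap_sid | upper }}_ascs fast_stop=no
  register: fast_stop_result
  changed_when: fast_stop_result.rc == 0
```

On SUSE, crmsh offers the equivalent `crm resource param <id> set fast_stop no`; the SUSE and Red Hat KB links above explain why the default `fast_stop=yes` can lead to fencing.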
* Add resource clear for move constraints on (A)SCS resource group * Bugfix folders on local disks to be managed after mounting local disk * sid_private_key isn't required * Add sdu_secret prefix/suffix to manage custom Key Vault secret naming * Update Web App to support NAT Gateway * Hotfix release after testing with new RHEL image (#611) * update: added fixes encountered during RHEL94 testing * chore: Update Red Hat and SLES package versions for Red Hat 9.4 * update: add network rules to deployer diagnostic storage account * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Add ${SID}_virtual_machines.json to git if it exists * chore: Update sap_system module to use database_server_vm_resource_ids for database server VMs * chore: Update sap_system module to include empty lists for SCS, application, and webdisp server VMs * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap_system module to use comma-separated database server VM resource IDs * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap_system module to include empty lists for SCS, application, and webdisp server VMs * chore: Update sap_system module to include application server VM resource IDs * chore: Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS
registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor use_spn variable to default to false for all SAP deployment modules * Refactor systemd process limit configuration for pacemaker cluster * Refactor systemd process limit configuration for pacemaker cluster * Update os-packages.yaml (#613) * chore: Refactor Azure Fencing Agent creation in 1.17 Generic Pacemaker role (#614) * Add SAP-CAL Integration for non-HA Installation (#608) * Add AVG support for Scale out scenarios (#577) * Add data and log volumes * Refactor AVG logic * Fix proximity_placement_group_id calculation in avg.tf * Refactor for_each condition in avg.tf * Refactor for_each condition in avg.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor zone calculation logic in variables_local.tf * Refactor proximity_placement_group_id calculation in avg.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in vm-app.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in infrastructure.tf * Refactor package update condition in 1.4.3-update-packages-RedHat.yaml --------- Co-authored-by: Kimmo Forss * Update subnet_cidr_storage in sap-parameters.tmpl * Update hosts jinja for client subnet * Update SAP-specific configuration playbook for HANA database scale-out scenario * Version update * Simplify Web App Identity management * Update Azure package versions in SDAFWebApp.csproj * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Add SLES 15.3, 15.4, and 15.5 repositories * Update Web Application authentication configuration script and simplify Web App Identity management * Refactor Web App Identity management and update authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script and simplify Web App Identity management * Commented out SSH trust relationship checks in 1.17.2-provision.yml * Revert "Commented out SSH trust relationship checks in 1.17.2-provision.yml" This reverts commit 09cd30de6003a891b5c8c31b4c96b495b676aa9b. * ACSS updates * Oracle simplification * Add AutoUpdate.Enabled configuration in 1.1-swap role and enable package cache update in 1.4-packages role * Update deployment type configuration in OS and SAP specific playbooks * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Update WAAgent package and restart service in 1.1-swap role * Updated key_vault_sap_landscape.tf * Revert "Updated key_vault_sap_landscape.tf" * Update WAAgent package and restart service in 1.1-swap role * Add SAP CAL Integration * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Revert "Add SAP CAL Integration" This reverts commit adae6662ba478d9f1d4d0de7f5c175e4f5da739b. 
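The long run of ACSS registration commits above converges on one pattern: call the Azure CLI through `ansible.builtin.command` rather than `ansible.builtin.shell`, and register the result. A minimal sketch, where `acss_resource_id` is a hypothetical variable standing in for the role's actual inputs:

```yaml
- name: "0.5 ACSS: Determine the cluster group location"
  ansible.builtin.command: >-
    az resource show --ids {{ acss_resource_id }}
    --query location --output tsv
  register: cluster_group_location
  changed_when: false
```

Compared with `shell`, the `command` module skips the extra shell layer, so quoting bugs and injection through templated values are much harder to hit.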
* Update WAAgent package and restart service in 1.4-packages role * Update waagent configuration check in 1.4-packages role * Update waagent configuration check and systemd service reload in 1.4-packages role * Update AutoUpdate.Enabled configuration and add Extensions.WaitForCloudInit configuration in 1.1-swap role * Update waagent configuration check and systemd service reload in 1.1-swap role * Update waagent configuration check and systemd service reload in 1.1-swap role * Update database_high_availability condition in playbook_04_00_01_db_ha.yaml * Add the ability to block app registration * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update web_instance_number and add web_sid variable in sap_system/transform.tf * Fix validation error message for web dispatcher sid in variables_global.tf * Remove chkconfig package from os-packages.yaml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update OS version check for RHEL 8.2 and SLES 15 in 5.6.1-set_runtime_facts.yml * Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml * Update Oracle ASM backup process and fix file permissions * Fix file path in 1.4.0-packages-RedHat-prep.yaml * Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml * Update file path and preserve file permissions in 1.4.0-packages-RedHat-prep.yaml * Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml * Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml * Update wait time for StartService in 5.6 SCS/ERS Validation * Update Terraform version to 1.8.0 in deployment scripts and tfvar_variables.tf files * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Update virtual machine extension reference in vm.tf * Update virtual machine extension version to 1.0 in vm.tf * Fix missing else statement in deploy control plane pipeline * Update network interface and virtual machine counts in vm-observer.tf * Update database high availability configuration * Update use_spn property to false in LandscapeModel and SystemModel * Update Terraform and Ansible versions to 1.7.5 in deployment scripts and variables * Update Display value in SystemDetails.json * Fix validation condition in variables_global.tf * Add ORACLE Post Processing: Reboot after Enabling HugePages task * Fix typo in Oracle Data Guard - Observer: Change UID for Oracle user task * install passlib * Add patch_mode support * Update deployment playbook to set single_server fact based on host count * Update patch_mode configuration in Terraform files * Update file permissions in SAP deployment playbook * Update deployment playbooks to set single_server fact consistently * Fix waagent configuration in swap role * Fix indentation in swap role tasks/main.yaml * Fix cluster group move command in 5.6 SCS/ERS Validation playbook * Fix condition in 1.17-generic-pacemaker playbook to exclude node_tier 'hana' * Fix commented out 
corosync configuration in 1.17-generic-pacemaker playbook * Create the SID subfolder * Update verbosity level in 5.6.7-config-systemd-sap-start.yml * Add passlib * Simplify Python logic * Update app_bom_id variable in 5.3-app-install/tasks/main.yaml * Update passlib installation in Ansible playbooks * Update reboot timeout and post-reboot delay in 5.6.4.2-sap-resources-Suse.yml * Update swap role and package tasks * Fix condition in 1.17-generic-pacemaker playbook to exclude node_tier 'hana' * Fix failed_when condition in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * enable corosync and pacemaker on Suse * change from command to shell * Update verbosity level for debug message in 5.6.4.0-cluster-Suse.yml * Refactor command to shell in 5.6-scsers-pacemaker tasks * Refactor command to shell in 5.6-scsers-pacemaker tasks * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks * Refactor path in ora-dg-observer-setup.yaml to include sap_sid variable * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks and add SAP component installation check * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook and 5.6-scsers-pacemaker tasks * add missing quotes * Fix disk space validation in playbook_00_validate_parameters.yaml * Refactor SAP resource flag setting in Ansible playbooks * Refactor SAP component installation check in 5.6-scsers-pacemaker tasks * Refactor SAP resources installed message in 5.6-scsers-pacemaker tasks * Refactor SCS/ERS validation tasks in 5.6-scsers-pacemaker playbook * Refactor SAP resource flag setting in Ansible playbooks * Refactor ORACLE: Find MOPatch tasks in 4.1.0-ora-install playbook * support for pools with auto qos * support for pools with auto qos * support for pools with auto qos * provide a way to override the oracle user * Update Web Application Configuration documentation * Fix default value for SAP_installed in 5.6-scsers-pacemaker tasks * Fix default value for SAP_installed in 5.6-scsers-pacemaker tasks * Fix shell command in 5.6-scsers-pacemaker pre_checks.yml * Passwordless Web App * Passwordless * Update variable group creation in New-SDAFDevopsProject.ps1 script * Fix client_id reference in app_service.tf * Update packages * Update Web Application Configuration to use resource group scope for role assignments * Update Web Application Configuration documentation * Fix target_nodes value in 2.6.1-anf-mounts.yaml * Web App updates * Update enable_db_lb_deployment logic in variables_local.tf * Bump up the dotnet version * Remove PAT * Remove PAT * Fix TF_VAR_agent_pat assignment in deploy control plane pipeline * Fix PAT assignment in deploy control plane pipeline * Update TF_VAR_agent_pool assignment in deploy control plane pipeline * Add MSI registration * Fix typo * Update versionLabel to v3.11.0.2 in New-SDAFDevopsProject.ps1 * Fix typo in New-SDAFDevopsProject.ps1 + add PAT back for Control Plane * Update ANF mount paths in 2.6.1-anf-mounts.yaml * Fix PostBuildCleanup task in deploy control plane pipeline * Update PostBuildCleanup task to version 4 in deploy control plane pipeline * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update DEPLOYER folder and file validations in 
deploy control plane pipeline * Update deploy control plane pipeline with environment and location information * Update deploy control plane pipeline with Deployer TFvars variable * Update deploy control plane pipeline with Library TFvars variable * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update installer.sh to display parameter file and current directory * Update deploy control plane pipeline with Library and Deployer TFvars variables * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update PostBuildCleanup task to version 3 in deploy control plane pipeline * Update dotnet-sdk installation in configure_deployer.sh.tmpl * Update deploy control plane pipeline with TF_VAR_agent_pat variable * Update deploy control plane pipeline with Azure CLI version display * Update deploy control plane pipeline with Workload TFvars variable * Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to remove the 'recurse' option in the ansible.builtin.file task * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to fix failed_when condition in rman restore tasks * chore: Update app_service.tf to add WHICH_ENV variable * Update app_service.tf to allow specific app registrations * chore: Update NuGet.Packaging dependency to version 6.9.1 * chore: Update app_service.tf to remove unused app setting and add WHICH_ENV variable * chore: Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * chore: Update AFS Mount task to exclude 'app' node tier * chore: Update hosts.j2 template to exclude virtual hosts for non-high availability scenarios * chore: Update New-SDAFDevopsProject.ps1 to improve App Registration creation process * Change the ID to add * chore: Update New-SDAFDevopsProject.ps1 to improve App Registration creation process * Add SAP-CAL Integration * Linting * chore: Update deploy control plane pipeline with necessary environment variables * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use System.AccessToken for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to remove unused agent pool check * chore: Remove unused agent pool check in deploy control plane pipeline * chore: Update deploy control plane pipeline to use $(PAT) for AZURE_DEVOPS_EXT_PAT * changes to ERS group * chore: Update deploy control plane pipeline to improve error handling and logging * chore: Update deploy control plane pipeline to enable Azure AD authentication * chore: Update deploy control plane pipeline to extract deployer_random_id from environment file * chore: Improve error handling and logging in deploy control plane pipeline * chore: Update deploy control plane pipeline to extract deployer_random_id from environment file * chore: Update deploy control plane pipeline to create variable group variables for key vault, terraform remote storage subscription, and deployer random ID seed * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to 
fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to use $(PAT) instead of $(System.AccessToken) for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to improve error handling and logging * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update bootstrap flag to false in sap_library module * chore: Update storage account network rules for tfstate and sapbits * chore: Update dotnet-sdk installation to version 8.0 * chore: Update dotnet-sdk installation to latest version * chore: Update HttpClient usage in RestHelper.cs and Azure SDK versions in SDAFWebApp.csproj * chore: Update random_id_b64 format in output.tf files * chore: Update RestHelper.cs to accept a type parameter in the constructor * chore: Ignore changes to app_settings in azurerm_windows_web_app resource * chore: Update random_id_b64 format in output.tf files * chore: Update RestHelper.cs to use HttpClient instead of HttpClientGH * chore: Add Build Service user to Build Administrators group * Add the ability to authenticate using PAT * chore: Update RestHelper.cs to use HttpClient instead of HttpClientGH * Update on devops login * chore: Update New-SDAFDevopsProject.ps1 to use tsv output for project creation * chore: Refactor RestHelper.cs to use HttpClient and support PAT authentication * Change module name * update: SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. This commit updates the configuration of SAP ASCS/SCS/ERS start resources for SUSE - ENSA1 and ENSA2. * chore: Update SAP Directories creation in ansible playbook This commit updates the ansible playbook to create SAP Directories. It modifies the tasks to create the directories "/usr/sap/trans" and "/sapmnt/{{ sap_sid | upper }}". These changes improve the handling of SAP Transport Filesystems in the deployment process. * feat: Add additional destination port ranges for NSG rules This commit updates the NSG rules in the `sap_landscape` module to include additional destination port ranges. The destination port ranges for the `nsr_controlplane_app`, `nsr_controlplane_web`, `nsr_controlplane_storage`, `nsr_controlplane_db`, and `nsr_controlplane_admin` rules have been expanded to include ports 2049 and 111. * Update error message * Update SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. * Add the MSI to the project * Added debug statement to playbook_sapcal_integration.yaml * Added debug statement to playbook_sapcal_integration.yaml * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 839170ef4c76cc1b50a020e4ca3d5d3b1b20b932. * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 5170d0b0eaa69964306c16541568bf5325403345. 
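The SAP Directories commit above names the two mount points explicitly, so the task is essentially the following (task name and mode are illustrative; the paths come from the commit text):

```yaml
- name: "Create SAP transport and sapmnt directories"
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: "0755"          # assumed mode; the commit doesn't state one
  become: true
  loop:
    - "/usr/sap/trans"
    - "/sapmnt/{{ sap_sid | upper }}"
```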
* Skip all BOM related tasks if enable_sap_cal is true * Updated the variable name for consistency * Ensured tasks run with appropriate privileges * Store SAP-CAL API response/file in the repository * Lint code and set default values * Use a secure tempfile --------- Co-authored-by: Kimmo Forss Co-authored-by: Kimmo Forss Co-authored-by: devanshjain Co-authored-by: hdamecharla Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> * Allow for splitting out the privatelink resources (#616) * Add the ability to split out the privatelink resources * feat: Add privatelinkdnsmanagement provider configuration * refactor: Update storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in dns.tf and storage_accounts.tf * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in infrastructure.tf, key_vault.tf, and keyvault_endpoint.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in keyvault_endpoint.tf and storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update count condition in dns.tf to use local.use_local_privatelink_dns instead of negation of it * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf and sap_deployer/tfvar_variables.tf to use var.dns_settings.dns_zone_names * Add the ability to split out DNS records for privatelink resources * refactor: Update DNS zone names to use var.dns_settings.dns_zone_names * refactor: Add privatelink DNS resource group and subscription properties to LandscapeModel * refactor: Update DNS zone names in infrastructure.tf, key_vault.tf, and keyvault_endpoint.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in LandscapeDetails.json, storage_accounts.tf, infrastructure.tf, and transform.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in transform.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * Add register_virtual_network_to_dns attribute * Add the ability to control the patch mode * add vm_agent_platform_updates_enabled * refactor: Remove patch_mode from vm-scs.tf * refactor: Remove patch_mode from vm-anchor.tf * Add auto update of the extensions * refactor: Tweak the Windows patch mode * Windows update settings * Debug show SystemD version * refactor: Update SystemD version debug message in 1.17 Generic Pacemaker role * refactor: Update VM patch information in SystemModel and LandscapeModel * refactor: Update Process limit configuration in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for pacemaker version in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for systemd version in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for systemd version in 1.17 Generic Pacemaker role * Remove the white space * fix: Associate the iSCSI subnet with the route table * refactor: Add python3-pip package for different node tiers in HA setup * refactor: remove the lower pipe from distro name * refactor: Split out OracleLinux tasks * refactor: 
Update iSCSI subnet association with route table * chore: Update NuGet.Packaging dependency to version 6.11.0 * Test if we can handle no read access scenarios to key vault * revert casing * refactor: Split out OracleLinux tasks * chore: Add condition to include custom repositories in 1.3 Repository tasks * refactor: Update 1.3 Repository tasks to include custom repositories for SUSE and RedHat * refactor: Remove unnecessary OracleLinux tasks and custom repositories * refactor: Update VM deployment configuration * Remove the token check * refactor: Add TF_VAR_agent_pat to control plane deployment pipeline * refactor: Fix private DNS zone ID in keyvault_endpoint.tf * Web App and version updates * Restore patch_mode * Web App updates * chore: Add System.Data.SqlClient package reference * refactor: Update 1.3 Repository tasks to include custom repositories for SUSE and RedHat * refactor: Update tfvar_variables.tf with new variables for tfstate storage account and deployer's tfstate file * Remove some of the python packages * Remove unnecessary python packages * refactor: Remove trailing spaces in LandscapeDetails.json and SystemDetails.json * refactor: Remove trailing spaces in LandscapeDetails.json and SystemDetails.json * Fix reboot on RHEL * refactor: Fix typo in DBLoad task names * refactor: Update cluster resource monitor intervals to 20 seconds * LINT fixes --------- Co-authored-by: Kimmo Forss * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL (#618) * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Set pipefail and Code Linting * feat: Add ability to split out privatelink resources * feat: Refactor subnet configuration to enforce private link endpoint network policies Refactor the subnet configuration in the `sap_landscape` module to enforce private link endpoint network policies. This change ensures that the private link endpoints have network policies enabled, as specified by the `var.use_private_endpoint` variable.
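A sketch of what extending the 'root' and 'tmp' logical volumes for SAP-CAL (#618) can look like on a RHEL image, assuming the Azure marketplace image's `rootvg`/`rootlv`/`tmplv` naming and illustrative sizes:

```yaml
- name: "Extend root and tmp logical volumes for SAP-CAL prerequisites"
  community.general.lvol:
    vg: rootvg
    lv: "{{ item.lv }}"
    size: "{{ item.size }}"
    resizefs: true        # grow the filesystem together with the LV
  become: true
  loop:
    - { lv: "rootlv", size: "+10g" }   # illustrative sizes, not the
    - { lv: "tmplv", size: "+5g" }     # framework's actual values
```

This is also why a free-space check on the volume group belongs in front of it: `lvol` fails when the requested extension exceeds the remaining extents.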
Co-authored-by: Kimmo Forss * fix: update the management dns subscription id to saplib sub id, pin azurerm version in landscape, deployer (#619) * pin azurerm version in deployer and landscape * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * Add iSCSI NSG rule * Change rule name * Add Expiry to workload zone key vault secrets * Provide a more robust way to source the deployer subscription * Add support for secret expiry * chore: Update keyvault set-policy command in sap-workload-zone.yaml Co-authored-by: Kimmo Forss * feat: Add expiry date to secrets in key vault * chore: Disable cross-tenant replication for sapmnt storage account * chore: Update DNS configuration for sap_library module * chore: Update DNS configuration for sap_library module * chore: Update use_local_privatelink_dns condition in sap_library module * chore: Update DNS configuration for sap_library module * chore: Update private DNS configuration in sap_library module * Don't create route table if Firewall is used * chore: Update key_vault.tf for sap_library module * chore: Update private DNS configuration in sap_library module * chore: Update private endpoint configuration for sapmnt storage account * Bump up the TF version * Also add the DB Virtual Hostname * chore: Update private endpoint configuration for sapmnt storage account * chore: Update default value for "use_private_endpoint" to true * chore: remove extra line from 2.10.3.yaml * Update github-actions-ansible-lint.yml Update lint version * chore: Update yum to dnf for Oracle RPM package installation * chore: Update secret expiry date for SAP cluster and system passwords * chore: Update default value for "use_private_endpoint" to true * chore: Update secret expiry date for SAP cluster and system passwords * chore: Update secret expiry date format for SAP cluster and system passwords * chore: Update resource group name for private DNS zone virtual network link * chore: Update secret expiry date format for SAP cluster and system passwords * Set Expiry for all key vault secrets * chore: Fix typo * Feature: Add support for SLES 15 SP6 * chore: Remove redundant code for adding HA repositories for RHEL * Configure constraint HANA ANF mounts only when HANA mountpoints are used (#625) When a deployment uses ANF but the data, log and shared mountpoints are not on ANF, the constraint shouldn't be configured.
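In other words, the order constraint between the ANF mount group and SAP HANA in #625 should only exist when HANA actually sits on ANF. A hedged sketch keyed off the mountpoint variables; the resource names (`g_hana_anf_mounts`, `rsc_SAPHana_...`) and the `hana_*_mountpoint` variables are assumptions for illustration:

```yaml
- name: "5.5 HANA Pacemaker: Order ANF mounts before SAP HANA (ANF only)"
  ansible.builtin.command: >-
    pcs constraint order start g_hana_anf_mounts
    then rsc_SAPHana_{{ db_sid | upper }}
  when:
    - hana_data_mountpoint is defined
    - hana_log_mountpoint is defined
    - hana_shared_mountpoint is defined
```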
* AND constraint logic * Add support for TGZ files * Fix the OR statement * chore: Update verbosity level for debugging BOM object creation * chore: Update reboot timeout and post-reboot delay for 5.6 SCSERS - RHEL instance * Run reboot as root * chore: Expand volumes for Red Hat OS family * chore: Update Test-SDAFReadiness.ps1 script * chore: Update PostBuildCleanup task version to 4 * chore: Update privatelink DNS configuration in transform.tf * Expand logical volumes and resize file systems * Expand logical volumes and resize file systems * Fix Linting * chore: update the check for the free size in VG * Expand logical volumes and resize file systems * chore: Update expand-volumes task to handle default values for sufficient_vg_space and sufficient_vg_space_db * Chore: Fix typo * chore: Clear the failed state of hosts during database installation playbook * Update free space check for SAPCAL * Update ANF_sapmnt variable description and default value * Update ANF_sapmnt variable description and default value * HSR changes * chore: Update Azure.ResourceManager package to version 1.13.0 * Update backup commands and capture backup results * Update backup commands and capture backup results * Update backup commands and capture backup results * Update backup commands to capture backup results * Update backup commands to capture backup results * V3.12.0.0 (#624) * Bring in the manual updates * Fixing Merge conflicts * Fix Linting * Pacemaker changes, saptune and NAT Gateway (#583) ## Summary of Changes ### Authentication and Identity Management - **Web Application Authentication Configuration**: Repeated updates to refine and simplify the authentication configuration and identity management scripts. ### Repository and Package Management - **SLES Repositories**: Added repositories for SLES 15.3, 15.4, and 15.5. - **WAAgent Updates**: Updated WAAgent package, configuration checks, and systemd service reloads across multiple roles. ### Deployment Configuration - **NAT Gateway**: Added support for provisioning a NAT gateway, including configuration variables in Terraform files. - **AutoUpdate Configuration**: Updated AutoUpdate.Enabled settings and added Extensions.WaitForCloudInit across various roles. - **Oracle Simplification**: Simplified Oracle-related configurations, including ASM backup process and Data Guard tasks. - **SAP Deployment Playbooks**: Various updates to SAP deployment playbooks, including fixing conditions, resource flag settings, and systemd service paths. ### Infrastructure and Pipeline Enhancements - **Control Plane Pipeline**: Multiple fixes and improvements to error handling, logging, environment variables, and Azure AD authentication. - **Terraform and Ansible Versions**: Updated versions in deployment scripts to 1.7.5. - **Dotnet SDK**: Bumped dotnet SDK installation to the latest version. ### Miscellaneous - **Error Handling and Logging**: Improved error handling and logging across various deployment scripts and playbooks. - **Validation Fixes**: Fixed validation conditions for disk space, OS version checks, and cluster initialization commands. 
--------- Co-authored-by: Kimmo Forss Co-authored-by: devanshjain * Fix regex necessary to comment lines in /usr/sap/sapservices (#584) Co-authored-by: Csaba Daradics * remove duplicate resource * Fix path * chore: Fix the count for the table resource * Misc fixes * Fix systemd service reload in 1.4 Packages role * Various Terraform code fixes (#586) * Fix typo in terraform-units/modules/sap_landscape/providers.tf * Remove duplicate of resource azurerm_network_security_rule/nsr_controlplane_storage In terraform-units/modules/sap_landscape/nsg.tf * Remove fourth argument from nat_gateway_name definition In terraform-units/modules/sap_landscape/variables_local.tf * Remove duplicate for database_kdump_disks In terraform-units/modules/sap_system/anydb_node/outputs.tf * Remove all duplicates from terraform-units/modules/sap_system/app_tier/vm-scs.tf * Remove duplicates in terraform-units/modules/sap_system/output_files/sap-parameters.tmpl --------- Co-authored-by: Csaba Daradics * chore: Update count condition in dns.tf for local private DNS usage * chore: Update NAT Gateway public IP name format * chore: Update NAT Gateway public IP lifecycle configuration * chore: Update NAT Gateway provider to azureng * chore: Fix typo in azureng provider configuration alias in sap_landscape module * chore: Update NAT Gateway provider to azurerm.main * Update 01-deploy-control-plane.yaml * chore: Update app_service_plan name format in sap_deployer module * Update ARM_CLIENT_SECRET assignment in deploy control plane pipeline * Add the compliance extensions also to the deployers * Ensure that custom_logical_volumes can be striped + have sensible stripesize default if a lvm consists of more than one pv. (#587) Use already established pattern from framework specific LVMs to define stripesize on custom logical volumes. 
* Update AMS provider creation tasks in ansible playbook * Update deploy control plane pipeline to use service principal for authentication * chore: Temporarily set identity type to "SystemAssigned, UserAssigned" in app_service.tf * Various Ansible fixes (#591) * Add Red Hat 8.10 repo to 1.3-repository vars * Create entries for Red Hat 8.10 in 1.4-packages vars * Add 'pam' to OS packages list for DB2 with state 'latest' Ensures that x86_64 package is updated, avoiding conflict with libpam.so.0 install, which requires i686 version of pam * Add "state: 'latest'" to loops * Correct cluster version check in 1.17-generic-pacemaker * Correct cluster version check in 5.6-scsers-pacemaker * Correct cluster version check in 5.5-hanadb-pacemaker * Create entries for Red Hat 8.10 in 1.17-generic-pacemaker --------- Co-authored-by: Csaba Daradics * chore: Adjust Azure fence agent packages and remove unused Azure Python packages from list when deploying on SLES 15 SP5 * chore: Override changed status for saptune_check and active_saptune_solution tasks in 2.10.3.yaml * chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml * chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml * chore: Update New-SDAFDevopsWorkloadZone.ps1 script to fix variable group creation issue * chore: Refactor saptune_solution_to_apply logic in 2.10.3.yaml * Set HDB schema name for ABAP and JAVA systems (#593) * Set HDB Schema Name task * fix command error and remove ignore_errors * Fix parsing error * chore: Comment out unnecessary role assignments in New-SDAFDevopsProject.ps1 script * Release testing (#597) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook * Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline. * remove duplicate block * remove blank line * add mode to get_url downloaded file. * remove blank line from start of file.
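The `get_url` mode fix mentioned above is worth spelling out: without an explicit `mode`, the downloaded file's permissions depend on the remote umask. A sketch in which `qa_script_url` and the destination path are hypothetical:

```yaml
- name: "Download the quality assurance script with an explicit mode"
  ansible.builtin.get_url:
    url: "{{ qa_script_url }}"      # hypothetical variable
    dest: "/tmp/QualityCheck.ps1"   # illustrative destination
    mode: "0644"
  register: qa_script_download
```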
* Rename quality check to quality assurance (#600) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook * chore: Remove cacheable flag from 3.3 BoM Processing task * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline. * remove duplicate block * remove blank line * add mode to get_url downloaded file. * remove blank line from start of file. * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * chore: move SAP on Azure quality checks after post configuration * chore: Update quality check paths to quality_assurance * chore: Update quality assurance file paths * chore: Refactor YAML files to improve code organization and readability * chore: Add cacheable flag to 3.3 BoM Processing task --------- Co-authored-by: dkSteBTh * chore: Update os-packages.yaml for redhat8.6 * chore: Create directories for SAP deployment automation * chore: Update authentication prompt for App Registration configuration * chore: Update PostBuildCleanup task to version 4 in 01-deploy-control-plane.yaml * chore: Update PostBuildCleanup task to version 4 for all stages in 01-deploy-control-plane.yaml * chore: Update PostBuildCleanup task to version 4 in deploy pipelines * chore: Update clusterPrep-RedHat.yml to avoid resource discovery during location constraints * Do not fail on saptune solution verify (#602) Set failed_when to false, so that saptune does not fail on N/A parameters. * chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists * Revert "chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists" This reverts commit e86dff14a149d8c866b2ce5b4570f2212959c062. * chore: Update accelerated networking configuration in Terraform modules, as enable_accelerated_networking is deprecated; new parameter is accelerated_networking_enabled * Ensure we are in the right context when getting access tokens and subsequently running the ps1 script, where we already have the trust setup for the SSH key. Not doing it this way leads to either needing to manually create an SSH session inside pwsh with POSH-SSH to ensure the known_hosts entry is updated or having to update the quality check script upstream, to allow the -Force flag for the SSH session.
(#603) * Fix for catching AHCO_INA_SERVICE delivery Unit import failure (#605) Co-authored-by: jasksingh * Web App Component updates * Several (bug)fixes for RHEL deployments and deployments in general (#604) * Add fast_stop=no to pacemaker filesystem resources Pacemaker isn't respecting the stop timeout on filesystem resources due to the default setting fast_stop=yes. Without this setting, fencing will occur because, if SAP (A)SCS / ERS isn't stopped in time, processes will be terminated which are restarted by sapstartsrv, and the node will be fenced because the filesystem can't be unmounted. https://www.suse.com/support/kb/doc/?id=000020860 https://access.redhat.com/solutions/4801371 * Distribute systemd services between SCS / ERS nodes and stop services Both (A)SCS and ERS systemd services should be present on SCS and ERS nodes, otherwise pacemaker only handles SCS on the SCS node and ERS on the ERS node with the systemd integration. * Add resource clear for move constraints on (A)SCS resource group * Bugfix folders on local disks to be managed after mounting local disk * sid_private_key isn't required * Add sdu_secret prefix/suffix to manage custom Key Vault secret naming * Update Web App to support NAT Gateway * Hotfix release after testing with new RHEL image (#611) * update: added fixes encountered during RHEL94 testing * chore: Update Red Hat and SLES package versions for Red Hat 9.4 * update: add network rules to deployer diagnostic storage account * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Add ${SID}_virtual_machines.json to git if it exists * chore: Update sap_system module to use database_server_vm_resource_ids for database server VMs * chore: Update sap_system module to include empty lists for SCS, application, and webdisp server VMs * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap_system module to use comma-separated database server VM resource IDs * chore: Update sap-vm-resources.tmpl to include additional server information * chore: Update sap_system module to include empty lists for SCS, application, and webdisp server VMs * chore: Update sap_system module to include application server VM resource IDs * chore: Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor cluster_group_location task in ACSS registration role * Refactor cluster_group_location task in ACSS registration role * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use
'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' for executing Azure CLI commands * Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands * Refactor use_spn variable to default to false for all SAP deployment modules * Refactor systemd process limit configuration for pacemaker cluster * Refactor systemd process limit configuration for pacemaker cluster * Update os-packages.yaml (#613) * chore: Refactor Azure Fencing Agent creation in 1.17 Generic Pacemaker role (#614) * Add SAP-CAL Integration for non-HA Installation (#608) * Add AVG support for Scale out scenarios (#577) * Add data and log volumes * Refactor AVG logic * Fix proximity_placement_group_id calculation in avg.tf * Refactor for_each condition in avg.tf * Refactor for_each condition in avg.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor zone calculation logic in variables_local.tf * Refactor proximity_placement_group_id calculation in avg.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in vm-app.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in infrastructure.tf * Refactor package update condition in 1.4.3-update-packages-RedHat.yaml --------- Co-authored-by: Kimmo Forss * Update subnet_cidr_storage in sap-parameters.tmpl * Update hosts jinja for client subnet * Update SAP-specific configuration playbook for HANA database scale-out scenario * Version update * Simplify Web App Identity management * Update Azure package versions in SDAFWebApp.csproj * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Add SLES 15.3, 15.4, and 15.5 repositories * Update Web Application authentication configuration script and simplify Web App Identity management * Refactor Web App Identity management and update authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script and simplify Web App Identity management * Commented out SSH trust relationship checks in 1.17.2-provision.yml * Revert "Commented out SSH trust relationship checks in 1.17.2-provision.yml" This reverts commit 09cd30de6003a891b5c8c31b4c96b495b676aa9b. 
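The systemd process limit refactoring for the pacemaker cluster mentioned above amounts to a drop-in override; a sketch assuming a `TasksMax=4096` value (the commits don't state the exact number):

```yaml
- name: "1.17 Pacemaker: Ensure the drop-in directory exists"
  ansible.builtin.file:
    path: /etc/systemd/system/pacemaker.service.d
    state: directory
    mode: "0755"

- name: "1.17 Pacemaker: Raise the task limit for the cluster stack"
  ansible.builtin.copy:
    dest: /etc/systemd/system/pacemaker.service.d/override.conf
    content: |
      [Service]
      TasksMax=4096
    mode: "0644"
```

A `systemctl daemon-reload` (for example via a handler) is needed before the new limit takes effect.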
* ACSS updates * Oracle simplification * Add AutoUpdate.Enabled configuration in 1.1-swap role and enable package cache update in 1.4-packages role * Update deployment type configuration in OS and SAP specific playbooks * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Update WAAgent package and restart service in 1.1-swap role * Updated key_vault_sap_landscape.tf * Revert "Updated key_vault_sap_landscape.tf" * Add SAP CAL Integration * Revert "Add SAP CAL Integration" This reverts commit adae6662ba478d9f1d4d0de7f5c175e4f5da739b. * Update WAAgent package and restart service in 1.4-packages role * Update waagent configuration check in 1.4-packages role * Update waagent configuration check and systemd service reload in 1.4-packages role * Update AutoUpdate.Enabled configuration and add Extensions.WaitForCloudInit configuration in 1.1-swap role * Update waagent configuration check and systemd service reload in 1.1-swap role * Update database_high_availability condition in playbook_04_00_01_db_ha.yaml * Add the ability to block app registration * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update web_instance_number and add web_sid variable in sap_system/transform.tf * Fix validation error message for web dispatcher sid in variables_global.tf * Remove chkconfig package from os-packages.yaml * Update OS version check for RHEL 8.2 and SLES 15 in 5.6.1-set_runtime_facts.yml * Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml * Update Oracle ASM backup process and fix file permissions * Fix file path in 1.4.0-packages-RedHat-prep.yaml * Update file path and preserve file permissions in 1.4.0-packages-RedHat-prep.yaml * Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml * Update wait time for StartService in 5.6 SCS/ERS Validation * Update Terraform version to 1.8.0 in deployment scripts and tfvar_variables.tf files * Fix missing else statement in deploy control plane pipeline * Update virtual machine extension reference in vm.tf * Update virtual machine extension version to 1.0 in vm.tf * Update network interface and virtual machine counts in vm-observer.tf * Update database high availability configuration * Update use_spn property to false in LandscapeModel and SystemModel * Update Terraform and Ansible versions to 1.7.5 in deployment scripts and variables * Update Display value in SystemDetails.json * Fix validation condition in variables_global.tf * Add ORACLE Post Processing: Reboot after Enabling HugePages task * Fix typo in Oracle Data Guard - Observer: Change UID for Oracle user task * install passlib * Add patch_mode support * Update deployment playbook to set single_server fact based on host count * Update patch_mode configuration in Terraform files * Update file permissions in SAP deployment playbook * Update deployment playbooks to set single_server fact consistently * Fix waagent configuration in swap role * Fix indentation in swap role tasks/main.yaml * Fix cluster group move command in 5.6 SCS/ERS Validation playbook * Fix condition in 1.17-generic-pacemaker playbook to exclude node_tier 'hana' * Fix commented out corosync configuration in 1.17-generic-pacemaker playbook * Create the SID subfolder * Update verbosity level in 5.6.7-config-systemd-sap-start.yml * Add passlib * Simplify Python logic * Update app_bom_id variable in 5.3-app-install/tasks/main.yaml * Update passlib installation in Ansible playbooks * Update reboot timeout and post-reboot delay in 5.6.4.2-sap-resources-Suse.yml * Update swap role and package tasks * Fix failed_when condition in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * enable corosync and pacemaker on Suse * change from command to shell * Update verbosity level for debug message in 5.6.4.0-cluster-Suse.yml * Refactor command to shell in 5.6-scsers-pacemaker tasks * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks * Refactor path in ora-dg-observer-setup.yaml to include sap_sid variable * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks and add SAP component installation check * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook and 5.6-scsers-pacemaker tasks * add missing quotes * Fix disk space validation in playbook_00_validate_parameters.yaml * Refactor SAP resource flag setting in Ansible playbooks * Refactor SAP component installation check in 5.6-scsers-pacemaker tasks * Refactor SAP resources installed message in 5.6-scsers-pacemaker tasks * Refactor SCS/ERS validation tasks in 5.6-scsers-pacemaker playbook * Refactor ORACLE: Find MOPatch tasks in 4.1.0-ora-install playbook * support for pools with auto qos * provide a way to override the oracle user * Update Web Application Configuration documentation * Fix default value for SAP_installed in 5.6-scsers-pacemaker tasks * Fix shell command in 5.6-scsers-pacemaker pre_checks.yml * Passwordless Web App * Passwordless * Update variable group creation in New-SDAFDevopsProject.ps1 script * Fix client_id reference in app_service.tf * Update packages * Update Web Application Configuration to use resource group scope for role assignments
* Fix target_nodes value in 2.6.1-anf-mounts.yaml * Web App updates * Update enable_db_lb_deployment logic in variables_local.tf * Bump up the dotnet version * Remove PAT * Fix TF_VAR_agent_pat assignment in deploy control plane pipeline * Fix PAT assignment in deploy control plane pipeline * Update TF_VAR_agent_pool assignment in deploy control plane pipeline * Add MSI registration * Fix typo * Update versionLabel to v3.11.0.2 in New-SDAFDevopsProject.ps1 * Fix typo in New-SDAFDevopsProject.ps1 + add PAT back for Control Plane * Update ANF mount paths in 2.6.1-anf-mounts.yaml * Fix PostBuildCleanup task in deploy control plane pipeline * Update PostBuildCleanup task to version 4 in deploy control plane pipeline * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update DEPLOYER folder and file validations in deploy control plane pipeline * Update deploy control plane pipeline with environment and location information * Update deploy control plane pipeline with Deployer TFvars variable * Update deploy control plane pipeline with Library TFvars variable * Update installer.sh to display parameter file and current directory * Update deploy control plane pipeline with Library and Deployer TFvars variables * Update PostBuildCleanup task to version 3 in deploy control plane pipeline * Update dotnet-sdk installation in configure_deployer.sh.tmpl * Update deploy control plane pipeline with TF_VAR_agent_pat variable * Update deploy control plane pipeline with Azure CLI version display * Update deploy control plane pipeline with Workload TFvars variable * Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to remove the 'recurse' option in the ansible.builtin.file task * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to fix failed_when condition in rman restore tasks * chore: Update app_service.tf to add WHICH_ENV variable * Update app_service.tf to allow specific app registrations * chore: Update NuGet.Packaging dependency to version 6.9.1 * chore: Update app_service.tf to remove unused app setting and add WHICH_ENV variable * chore: Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * chore: Update AFS Mount task to exclude 'app' node tier * chore: Update hosts.j2 template to exclude virtual hosts for non-high availability scenarios * chore: Update New-SDAFDevopsProject.ps1 to improve App Registration creation process * Change the ID to add * Add SAP-CAL Integration * Linting * chore: Update deploy control plane pipeline with necessary environment variables * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use System.AccessToken for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to remove unused agent pool check * chore: Remove unused agent pool check in deploy control plane pipeline * chore: Update deploy control plane pipeline to use $(PAT) for AZURE_DEVOPS_EXT_PAT * changes to ERS group * chore: Update deploy control plane pipeline to improve error handling and logging * chore: Update deploy control plane pipeline to enable Azure AD authentication * chore: Update deploy control plane pipeline to extract deployer_random_id from environment file * chore: Improve error handling and logging in deploy control plane pipeline * chore: Update deploy control plane pipeline to create variable group variables for key vault, terraform remote storage subscription, and deployer random ID seed * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to use $(PAT) instead of $(System.AccessToken) for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update bootstrap flag to false in sap_library module * chore: Update storage account network rules for tfstate and sapbits * chore: Update dotnet-sdk installation to version 8.0 * chore: Update dotnet-sdk installation to latest version * chore: Update HttpClient usage in RestHelper.cs and Azure SDK versions in SDAFWebApp.csproj * chore: Update random_id_b64 format in output.tf files * chore: Update RestHelper.cs to accept a type parameter in the constructor * chore: Ignore changes to app_settings in azurerm_windows_web_app resource * chore: Update RestHelper.cs to use HttpClient instead of HttpClientGH * chore: Add Build Service user to Build Administrators group * Add the ability to authenticate using PAT * Update on devops login * chore: Update New-SDAFDevopsProject.ps1 to use tsv output for project creation * chore: Refactor RestHelper.cs to use HttpClient and support PAT authentication * Change module name * update: SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. * chore: Update SAP Directories creation in ansible playbook This commit updates the ansible playbook to create the directories "/usr/sap/trans" and "/sapmnt/{{ sap_sid | upper }}", improving the handling of the SAP transport filesystems in the deployment process (see the sketch below). * feat: Add additional destination port ranges for NSG rules This commit updates the NSG rules in the `sap_landscape` module to include additional destination port ranges. The destination port ranges for the `nsr_controlplane_app`, `nsr_controlplane_web`, `nsr_controlplane_storage`, `nsr_controlplane_db`, and `nsr_controlplane_admin` rules have been expanded to include ports 2049 and 111 (NFS and rpcbind/portmapper).
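
A minimal sketch of the SAP Directories change referenced above (the two paths are the ones named in the commit; the task name and mode are illustrative assumptions):

    - name: "2.3 SAP Exports: Create SAP transport and sapmnt directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: '0755'    # illustrative; the role may use different ownership/permissions
      loop:
        - "/usr/sap/trans"
        - "/sapmnt/{{ sap_sid | upper }}"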
* Update error message * Update SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. * Add the MSI to the project * Added debug statement to playbook_sapcal_integration.yaml * Added debug statement to playbook_sapcal_integration.yaml * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 839170ef4c76cc1b50a020e4ca3d5d3b1b20b932. * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 5170d0b0eaa69964306c16541568bf5325403345. * Skip all BOM related tasks if enable_sap_cal is true * Updated the variable name for consistency * Ensured tasks run with appropriate privileges * Store SAP-CAL API response/file in the repository * Lint code and set default values * Use a secure tempfile --------- Co-authored-by: Kimmo Forss Co-authored-by: devanshjain Co-authored-by: hdamecharla Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> * Allow for splitting out the privatelink resources (#616) * Add the ability to split out the privatelink resources * feat: Add privatelinkdnsmanagement provider configuration * refactor: Update storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in dns.tf and storage_accounts.tf * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in infrastructure.tf, key_vault.tf, and keyvault_endpoint.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in keyvault_endpoint.tf and storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update count condition in dns.tf to use local.use_local_privatelink_dns instead of negation of it * refactor: Update DNS zone names in storage_accounts.tf and sap_deployer/tfvar_variables.tf to use var.dns_settings.dns_zone_names * Add the ability to split out DNS records for privatelink resources * refactor: Update DNS zone names to use var.dns_settings.dns_zone_names * refactor: Add privatelink DNS resource group and subscription properties to LandscapeModel * refactor: Update DNS zone names in LandscapeDetails.json, storage_accounts.tf, infrastructure.tf, and transform.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in transform.tf to use var.dns_settings.dns_zone_names * Add register_virtual_network_to_dns attribute * Add the ability to control the patch mode * add vm_agent_platform_updates_enabled * refactor: Remove patch_mode from vm-scs.tf * refactor: Remove patch_mode from vm-anchor.tf * Add auto update of the extensions * refactor: Tweak the Windows patch mode * Windows update settings * Debug: show SystemD version * refactor: Update SystemD version debug message in 1.17 Generic Pacemaker role * refactor: Update VM patch information in SystemModel and LandscapeModel * refactor: Update Process limit configuration in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for pacemaker version in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for systemd version in 1.17 Generic Pacemaker role * Remove the white space * fix: Associate the iSCSI subnet with the route table * refactor: Add python3-pip package for different node tiers in HA setup * refactor: remove the lower pipe from distro name * refactor: Split out OracleLinux tasks * refactor: Update iSCSI subnet association with route table * chore: Update NuGet.Packaging dependency to version 6.11.0 * Test if we can handle no read access scenarios to key vault * revert casing * chore: Add condition to include custom repositories in 1.3 Repository tasks * refactor: Update 1.3 Repository tasks to include custom repositories for SUSE and RedHat * refactor: Remove unnecessary OracleLinux tasks and custom repositories * refactor: Update VM deployment configuration * Remove the token check * refactor: Add TF_VAR_agent_pat to control plane deployment pipeline * refactor: Fix private DNS zone ID in keyvault_endpoint.tf * Web App and version updates * Restore patch_mode * Web App updates * chore: Add System.Data.SqlClient package reference * refactor: Update tfvar_variables.tf with new variables for tfstate storage account and deployer's tfstate file * Remove some of the python packages * Remove unnecessary python packages * refactor: Remove trailing spaces in LandscapeDetails.json and SystemDetails.json * Fix reboot on RHEL * refactor: Fix typo in DBLoad task names * refactor: Update cluster resource monitor intervals to 20 seconds * LINT fixes --------- Co-authored-by: Kimmo Forss * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL (#618) (a minimal sketch follows at the end of this section) * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Set pipefail and Code Linting * feat: Add ability to split out privatelink resources * feat: Refactor subnet configuration to enforce private link endpoint network policies Refactor the subnet configuration in the `sap_landscape` module to enforce private link endpoint network policies. This change ensures that the private link endpoints have network policies enabled, as specified by the `var.use_private_endpoint` variable.
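
A minimal sketch of the 'extend root and tmp logical volumes' change in PR #618, assuming the rootvg/rootlv/tmplv layout of RHEL images on Azure (volume group, LV names, and sizes are illustrative, not the values used by the role):

    # community.general.lvol grows each LV and, with resizefs, the filesystem
    # on top of it. The sizes below are placeholders.
    - name: "1.5 Disk setup: Extend 'root' and 'tmp' logical volumes"
      community.general.lvol:
        vg: rootvg
        lv: "{{ item.lv }}"
        size: "{{ item.size }}"
        resizefs: true
      loop:
        - { lv: "rootlv", size: "12g" }
        - { lv: "tmplv", size: "4g" }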
Co-authored-by: Kimmo Forss * fix: update the management dns subscription id to saplib sub id, pin azurerm version in landscape, deployer (#619) * pin azurerm version in deployer and landscape * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * Add iSCSI NSG rule * Change rule name * Add Expiry to workload zone key vault secrets * Provide a more robust way to source the deployer subscription * chore: Update keyvault set-policy command in sap-workload-zone.yaml Co-authored-by: Kimmo Forss * feat: Add expiry date to secrets in key vault * chore: Disable cross-tenant replication for sapmnt storage account * chore: Update DNS configuration for sap_library module * chore: Update use_local_privatelink_dns condition in sap_library module * Don't create route table if Firewall is used * chore: Update private DNS configuration in sap_library module * chore: Update private endpoint configuration for sapmnt storage account * Bump up the TF version * Add the DB Virtual Hostname * Update github-actions-ansible-lint.yml Update lint version * chore: Update yum to dnf for Oracle RPM package installation * chore: Update default value for "use_private_endpoint" to true * chore: Update secret expiry date format for SAP cluster and system passwords * chore: Update resource group name for private DNS zone virtual network link * Set Expiry for all key vault secrets * Feature: Add support for SLES 15 SP6 * chore: Remove redundant code for adding HA repositories for RHEL * Configure the constraint for HANA ANF mounts only when HANA ANF mountpoints are used (#625) When ANF is used for the deployment but the data, log, and shared mountpoints are not on ANF, the constraint should not be configured (see the sketch below).
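
A hedged sketch of the guard introduced with #625, assuming SDAF-style parameters (NFS_provider and the hana_*_mountpoint variables are assumed names; the actual pcs/crm constraint command is replaced by a placeholder):

    # The essence of the fix is the 'when' guard: only configure the
    # filesystem location constraint when the HANA mounts really are on ANF.
    - name: "5.5 HANA Pacemaker: Configure ANF mount constraint"
      ansible.builtin.debug:
        msg: "Filesystem location constraint would be configured here"
      when:
        - NFS_provider == "ANF"
        - hana_data_mountpoint is defined
        - hana_log_mountpoint is defined
        - hana_shared_mountpoint is defined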
* Add support for TGZ files * chore: Update verbosity level for debugging BOM object creation * chore: Update reboot timeout and post-reboot delay for 5.6 SCSERS - RHEL instance * Run reboot as root * chore: Expand volumes for Red Hat OS family * chore: Update Test-SDAFReadiness.ps1 script * chore: Update PostBuildCleanup task version to 4 * chore: Update privatelink DNS configuration in transform.tf * chore: update the check for the free size in VG * Expand logical volumes and resize file systems * chore: Clear the failed state of hosts during database installation playbook * Update free space check for SAPCAL * HSR changes * chore: Update Azure.ResourceManager package to version 1.13.0 --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Co-authored-by: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Co-authored-by: Csaba Daradics Co-authored-by: hdamecharla Co-authored-by: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Co-authored-by: Steffen Bo Thomsen Co-authored-by: Jesper Severinsen <30658160+jesperseverinsen@users.noreply.github.com> Co-authored-by: Jaskirat Singh <108129510+jaskisin@users.noreply.github.com> Co-authored-by: jasksingh Co-authored-by: Harm Jan Stam Co-authored-by: Kimmo Forss --------- Signed-off-by: dependabot[bot] Co-authored-by: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Co-authored-by: Steffen Bo Thomsen Co-authored-by: devanshjain Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Co-authored-by: hdamecharla Co-authored-by: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Co-authored-by: Csaba Daradics Co-authored-by: Jesper Severinsen <30658160+jesperseverinsen@users.noreply.github.com> Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jaskirat Singh <108129510+jaskisin@users.noreply.github.com> Co-authored-by: jasksingh Co-authored-by: Harm Jan Stam Co-authored-by: Kimmo Forss --- .../workflows/github-actions-ansible-lint.yml | 2 +- Webapp/SDAF/Models/CustomValidators.cs | 17 + Webapp/SDAF/Models/LandscapeModel.cs | 59 +- Webapp/SDAF/Models/SystemModel.cs | 17 +- .../ParameterDetails/LandscapeDetails.json | 214 +- .../ParameterDetails/LandscapeTemplate.txt | 109 +- .../SDAF/ParameterDetails/SystemDetails.json | 174 +- .../SDAF/ParameterDetails/SystemTemplate.txt | 387 ++-- Webapp/SDAF/ParameterDetails/VM-Images.json | 12 + Webapp/SDAF/SDAFWebApp.csproj | 34 +- deploy/ansible/action_plugins/public_api.py | 1783 +++++++++++++++++ deploy/ansible/configuration_menu.sh | 4 +- .../playbook_00_validate_parameters.yaml | 10 + .../ansible/playbook_01_os_base_config.yaml | 9 + .../ansible/playbook_04_00_00_db_install.yaml | 15 + deploy/ansible/playbook_04_00_01_db_ha.yaml | 13 +- ...ook_06_02_sap_on_azure_quality_checks.yaml | 71 + .../ansible/playbook_sapcal_integration.yaml | 127 ++ .../4.0.0-hdb-install/tasks/main.yaml | 5 + .../tasks/4.0.1.4-create_hana_backup.yml | 19 + .../4.0.3-hdb-install-scaleout/readme.md | 67 + .../tasks/main.yaml | 34 +- .../roles-db/4.0.4-hdb-schema/tasks/main.yaml | 36 + .../roles-db/4.0.4-hdb-schema/vars/main.yaml | 3 + .../4.1.0-ora-install/tasks/main.yaml | 2 +- .../4.1.1-ora-asm-grid/tasks/main.yaml | 2 +- .../tasks/0.1.1-ha_clusterpasswords.yaml | 2 + .../roles-misc/0.1-passwords/tasks/main.yaml | 5 +- 
.../roles-misc/0.2-kv-secrets/tasks/main.yaml | 2 +- .../0.5-ACSS-registration/tasks/main.yaml | 38 +- .../0.8-ams-providers/tasks/main.yaml | 35 +- .../tasks/run_check.yaml | 108 + .../tasks/setup.yaml | 89 + .../vars/main.yaml | 34 + .../roles-os/1.1-swap/handlers/main.yaml | 13 + .../roles-os/1.10-networking/tasks/main.yaml | 6 + .../tasks/1.17.0-set_runtime_facts.yml | 4 +- .../tasks/1.17.2-provision.yml | 5 +- .../tasks/1.17.2.0-cluster-RedHat.yml | 52 +- .../tasks/1.17.2.0-cluster-Suse.yml | 77 +- .../1.17-generic-pacemaker/vars/main.yml | 6 + .../tasks/1.18.2-provision.yml | 2 - .../1.18-scaleout-pacemaker/tasks/main.yml | 1 - .../roles-os/1.20-prometheus/tasks/main.yml | 2 +- .../tasks/1.3.1-repositories-Suse.yaml | 10 - .../1.3.2-custom-repositories-RedHat.yaml | 39 + .../roles-os/1.3-repository/tasks/main.yml | 8 +- .../roles-os/1.3-repository/vars/repos.yaml | 21 +- .../tasks/1.4.0-packages-RedHat-prep.yaml | 1 - .../1.4-packages/tasks/1.4.1-packages.yaml | 24 +- .../tasks/1.4.3-update-packages-RedHat.yaml | 13 +- .../1.4-packages/vars/os-packages.yaml | 66 +- .../1.5-disk-setup/tasks/1.5-custom-disks.yml | 1 + .../tasks/1.5-expand-volumes.yml | 103 + .../roles-os/1.5-disk-setup/tasks/main.yml | 4 + .../1.5.1-disk-setup-asm/tasks/main.yml | 2 +- .../1.5.3-disk-setup-sapcal/tasks/main.yml | 75 + .../2.10-sap-notes/handlers/main.yaml | 13 +- .../2.10-sap-notes/tasks/2.10.3.yaml | 62 +- .../2.10-sap-notes/tasks/main.yaml | 1 + .../2.2-sapPermissions/tasks/main.yaml | 1 + .../2.3-sap-exports/tasks/main.yaml | 174 +- .../2.4-hosts-file/templates/hosts.j2 | 21 +- .../tasks/2.6-set_runtime_facts.yaml | 1 + .../tasks/2.6.0-afs-mounts.yaml | 2 +- .../tasks/2.6.1-anf-mounts.yaml | 4 +- .../tasks/2.6.7-afs-mounts-simplemount.yaml | 2 +- .../tasks/2.6.8-anf-mounts-simplemount.yaml | 4 +- .../2.6-sap-mounts/tasks/main.yaml | 143 +- .../tasks/bom_processor.yaml | 55 +- .../tasks/process_exe_archives.yaml | 42 + .../3.3.1-bom-utility/tasks/bom-register.yaml | 8 +- .../roles-sap/5.1-dbload/tasks/main.yaml | 46 +- .../tasks/oracle-postprocessing.yaml | 3 +- .../roles-sap/5.2-pas-install/tasks/main.yaml | 21 +- .../roles-sap/5.3-app-install/tasks/main.yaml | 21 +- .../tasks/5.5.3-SAPHanaSR.yml | 5 +- .../tasks/5.5.4.0-clusterPrep-RedHat.yml | 26 +- .../tasks/5.5.4.1-cluster-RedHat.yml | 21 +- .../tasks/5.5.4.1-cluster-Suse.yml | 9 +- .../tasks/5.6.1-set_runtime_facts.yml | 4 +- .../tasks/5.6.4.0-cluster-RedHat.yml | 8 +- .../tasks/5.6.4.0-cluster-Suse.yml | 8 +- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 20 +- .../tasks/5.6.7-config-systemd-sap-start.yml | 124 +- .../tasks/5.8.3-SAPHanaSRMultiTarget.yml | 24 +- .../5.8.4.0-clusterPrep-ScaleOut-RedHat.yml | 8 +- .../tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml | 4 +- .../tasks/5.8.4.1-cluster-ScaleOut-Suse.yml | 10 +- .../tasks/5.8.5-post_provision_report.yml | 20 +- .../6.0.0-sapcal-install/defaults/main.yml | 11 + .../6.0.0-sapcal-install/tasks/main.yml | 76 + .../6.0.0-sapcal-install/vars/main.yml | 14 + deploy/ansible/vars/ansible-input-api.yaml | 14 +- deploy/ansible/vars/disks_config.yml | 4 +- deploy/configs/version.txt | 2 +- deploy/pipelines/01-deploy-control-plane.yaml | 14 +- deploy/pipelines/02-sap-workload-zone.yaml | 552 ++--- .../pipelines/03-sap-system-deployment.yaml | 18 +- .../pipelines/04-sap-software-download.yaml | 4 +- .../pipelines/05-DB-and-SAP-installation.yaml | 38 +- .../06-post-installation-tooling.yaml | 4 +- deploy/pipelines/07-sap-cal-installation.yaml | 404 ++++ deploy/pipelines/12-remove-control-plane.yaml | 4 +- 
deploy/pipelines/21-deploy-web-app.yaml | 2 +- .../pipelines/22-sample-deployer-config.yaml | 2 +- .../pipelines/23-levelup-configuration.yaml | 2 +- .../templates/collect-calapi-file.yaml | 37 + .../templates/collect-log-files.yaml | 47 + .../07-sap-cal-installation-variables.yaml | 25 + deploy/scripts/New-SDAFDevopsProject.ps1 | 210 +- deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 4 +- deploy/scripts/Test-SDAFReadiness.ps1 | 17 +- deploy/scripts/deploy_controlplane.sh | 4 +- deploy/scripts/prepare_region.sh | 2 +- deploy/scripts/set_secrets.sh | 18 +- deploy/scripts/update_sas_token.sh | 2 +- .../bootstrap/sap_deployer/module.tf | 5 +- .../bootstrap/sap_deployer/providers.tf | 9 + .../bootstrap/sap_deployer/tfvar_variables.tf | 18 +- .../bootstrap/sap_deployer/transform.tf | 10 + .../terraform/bootstrap/sap_library/module.tf | 7 +- .../bootstrap/sap_library/providers.tf | 11 + .../bootstrap/sap_library/tfvar_variables.tf | 31 +- .../bootstrap/sap_library/transform.tf | 13 + deploy/terraform/run/sap_deployer/module.tf | 5 +- .../terraform/run/sap_deployer/providers.tf | 2 +- .../run/sap_deployer/tfvar_variables.tf | 21 +- .../terraform/run/sap_deployer/transform.tf | 11 + deploy/terraform/run/sap_landscape/module.tf | 12 +- deploy/terraform/run/sap_landscape/output.tf | 17 +- .../terraform/run/sap_landscape/providers.tf | 2 +- .../run/sap_landscape/tfvar_variables.tf | 62 +- .../terraform/run/sap_landscape/transform.tf | 26 +- .../run/sap_landscape/variables_global.tf | 11 - .../run/sap_landscape/variables_local.tf | 10 +- deploy/terraform/run/sap_library/module.tf | 7 +- deploy/terraform/run/sap_library/providers.tf | 14 +- .../run/sap_library/tfvar_variables.tf | 25 +- deploy/terraform/run/sap_library/transform.tf | 11 + deploy/terraform/run/sap_system/module.tf | 32 +- .../run/sap_system/tfvar_variables.tf | 55 +- deploy/terraform/run/sap_system/transform.tf | 15 + .../modules/sap_deployer/app_service.tf | 11 +- .../modules/sap_deployer/infrastructure.tf | 6 + .../templates/configure_deployer.sh.tmpl | 1 + .../modules/sap_deployer/variables_global.tf | 32 +- .../modules/sap_deployer/vm-deployer.tf | 4 +- .../modules/sap_landscape/ams.tf | 12 + .../modules/sap_landscape/infrastructure.tf | 35 +- .../modules/sap_landscape/iscsi.tf | 68 + .../sap_landscape/key_vault_sap_landscape.tf | 57 +- .../modules/sap_landscape/providers.tf | 2 +- .../modules/sap_landscape/storage_accounts.tf | 50 +- .../modules/sap_landscape/subnets.tf | 30 +- .../modules/sap_landscape/variables_global.tf | 38 +- .../modules/sap_landscape/variables_local.tf | 2 +- .../modules/sap_landscape/vm.tf | 18 + .../modules/sap_library/dns.tf | 32 +- .../modules/sap_library/infrastructure.tf | 21 +- .../modules/sap_library/key_vault.tf | 10 +- .../modules/sap_library/keyvault_endpoint.tf | 14 +- .../modules/sap_library/providers.tf | 2 +- .../modules/sap_library/storage_accounts.tf | 46 +- .../modules/sap_library/variables_global.tf | 34 +- .../modules/sap_library/variables_local.tf | 3 +- .../sap_namegenerator/variables_global.tf | 4 + .../sap_system/anydb_node/infrastructure.tf | 4 +- .../sap_system/anydb_node/variables_global.tf | 25 +- .../modules/sap_system/anydb_node/vm-anydb.tf | 24 +- .../sap_system/anydb_node/vm-observer.tf | 2 +- .../sap_system/app_tier/infrastructure.tf | 16 +- .../sap_system/app_tier/variables_global.tf | 18 +- .../modules/sap_system/app_tier/vm-app.tf | 26 +- .../modules/sap_system/app_tier/vm-scs.tf | 22 +- .../modules/sap_system/app_tier/vm-webdisp.tf | 21 +- .../key_vault_sap_system.tf | 10 
+- .../common_infrastructure/outputs.tf | 2 +- .../common_infrastructure/storage_accounts.tf | 7 +- .../common_infrastructure/variables_global.tf | 38 +- .../common_infrastructure/variables_local.tf | 2 +- .../common_infrastructure/vm-anchor.tf | 17 +- .../sap_system/hdb_node/infrastructure.tf | 4 +- .../sap_system/hdb_node/variables_global.tf | 30 +- .../modules/sap_system/hdb_node/vm-hdb.tf | 13 +- .../sap_system/output_files/inventory.tf | 14 + .../output_files/sap-parameters.tmpl | 14 + .../output_files/sap-vm-resources.tmpl | 4 + .../output_files/variables_global.tf | 14 + 189 files changed, 5919 insertions(+), 1901 deletions(-) create mode 100644 deploy/ansible/action_plugins/public_api.py create mode 100644 deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml create mode 100644 deploy/ansible/playbook_sapcal_integration.yaml create mode 100644 deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml create mode 100644 deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml create mode 100644 deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-expand-volumes.yml create mode 100644 deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml create mode 100644 deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/tasks/main.yml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml create mode 100644 deploy/pipelines/07-sap-cal-installation.yaml create mode 100644 deploy/pipelines/templates/collect-calapi-file.yaml create mode 100644 deploy/pipelines/variables/07-sap-cal-installation-variables.yaml create mode 100644 deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl diff --git a/.github/workflows/github-actions-ansible-lint.yml b/.github/workflows/github-actions-ansible-lint.yml index c782aa06eb..472cc3e7d2 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -16,7 +16,7 @@ jobs: - name: Install Ansible and Ansible-Lint run: | python -m pip install --upgrade pip - pip install ansible-core ansible-lint==24.2.0 jmespath netaddr + pip install ansible-core ansible-lint==24.7.0 jmespath netaddr - name: Install Ansible Collections run: | diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs index 3b6da854f1..bab3b8854c 100644 --- a/Webapp/SDAF/Models/CustomValidators.cs +++ b/Webapp/SDAF/Models/CustomValidators.cs @@ -242,6 +242,23 @@ public override bool IsValid(object value) } } + public class NATIdValidator : ValidationAttribute + { + public override bool IsValid(object value) + { + string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Network\/natGateways\/[a-zA-Z0-9-_]+$"; + return RegexValidation(value, pattern); + } + } + + public class PIPIdValidator : ValidationAttribute + { + public override bool IsValid(object value) + { + string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Network\/publicIPAddresses\/[a-zA-Z0-9-_]+$"; + 
return RegexValidation(value, pattern); + } + } public class ScaleSetIdValidator : ValidationAttribute { diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index a26f3c8e94..674737500f 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -1,4 +1,3 @@ -using AutomationForm.Models; using System.ComponentModel; using System.ComponentModel.DataAnnotations; using static AutomationForm.Models.CustomValidators; @@ -263,7 +262,7 @@ public bool IsValid() public string[] ANF_install_volume_zone { get; set; } - + /*---------------------------------------------------------------------------8 | | | DNS information | @@ -274,6 +273,10 @@ public bool IsValid() public string management_dns_subscription_id { get; set; } + public string privatelink_dns_resourcegroup_name { get; set; } + + public string privatelink_dns_subscription_id { get; set; } + public bool? use_custom_dns_a_registration { get; set; } = false; public string dns_label { get; set; } @@ -313,6 +316,8 @@ public bool IsValid() public int? soft_delete_retention_days { get; set; } = 14; + public bool? set_secret_expiry { get; set; } = false; + /*---------------------------------------------------------------------------8 | | | NFS information | @@ -355,11 +360,23 @@ public bool IsValid() [PrivateEndpointIdValidator] public string install_private_endpoint_id { get; set; } -/*---------------------------------------------------------------------------8 -| | -| Utility VM information | -| | -+------------------------------------4--------------------------------------*/ + + + /*---------------------------------------------------------------------------8 + | | + | VM patch information | + | | + +------------------------------------4--------------------------------------*/ + + public string patch_mode { get; set; } = "ImageDefault"; + public string patch_assessment_mode { get; set; } = "ImageDefault"; + + + /*---------------------------------------------------------------------------8 + | | + | Utility VM information | + | | + +------------------------------------4--------------------------------------*/ public int? utility_vm_count { get; set; } = 0; @@ -368,7 +385,7 @@ public bool IsValid() public string utility_vm_os_disk_size { get; set; } = "128"; public string utility_vm_os_disk_type { get; set; } = "Premium_LRS"; - + public bool? utility_vm_useDHCP { get; set; } = true; public Image utility_vm_image { get; set; } @@ -430,8 +447,32 @@ public bool IsValid() public string ams_instance_name { get; set; } - [AMSIdValidator(ErrorMessage = "Invalid User Assigned id")] + [AMSIdValidator(ErrorMessage = "Invalid Workspace id")] public string ams_laws_arm_id { get; set; } + + /*---------------------------------------------------------------------------8 + | | + | NAT Gateway information | + | | + +------------------------------------4--------------------------------------*/ + + public bool? deploy_nat_gateway { get; set; } = false; + + public string nat_gateway_name { get; set; } + + + [NATIdValidator(ErrorMessage = "Invalid NAT Gateway id")] + public string nat_gateway_arm_id { get; set; } + + public string[] nat_gateway_public_ip_zones { get; set; } + + [PIPIdValidator(ErrorMessage = "Invalid Public IP id")] + public string nat_gateway_public_ip_arm_id { get; set; } + + public int? 
nat_gateway_idle_timeout_in_minutes { get; set; } + + public Tag[] nat_gateway_public_ip_tags { get; set; } + } } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index dcf36f3eb4..0dd25cb35d 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -203,7 +203,8 @@ public bool IsValid() public Tag[] configuration_settings { get; set; } public bool? dns_a_records_for_secondary_names { get; set; } = true; - public bool? use_private_endpoint { get; set; } + + public bool? use_private_endpoint { get; set; } = true; public bool? use_service_endpoint { get; set; } @@ -575,6 +576,8 @@ public bool IsValid() public string ANF_sapmnt_volume_name { get; set; } + public bool? ANF_sapmnt_use_existing { get; set; } + public int? ANF_sapmnt_volume_size { get; set; } public int? ANF_sapmnt_volume_throughput { get; set; } @@ -668,6 +671,18 @@ public bool IsValid() public int? use_fence_kdump_lun_scs { get; set; } = 4; + + /*---------------------------------------------------------------------------8 + | | + | VM patch information | + | | + +------------------------------------4--------------------------------------*/ + + public string patch_mode { get; set; } = "ImageDefault"; + public string patch_assessment_mode { get; set; } = "ImageDefault"; + + + } public class Tag diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 52c4e85ac1..54da7b1e33 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -118,24 +118,6 @@ "Section": "Infrastructure settings", "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#environment-parameters", "Parameters": [ - { - "Name": "deploy_monitoring_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "deploy_defender_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, { "Name": "place_delete_lock_on_resources", "Required": false, @@ -889,6 +871,15 @@ "Options": [], "Overrules": "", "Display": 2 + }, + { + "Name": "set_secret_expiry", + "Required": false, + "Description": "Sets expiry date for secrets", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 } ] }, @@ -908,7 +899,7 @@ { "Name": "dns_server_list", "Required": false, - "Description": "Boolean value indicating if a custom dns record should be created for the storage account", + "Description": "List of IP addresses to add as DNS servers", "Type": "list", "Options": [ { @@ -963,6 +954,24 @@ "Options": [], "Overrules": "", "Display": 3 + }, + { + "Name": "privatelink_dns_subscription_id", + "Required": false, + "Description": "Subscription for the DNS zone containing the PrivateLink resources, if different from the management subscription", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "privatelink_dns_resourcegroup_name", + "Required": false, + "Description": "Resource group for the DNS zone containing the PrivateLink resources, if different from the SAP Library resource group", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 
3 } ] }, @@ -1317,7 +1326,13 @@ ], "Overrules": "", "Display": 3 - }, + } + ] + }, + { + "Section": "Common Virtual Machine settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#vm-parameters", + "Parameters": [ { "Name": "user_assigned_identity_id", "Required": false, @@ -1326,7 +1341,79 @@ "Options": [], "Overrules": "", "Display": 3 + }, + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_mode", + "Required": false, + "Description": "Defines the patching mode for the Virtual Machines.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault (Linux)", + "Value": "ImageDefault" + }, + { + "Text": "Manual (Windows)", + "Value": "Manual" + }, + { + "Text": "AutomaticByOS (Windows)", + "Value": "AutomaticByOS" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_assessment_mode", + "Required": false, + "Description": "Specifies the mode of VM Guest Patching for the Virtual Machine.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault", + "Value": "ImageDefault" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 } + + ] }, { @@ -1566,5 +1653,92 @@ "Display": 2 } ] + }, + { + "Section": "NAT Gateway", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", + "Parameters": [ + { + "Name": "deploy_nat_gateway", + "Required": false, + "Description": "Defines if a NAT gateway will be created.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_name", + "Required": false, + "Description": "The name of the NAT Gateway", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_arm_id", + "Required": false, + "Description": "Defines the Azure resource id for the NAT gateway", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_zones", + "Required": false, + "Description": "Defines the zones for the NAT Gateway public IP", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_arm_id", + "Required": false, + "Description": "Azure resource id for the NAT Gateway public IP", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_idle_timeout_in_minutes", + "Required": false, + "Description": "The idle timeout in minutes for the NAT Gateway", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_tags", + "Required": false, + "Description": "Defines a 
list of tags for the NAT Gateway public IP", + "Type": "tag", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] } + ] diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 67ddd31369..4cf26cb4e7 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -36,28 +36,6 @@ $$Description$$ #If you want to provide a custom naming json use the following parameter. $$name_override_file$$ -# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines -$$deploy_monitoring_extension$$ - -# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines -$$deploy_defender_extension$$ - - -######################################################################################### -# # -# Resource group details # -# # -######################################################################################### - -# The two resource group name and arm_id can be used to control the naming and the creation of the resource group - -# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned -$$resourcegroup_name$$ - -# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment -$$resourcegroup_arm_id$$ - - ######################################################################################### # # # Networking # @@ -291,22 +269,68 @@ $$storage_subnet_nsg_arm_id$$ $$storage_subnet_nsg_name$$ +######################################################################################### +# # +# Common Virtual Machine settings # +# # +######################################################################################### + +# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual Machines +$$user_assigned_identity_id$$ + +# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the Virtual Machines +$$deploy_monitoring_extension$$ + +# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the Virtual Machines +$$deploy_defender_extension$$ + +# If defined, defines the patching mode for the Virtual Machines +$$patch_mode$$ + +# If defined, defines the mode of VM Guest Patching for the Virtual Machines +$$patch_assessment_mode$$ + + +######################################################################################### +# # +# Resource group details # +# # +######################################################################################### + +# The two resource group name and arm_id can be used to control the naming and the creation of the resource group + +# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned +$$resourcegroup_name$$ + +# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment +$$resourcegroup_arm_id$$ + + + ######################################################################################### # # # DNS Settings # # # ######################################################################################### -# custom dns resource group name -$$management_dns_resourcegroup_name$$ -# custom dns subscription +# Subscription for the resource group containing the Private DNS zone for the compute resources $$management_dns_subscription_id$$ +# 
Resource group name for the resource group containing the Private DNS zone for the compute resources +$$management_dns_resourcegroup_name$$ + +# Subscription for the resource group containing the Private DNS zone for the Privatelink resources +$$privatelink_dns_subscription_id$$ + +# Resource group name for the resource group containing the Private DNS zone for the Privatelink resources +$$privatelink_dns_resourcegroup_name$$ + + # Defines if a custom dns solution is used $$use_custom_dns_a_registration$$ -# Defines if the Virtual network for the Virtual machines is registered with DNS +# Defines if the Virtual network for the Virtual Machines is registered with DNS # This also controls the creation of DNS entries for the load balancers $$register_virtual_network_to_dns$$ @@ -340,6 +364,9 @@ $$additional_users_to_add_to_keyvault_policies$$ # The number of days that items should be retained in the soft delete period $$soft_delete_retention_days$$ +# Set expiry date for secrets +$$set_secret_expiry$$ + ######################################################################################### # # # Credentials # @@ -527,9 +554,6 @@ $$iscsi_nic_ips$$ # Defines the Availability zones for the iSCSI devices $$iscsi_vm_zones$$ -# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines -$$user_assigned_identity_id$$ - ######################################################################################### # # # Terraform deployment parameters # @@ -601,3 +625,30 @@ $$ams_instance_name$$ # ams_laws_arm_id if provided, Azure resource id for the Log analytics workspace in AMS $$ams_laws_arm_id$$ + +#######################################4#######################################8 +# # +# NAT Gateway variables # +# # +#######################################4#######################################8 + +# If true, a NAT gateway will be created +$$deploy_nat_gateway$$ + +# If provided, the name of the NAT Gateway +$$nat_gateway_name$$ + +# If provided, the Azure resource id for the NAT Gateway +$$nat_gateway_arm_id$$ + +# If provided, the zones for the NAT Gateway public IP +$$nat_gateway_public_ip_zones$$ + +# If provided, Azure resource id for the NAT Gateway public IP +$$nat_gateway_public_ip_arm_id$$ + +# The idle timeout in minutes for the NAT Gateway +$$nat_gateway_idle_timeout_in_minutes$$ + +# If provided, the tags for the NAT Gateway public IP +$$nat_gateway_public_ip_tags$$ diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 9723b7b816..33c74833a1 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -260,51 +260,6 @@ "Overrules": "", "Display": 3 }, - { - "Name": "deploy_v1_monitoring_extension", - "Required": false, - "Description": "Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "deploy_monitoring_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "deploy_defender_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": 
"vm_disk_encryption_set_id", - "Required": false, - "Description": "Azure resource identifier for custom encryption key to use for disk encryption.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "user_assigned_identity_id", - "Required": false, - "Description": "Azure resource identifier for User assigned identity.", - "Type": "lookup", - "Options": [], - "Overrules": "", - "Display": 3 - }, { "Name": "upgrade_packages", "Required": false, @@ -1351,6 +1306,112 @@ } ] }, + { + "Section": "Common Virtual Machine settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#vm-parameters", + "Parameters": [ + { + "Name": "user_assigned_identity_id", + "Required": false, + "Description": "Azure resource identifier for User assigned identity.", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "deploy_v1_monitoring_extension", + "Required": false, + "Description": "Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "vm_disk_encryption_set_id", + "Required": false, + "Description": "Azure resource identifier for custom encryption key to use for disk encryption.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "patch_mode", + "Required": false, + "Description": "Defines the patching mode for the Virtual Machines.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault (Linux)", + "Value": "ImageDefault" + }, + { + "Text": "Manual (Windows)", + "Value": "Manual" + }, + { + "Text": "AutomaticByOS (Windows)", + "Value": "AutomaticByOS" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_assessment_mode", + "Required": false, + "Description": "Specifies the mode of VM Guest Patching for the Virtual Machine.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault", + "Value": "ImageDefault" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + } + + + ] + }, + { "Section": "Cluster settings", "Link": "https://learn.microsoft.com/en-us/azure/virtual-machines/workloads/sap/automation-configure-system#environment-parameters", @@ -2228,6 +2289,15 @@ "Section": "ANF /sapmnt", "Link": "https://learn.microsoft.com/en-us/azure/virtual-machines/workloads/sap/automation-configure-system?branch=main#azure-netapp-files-support", "Parameters": [ + { + "Name": "ANF_sapmnt", + "Required": false, + "Description": "If defined, will create ANF volumes for /sapmnt", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, { "Name": "ANF_sapmnt_volume_size", "Required": false, @@ -2247,7 
+2317,7 @@ "Display": 2 }, { - "Name": "ANF_sapmnt", + "Name": "ANF_sapmnt_use_existing", "Required": false, "Description": "Use existing sapmnt volume", "Type": "checkbox", @@ -2256,19 +2326,19 @@ "Display": 3 }, { - "Name": "ANF_sapmnt_use_clone_in_secondary_zone", + "Name": "ANF_sapmnt_volume_name", "Required": false, - "Description": "Use clone in secondary region for sapmnt volume", - "Type": "checkbox", + "Description": "Azure NetApp volumes name(s) for existing HANA /sapmnt volume(s)", + "Type": "field", "Options": [], "Overrules": "", "Display": 3 }, { - "Name": "ANF_sapmnt_volume_name", + "Name": "ANF_sapmnt_use_clone_in_secondary_zone", "Required": false, - "Description": "Azure NetApp volumes name(s) for existing HANA /sapmnt volume(s)", - "Type": "field", + "Description": "Use clone in secondary region for sapmnt volume", + "Type": "checkbox", "Options": [], "Overrules": "", "Display": 3 diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 0c692279fc..cdb302bf38 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -1,35 +1,32 @@ -########################################################################################## -# # -# Deployment topologies # -# # -# Standard (All roles on same server) # -# Define the database tier values and set enable_app_tier_deployment to false # -# # -# Distributed (1+1 or 1+1+N) # -# Define the database tier values and define scs_server_count = 1, # -# application_server_count >= 1 # -# # -# High Availability # -# Define the database tier values and database_high_availability = true # -# scs_server_count = 1 and scs_high_availability = true # -# application_server_count >= 1 # -# # -########################################################################################## - -########################################################################################## -# # -# This sample defines an Distributed deployment # -# # -########################################################################################## - -# The automation supports both creating resources (greenfield) or using existing resources (brownfield) -# For the greenfield scenario the automation defines default names for resources, -# if there is a XXXXname variable then the name is customizable -# for the brownfield scenario the Azure resource identifiers for the resources must be specified +######################################################################################### +# # +# Deployment topologies # +# # +# Standard (All roles on same server) # +# Define the database tier values and set enable_app_tier_deployment to false # +# # +# Distributed (1+1 or 1+1+N) # +# Define the database tier values and define scs_server_count = 1, # +# application_server_count >= 1 # +# # +# High Availability # +# Define the database tier values and database_high_availability = true # +# scs_server_count = 1 and scs_high_availability = true # +# application_server_count >= 1 # +# # +# The automation supports both creating resources (greenfield) or using existing # +# resources (brownfield). # +# # +# For the greenfield scenario the automation defines default names for resources, # +# if there is a XXXXname variable then the name is customizable. # +# For the brownfield scenario the Azure resource identifiers for the resources must # +# be specified using the XXXX_armid fields. 
# +# # +######################################################################################### ######################################################################################### # # -# Environment definitions # +# Environment/Application definitions # # # ######################################################################################### @@ -39,27 +36,37 @@ $$environment$$ # The location value is a mandatory field, it is used to control where the resources are deployed $$location$$ +# The sid value is a mandatory field that defines the SAP Application SID +$$sid$$ + +# The database_sid defines the database SID +$$database_sid$$ + +# The database_platform defines the database backend, supported values are +# - HANA +# - DB2 +# - ORACLE +# - ORACLE-ASM +# - SYBASE +# - SQLSERVER +# - NONE (in this case no database tier is deployed) +$$database_platform$$ + # Description of the SAP system. $$Description$$ +######################################################################################### +# # +# Deployment parameters # +# # +######################################################################################### -#If you want to customize the disk sizes for VMs use the following parameter to specify the custom sizing file. -$$custom_disk_sizes_filename$$ #If you want to provide a custom naming json use the following parameter. $$name_override_file$$ -# save_naming_information,defines that a json formatted file defining the resource names will be created -$$save_naming_information$$ - -# custom_prefix defines the prefix that will be added to the resource names -$$custom_prefix$$ - -# use_prefix defines if a prefix will be added to the resource names -$$use_prefix$$ - -# use_zonal_markers defines if a zonal markers will be added to the virtual machine resource names -$$use_zonal_markers$$ +#If you want to customize the disk sizes for VMs use the following parameter to specify the custom sizing file. +$$custom_disk_sizes_filename$$ # use_secondary_ips controls if the virtual machines should be deployed with two IP addresses. Required for SAP Virtual Hostname support $$use_secondary_ips$$ @@ -73,118 +80,18 @@ $$use_scalesets_for_deployment$$ # scaleset_id defines the scale set Azure resource Id $$scaleset_id$$ - # database_use_premium_v2_storage defines if the database tier will use premium v2 storage $$database_use_premium_v2_storage$$ # upgrade_packages defines if all packages should be upgraded after installation $$upgrade_packages$$ -# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines -$$user_assigned_identity_id$$ - -######################################################################################### -# # -# Networking # -# By default the networking is defined in the workload zone # -# Only use this section if the SID needs unique subnets/NSGs # -# # -# The deployment automation supports two ways of providing subnet information. # -# 1. Subnets are defined as part of the workload zone deployment # -# In this model multiple SAP System share the subnets # -# 2. 
Subnets are deployed as part of the SAP system # -# In this model each SAP system has its own sets of subnets # -# # -# The automation supports both creating the subnets (greenfield) # -# or using existing subnets (brownfield) # -# For the greenfield scenario the subnet address prefix must be specified whereas # -# for the brownfield scenario the Azure resource identifier for the subnet must # -# be specified # -# # -######################################################################################### - -# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name -$$network_logical_name$$ - -# use_loadbalancers_for_standalone_deployments is a boolean flag that can be used to control if standalone deployments (non HA) will have load balancers -$$use_loadbalancers_for_standalone_deployments$$ - -# use_private_endpoint is a boolean flag controlling if the key vaults and storage accounts have private endpoints -$$use_private_endpoint$$ - - -######################################################################################### -# # -# Cluster settings # -# # -######################################################################################### - -# scs_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI -$$scs_cluster_type$$ - -#scs_cluster_disk_lun defines the LUN number for the SAP Central Services cluster disk -$$scs_cluster_disk_lun$$ - -#scs_cluster_disk_size defines the size for the SAP Central Services cluster disk -$$scs_cluster_disk_size$$ - -#scs_cluster_disk_type defines the storage_account_type of the shared disk for the SAP Central Services cluster -$$scs_cluster_disk_type$$ - -# database_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI -$$database_cluster_type$$ - -#database_cluster_disk_lun defines the LUN number for the database cluster disk -$$database_cluster_disk_lun$$ - -#database_cluster_disk_size defines the size for the database cluster disk -$$database_cluster_disk_size$$ - -#database_cluster_disk_type defines the storage_account_type of the shared disk for the Database cluster -$$database_cluster_disk_type$$ - -# use_msi_for_clusters if defined will use managed service identity for the Pacemaker cluster fencing -$$use_msi_for_clusters$$ - -# fencing_role_name, If specified the role name to use for the fencing agent -$$fencing_role_name$$ - -# use_simple_mount specifies if Simple mounts are used (Applicable for SLES 15 SP# or newer) -$$use_simple_mount$$ - -# Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters -$$use_fence_kdump$$ - -# Default size of the kdump disk which will be attached to the VMs which are part DB cluster -$$use_fence_kdump_size_gb_db$$ - -# Default LUN number of the kdump disk which will be attached to the VMs which are part of DB cluster -$$use_fence_kdump_lun_db$$ - -# Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster -$$use_fence_kdump_size_gb_scs$$ - -# Default LUN number of the kdump disk which will be attached to the VMs which are part of SCS cluster -$$use_fence_kdump_lun_scs$$ - ######################################################################################### # # # Database tier # # # # ######################################################################################### -$$database_sid$$ - -# database_platform defines the database backend, supported values are -# 
- HANA -# - DB2 -# - ORACLE -# - ORACLE-ASM -# - SYBASE -# - SQLSERVER -# - NONE (in this case no database tier is deployed) -$$database_platform$$ - # Defines the number of database servers $$database_server_count$$ @@ -285,7 +192,6 @@ $$database_use_avset$$ # Optional, Defines if the tags for the database virtual machines $$database_tags$$ - ######################################################################################### # # # Application tier # # @@ -300,9 +206,6 @@ $$enable_app_tier_deployment$$ # app_tier_use_DHCP is a boolean flag controlling if Azure subnet provided IP addresses should be used (true) $$app_tier_use_DHCP$$ -# sid is a mandatory field that defines the SAP Application SID -$$sid$$ - ######################################################################################### # # # SAP Central Services # @@ -466,37 +369,86 @@ $$webdispatcher_server_zones$$ $$webdispatcher_server_image$$ - ######################################################################################### # # -# Miscellaneous settings # +# Common Virtual Machine settings # # # ######################################################################################### -# resource_offset can be used to provide an offset for resource naming -# server#, disk# -$$resource_offset$$ +# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines +$$user_assigned_identity_id$$ # vm_disk_encryption_set_id if defined defines the custom encryption key $$vm_disk_encryption_set_id$$ -# deploy_application_security_groups if defined will create application security groups -$$deploy_application_security_groups$$ - -# deploy_v1_monitoring_extension Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed -$$deploy_v1_monitoring_extension$$ - # If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines $$deploy_monitoring_extension$$ # If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines $$deploy_defender_extension$$ -# dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names -$$dns_a_records_for_secondary_names$$ +# If defined, defines the patching mode for the virtual machines +$$patch_mode$$ + +# If defined, defines the mode of VM Guest Patching for the Virtual Machine +$$patch_assessment_mode$$ + + + +######################################################################################### +# # +# Cluster settings # +# # +######################################################################################### + +# scs_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI +$$scs_cluster_type$$ + +#scs_cluster_disk_lun defines the LUN number for the SAP Central Services cluster disk +$$scs_cluster_disk_lun$$ + +#scs_cluster_disk_size defines the size for the SAP Central Services cluster disk +$$scs_cluster_disk_size$$ + +#scs_cluster_disk_type defines the storage_account_type of the shared disk for the SAP Central Services cluster +$$scs_cluster_disk_type$$ + +# database_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI +$$database_cluster_type$$ + +#database_cluster_disk_lun defines the LUN number for the database cluster disk +$$database_cluster_disk_lun$$ + +#database_cluster_disk_size defines the size for the database cluster disk +$$database_cluster_disk_size$$ + +#database_cluster_disk_type defines the 
storage_account_type of the shared disk for the Database cluster
+$$database_cluster_disk_type$$
+
+# use_msi_for_clusters if defined will use managed service identity for the Pacemaker cluster fencing
+$$use_msi_for_clusters$$
+
+# fencing_role_name, If specified the role name to use for the fencing agent
+$$fencing_role_name$$
+
+# use_simple_mount specifies if Simple mounts are used (Applicable for SLES 15 SP# or newer)
+$$use_simple_mount$$
+
+# Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters
+$$use_fence_kdump$$
+
+# Default size of the kdump disk which will be attached to the VMs which are part of DB cluster
+$$use_fence_kdump_size_gb_db$$
+
+# Default LUN number of the kdump disk which will be attached to the VMs which are part of DB cluster
+$$use_fence_kdump_lun_db$$
+
+# Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster
+$$use_fence_kdump_size_gb_scs$$
+
+# Default LUN number of the kdump disk which will be attached to the VMs which are part of SCS cluster
+$$use_fence_kdump_lun_scs$$
 
 
 #########################################################################################
 #                                                                                       #
 #  Azure NetApp Files support                                                           #
 #                                                                                       #
 #########################################################################################
@@ -536,7 +488,7 @@ $$ANF_HANA_use_Zones$$
 
 #########################################################################################
 #                                                                                       #
-#  HANA Data                                                                            #
+#  Azure NetApp Files - HANA Data                                                       #
 #                                                                                       #
 #########################################################################################
 
@@ -561,7 +513,7 @@ $$ANF_HANA_data_volume_count$$
 
 #########################################################################################
 #                                                                                       #
-#  HANA Log                                                                             #
+#  Azure NetApp Files - HANA Log                                                        #
 #                                                                                       #
 #########################################################################################
 
@@ -585,7 +537,7 @@ $$ANF_HANA_log_volume_count$$
 
 #########################################################################################
 #                                                                                       #
-#  HANA Shared                                                                          #
+#  Azure NetApp Files - HANA Shared                                                     #
 #                                                                                       #
 #########################################################################################
 
@@ -607,7 +559,7 @@ $$ANF_HANA_shared_volume_name$$
 
 #########################################################################################
 #                                                                                       #
-#  Azure NetApp Files /usr/sap                                                          #
+#  Azure NetApp Files - /usr/sap                                                        #
 #                                                                                       #
 #########################################################################################
 
@@ -629,25 +581,30 @@ $$ANF_usr_sap_volume_name$$
 
 #########################################################################################
 #                                                                                       #
-#  Azure NetApp Files sapmnt                                                            #
+#  Azure NetApp Files - sapmnt                                                          #
 #                                                                                       #
 #########################################################################################
 
 # ANF_sapmnt, if defined, will create Azure NetApp Files volume for /sapmnt
 $$ANF_sapmnt$$
 
-# ANF_sapmnt_use_clone_in_secondary_zone, if defined, uses clone in secondary region for sapmnt volume.
-$$ANF_sapmnt_use_clone_in_secondary_zone$$
-
 # ANF_sapmnt_volume_size, if defined, provides the size of the /sapmnt volume.
 $$ANF_sapmnt_volume_size$$
 
 # ANF_sapmnt_volume_throughput, if defined, provides the throughput of the /sapmnt volume.
 $$ANF_sapmnt_volume_throughput$$
 
+# Use existing Azure NetApp volumes for /sapmnt.
+$$ANF_sapmnt_use_existing$$
+
+
 # ANF_sapmnt_volume_name, if defined, provides the name of the /sapmnt volume.
 $$ANF_sapmnt_volume_name$$
 
+# ANF_sapmnt_use_clone_in_secondary_zone, if defined, uses clone in secondary region for sapmnt volume.
+$$ANF_sapmnt_use_clone_in_secondary_zone$$
+
+
 
 #########################################################################################
 #                                                                                       #
@@ -679,21 +636,20 @@ $$vm_disk_encryption_set_id$$
 $$nsg_asg_with_vnet$$
 
 #########################################################################################
-# RESOURCE GROUP
-# The two resource group name and arm_id can be used to control the naming and the creation of the resource group
-# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
-# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment
+#                                                                                       #
+#                                    Resource Group                                     #
+#                                                                                       #
 #########################################################################################
 
+# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
 $$resourcegroup_name$$
 
+# The resourcegroup_arm_id is optional, it can be used to provide an existing resource group for the deployment
 $$resourcegroup_arm_id$$
 
 #########################################################################################
 #                                                                                       #
-#  PPG                                                                                  #
-#  The proximity placement group names and arm_ids are optional and can be used to      #
-#  control the naming and the creation of the proximity placement groups                #
+#                             Proximity Placement Group                                 #
 #                                                                                       #
 #########################################################################################
 
@@ -714,16 +670,51 @@
 
 #########################################################################################
 #                                                                                       #
-#  Key Vault variables                                                                  #
+#                             Key Vault information                                     #
 #                                                                                       #
 #########################################################################################
 
+# If defined, specifies the Azure resource identifier for an existing key vault.
+# Designed to host the SAP system credentials
 $$user_keyvault_id$$
 
+# If defined, specifies the Azure resource identifier for an existing key vault.
+# Designed to host the deployment credentials used by the automation
 $$spn_keyvault_id$$
 
+# If defined, will enable purge control for the key vaults
 $$enable_purge_control_for_keyvaults$$
 
+#########################################################################################
+#                                                                                       #
+#  Networking                                                                           #
+#  By default the networking is defined in the workload zone                            #
+#  Only use this section if the SID needs unique subnets/NSGs                           #
+#                                                                                       #
+#  The deployment automation supports two ways of providing subnet information.         #
+#  1. Subnets are defined as part of the workload zone deployment                       #
+#     In this model multiple SAP Systems share the subnets                              #
+#  2. Subnets are deployed as part of the SAP system                                    #
+#     In this model each SAP system has its own sets of subnets                         #
+#                                                                                       #
+#  The automation supports both creating the subnets (greenfield)                       #
+#  or using existing subnets (brownfield)                                               #
+#  For the greenfield scenario the subnet address prefix must be specified whereas      #
+#  for the brownfield scenario the Azure resource identifier for the subnet must        #
+#  be specified                                                                         #
+#                                                                                       #
+#########################################################################################
+
+# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name
+$$network_logical_name$$
+
+# use_loadbalancers_for_standalone_deployments is a boolean flag that can be used to control if standalone deployments (non HA) will have load balancers
+$$use_loadbalancers_for_standalone_deployments$$
+
+# use_private_endpoint is a boolean flag controlling if the key vaults and storage accounts have private endpoints
+$$use_private_endpoint$$
+
+
 #########################################################################################
 #                                                                                       #
 #  Admin Subnet variables                                                               #
@@ -926,4 +917,46 @@ $$enable_os_monitoring$$
 $$ams_resource_id$$
 
+#########################################################################################
+#                                                                                       #
+#  DNS settings                                                                         #
+#                                                                                       #
+#########################################################################################
+
+# dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names
+$$dns_a_records_for_secondary_names$$
+
+# register_endpoints_with_dns defines if the endpoints should be registered with the DNS
+$$register_endpoints_with_dns$$
+
+
+
+#########################################################################################
+#                                                                                       #
+#  Miscellaneous settings                                                               #
+#                                                                                       #
+#########################################################################################
+
+# deploy_application_security_groups if defined will create application security groups
+$$deploy_application_security_groups$$
+
+# deploy_v1_monitoring_extension defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed
+$$deploy_v1_monitoring_extension$$
+
+# resource_offset can be used to provide an offset for resource naming
+# server#, disk#
+$$resource_offset$$
+
+# save_naming_information, defines that a json formatted file defining the resource names will be created
+$$save_naming_information$$
+
+# custom_prefix defines the prefix that will be added to the resource names
+$$custom_prefix$$
+
+# use_prefix defines if a prefix will be added to the resource names
+$$use_prefix$$
+
+# use_zonal_markers defines if zonal markers will be added to the virtual machine resource names
+$$use_zonal_markers$$
+
diff --git a/Webapp/SDAF/ParameterDetails/VM-Images.json b/Webapp/SDAF/ParameterDetails/VM-Images.json
index 7c2ec5bb52..4bf913b983 100644
--- a/Webapp/SDAF/ParameterDetails/VM-Images.json
+++ b/Webapp/SDAF/ParameterDetails/VM-Images.json
@@ -215,6 +215,18 @@
       "type": "marketplace"
     }
   },
+  {
+    "name": "SUSE 15 SP6",
+    "data": {
+      "os_type": "LINUX",
+      "source_image_id": "",
+      "publisher": "SUSE",
+      "offer": "sles-sap-15-sp6",
+      "sku": "gen2",
+      "version": "latest",
+      "type": "marketplace"
+    }
+  },
   {
     "name": "OracleLinux 8.2",
     "data": {
diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj
index 7231158be1..8e64b487b3 100644
--- a/Webapp/SDAF/SDAFWebApp.csproj
+++ b/Webapp/SDAF/SDAFWebApp.csproj
@@ -15,24 +15,24 @@
 [PackageReference version updates elided: the XML element content was lost in extraction]
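For reference, the SLES 15 SP6 entry added to VM-Images.json above maps one-to-one onto the image block used in the system configuration templates. A minimal sketch of the resulting tfvars value, assuming the usual marketplace image shape (the variable name database_vm_image is illustrative, not part of this change):

    # Marketplace image matching the "SUSE 15 SP6" entry above
    database_vm_image = {
      os_type         = "LINUX"
      source_image_id = ""
      publisher       = "SUSE"
      offer           = "sles-sap-15-sp6"
      sku             = "gen2"
      version         = "latest"
      type            = "marketplace"
    }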
diff --git a/deploy/ansible/action_plugins/public_api.py b/deploy/ansible/action_plugins/public_api.py
new file mode 100644
index 0000000000..30d572c8ce
--- /dev/null
+++ b/deploy/ansible/action_plugins/public_api.py
@@ -0,0 +1,1783 @@
+#!/usr/bin/env python3.9
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import os
+import tempfile
+import requests
+from cryptography.fernet import Fernet
+from azure.common.exceptions import AuthenticationError
+from msrest.exceptions import ClientRequestError
+from azure.common.credentials import ServicePrincipalCredentials
+from azure.keyvault import KeyVaultClient
+from ansible.errors import AnsibleConnectionFailure, AnsibleActionFail
+from ansible.utils.display import Display
+from ansible.module_utils.urls import Request, ConnectionError
+from six.moves.urllib.error import HTTPError, URLError
+from ansible.plugins.action import ActionBase
+
+method_spec_product = dict(
+    method=dict(type="str", required=True),
+    calKeyvaultId=dict(type="str", required=True, no_log=True),
+    clientId=dict(type="str", no_log=True),
+    clientSecret=dict(type="str", no_log=True),
+)
+method_spec_progress = dict(
+    method=dict(type="str", required=True),
+    calKeyvaultId=dict(type="str", required=True, no_log=True),
+    clientId=dict(type="str", no_log=True),
+    clientSecret=dict(type="str", no_log=True),
+    systemId=dict(type="str", required=True),
+    outputDirectoryPath=dict(type="str", no_log=True),
+    outputFile=dict(type="str", no_log=True),
+)
+method_spec_deployment = dict(
+    method=dict(type="str", required=True),
+    outputDirectoryPath=dict(type="str", no_log=True),
+    outputFile=dict(type="str", no_log=True),
+    calKeyvaultId=dict(type="str", required=True, no_log=True),
+    clientId=dict(type="str", no_log=True),
+    clientSecret=dict(type="str", no_log=True),
+    tenantId=dict(type="str", no_log=True),
+    accountId=dict(type="str", required=True, no_log=True),
+    productId=dict(type="str", required=True, no_log=True),
+    cloudProvider=dict(type="str", required=True),
+    planTemplateId=dict(type="str", required=True, no_log=True),
+    planTemplateName=dict(type="str", required=True, no_log=True),
+    region=dict(type="str", default="eastus2"),
+    availabilityScenario=dict(
+        type="str",
+        choices=["non-ha", "hana-system-replication", "clustering"],
+        default="clustering",
+    ),
+    infrastructureParameterSet=dict(
+        type="dict",
+        required=True,
+        options=dict(
+            operatingSystem=dict(
+                type="str", default="SUSE/sles-sap-15-sp3/gen1/2022.11.09"
+            ),
+            privateDnsZone=dict(type="str", required=True),
+            reversePrivateDnsZone=dict(type="str", required=True, no_log=True),
+            transitNetwork=dict(type="str", required=True, no_log=True),
+            workloadNetwork=dict(type="str", required=True, no_log=True),
+            sharedServicesNetwork=dict(type="str", required=True, no_log=True),
+            sharedServicesSubnet=dict(type="str", required=True, no_log=True),
+            workloadNetworkHanaSubnet=dict(type="str", required=True, no_log=True),
+            workloadNetworkAsSubnet=dict(type="str", required=True, no_log=True),
+            technicalCommunicationUser=dict(type="str", required=True, no_log=True),
+            techUserPassword=dict(type="str", required=True, no_log=True),
+            maintenancePlannerTransaction=dict(type="str", required=True, no_log=True),
+            hanaVmSize=dict(type="str", required=False, default="Standard_E20ds_v5"),
+            centralServicesVmSize=dict(
+                type="str", required=False, default="Standard_D4ds_v5"
+            ),
+            enqueueReplicationServerVmSize=dict(
+                type="str", required=False, default="Standard_D4ds_v5"
+            ),
+            applicationServerVmSize=dict(
+                type="str", required=False, default="Standard_E4ds_v5"
+            ),
+            numberOfApplicationServers=dict(type="int", required=False, default="0"),
+            webDispatcherVmSize=dict(
+                type="str", required=False, default="Standard_D2s_v5"
+            ),
+        ),
+    ),
+    installationParameterSets=dict(
+        type="dict",
+        required=True,
+        apply_defaults=True,
+        options=dict(
+            clientId=dict(
+                type="str",
+                required_if=[("availabilityScenario", "==", "clustering")],
+                no_log=True,
+            ),
+            clientSecret=dict(
+                type="str",
+                required_if=[("availabilityScenario", "==", "clustering")],
+                no_log=True,
+            ),
+            hanaDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    DBSID=dict(type="str", default="HDB"),
+                    DBSIDAdminUserId=dict(type="str", default="1050"),
+                    instanceNumber=dict(type="str", default="00"),
+                    frontendHostname=dict(type="str", default="vhdbdb"),
+                    primaryHanaPhysicalHostname=dict(type="str", default="phdbdbpr"),
+                    primaryHanaVirtualHostname=dict(type="str", default="vhdbdbpr"),
+                    secondaryHanaPhysicalHostname=dict(type="str", default="phdbdbsr"),
+                    secondaryHanaVirtualHostname=dict(type="str", default="vhdbdbsr"),
+                ),
+            ),
+            s4hanaDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    SID=dict(type="str", default="S4H"),
+                    SAPSysAdminUserId=dict(type="str", default="1079"),
+                    SAPSysAdminGroupId=dict(type="str", default="79"),
+                    sapGuiDefaultLanguage=dict(type="str", default="en"),
+                    SAPSystemAdditionalLanguages=dict(type="str", default=""),
+                    numberOfDialogWorkProcesses=dict(type="int", default="10"),
+                    numberOfBatchWorkProcesses=dict(type="int", default="7"),
+                ),
+            ),
+            centralServicesDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    ABAPMessageServerPort=dict(type="str", default="3600"),
+                    physicalHostname=dict(type="str", default="ps4hcs"),
+                    virtualHostname=dict(type="str", default="vs4hcs"),
+                ),
+            ),
+            enqueueReplicationServerDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="10"),
+                    physicalHostname=dict(type="str", default="ps4hers"),
+                    virtualHostname=dict(type="str", default="vs4hers"),
+                ),
+            ),
+            primaryApplicationServerDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    physicalHostname=dict(type="str", default="ps4hpas"),
+                    virtualHostname=dict(type="str", default="vs4hpas"),
+                ),
+            ),
+            additionalApplicationServersDeployment=dict(
+                type="list",
+                elements="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    physicalHostname=dict(type="str", default="ps4haas1"),
+                    virtualHostname=dict(type="str", default="vs4haas1"),
+                ),
+            ),
+            webDispatcherDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    installationType=dict(
+                        type="str",
+                        choices=["Standalone", "Embedded", "None", "External"],
+                        default="None",
+                    ),
+                    primaryInstanceNumber=dict(type="str", default="00"),
+                    primaryPhysicalHostname=dict(type="str", default="ps4hwdpr"),
+                    primaryVirtualHostname=dict(type="str", default="vs4hwdpr"),
+                    secondaryInstanceNumber=dict(type="str", default="00"),
+                    secondaryPhysicalHostname=dict(type="str", default="ps4hwdsr"),
+                    secondaryVirtualHostname=dict(type="str", default="vs4hwdsr"),
+                    userIdOfSIDAdmin=dict(type="str", default="1080"),
+                    virtualHostname=dict(type="str", default="vs4hwdext"),
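+                    # fioriHostname defaults to the Web Dispatcher virtual hostname above;
+                    # fioriHostPort below follows the SAP HTTPS port convention 443<instance number>.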
+                    fioriHostname=dict(type="str", default="vs4hwdext"),
+                    fioriHostPort=dict(type="int", default="44300"),
+                    productiveClientNumber=dict(type="str", default="500"),
+                ),
+            ),
+        ),
+    ),
+)
+method_spec_provisioning = dict(
+    method=dict(type="str", required=True),
+    outputDirectoryPath=dict(type="str", no_log=True),
+    outputFile=dict(type="str", no_log=True),
+    productId=dict(type="str", required=True, no_log=True),
+    planTemplateId=dict(type="str", no_log=True, default="default"),
+    availabilityScenario=dict(
+        type="str",
+        choices=["non-ha", "hana-system-replication", "clustering"],
+        default="clustering",
+    ),
+    calKeyvaultId=dict(type="str", required=True, no_log=True),
+    clientId=dict(type="str", no_log=True),
+    clientSecret=dict(type="str", no_log=True),
+    tenantId=dict(type="str", no_log=True),
+    infrastructureParameterSet=dict(
+        type="dict",
+        required=True,
+        required_one_of=[
+            ["domainName", "privateDnsZone"],
+            ["techUserPassword", "techUserPasswordReference"],
+        ],
+        mutually_exclusive=[
+            ["domainName", "privateDnsZone"],
+            ["techUserPassword", "techUserPasswordReference"],
+        ],
+        options=dict(
+            privateDnsZone=dict(type="str", no_log=True),
+            domainName=dict(type="str", no_log=True),
+            secretStoreId=dict(type="str", required=True, no_log=True),
+            deploymentServerSubnet=dict(type="str", no_log=True),
+            executionEngineSubnet=dict(type="str", no_log=True),
+            technicalCommunicationUser=dict(type="str", required=True, no_log=True),
+            techUserPassword=dict(type="str", no_log=True, default=""),
+            techUserPasswordReference=dict(type="str", no_log=True),
+            remoteOsUser=dict(type="str", required=True, no_log=True),
+            deploymentServerResourceGroup=dict(type="str", required=False, no_log=True),
+            sshPublicKeySecretName=dict(type="str", required=True, no_log=True),
+            sshPrivateKeySecretName=dict(type="str", required=True, no_log=True),
+            parameters=dict(type="str", no_log=True),
+        ),
+    ),
+    installationParameterSets=dict(
+        type="dict",
+        required=True,
+        apply_defaults=True,
+        options=dict(
+            hanaDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    primaryVmResourceId=dict(type="str", required=True),
+                    secondaryVmResourceId=dict(type="str", default=""),
+                    loadBalancerResourceId=dict(type="str", default=""),
+                    frontEndIp=dict(type="str", default=""),
+                    DBSID=dict(type="str", default="HDB"),
+                    DBSIDAdminUserId=dict(type="str", default="1050"),
+                    instanceNumber=dict(type="str", default="00"),
+                    frontendHostname=dict(type="str", default=""),
+                    primaryPhysicalHostname=dict(type="str", default=""),
+                    primaryVirtualHostname=dict(type="str", default=""),
+                    secondaryPhysicalHostname=dict(type="str", default=""),
+                    secondaryVirtualHostname=dict(type="str", default=""),
+                ),
+            ),
+            s4hanaDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    SID=dict(type="str", default="S4H"),
+                    SAPSysAdminUserId=dict(type="str", default="1079"),
+                    SAPSysAdminGroupId=dict(type="str", default="79"),
+                    sapGuiDefaultLanguage=dict(type="str", default="en"),
+                    SAPSystemAdditionalLanguages=dict(type="str", default=""),
+                    numberOfDialogWorkProcesses=dict(type="str", default="10"),
+                    numberOfBatchWorkProcesses=dict(type="str", default="7"),
+                ),
+            ),
+            centralServicesDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    vmResourceId=dict(type="str", required=True),
+                    loadBalancerResourceId=dict(type="str", default=""),
+                    frontEndIp=dict(type="str", default=""),
+                    instanceNumber=dict(type="str", default="00"),
+                    ABAPMessageServerPort=dict(type="str", default=""),
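+                    # SAP convention: the ABAP message server port is 36<instance number>,
+                    # e.g. 3600 for instance 00.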
+                    physicalHostname=dict(type="str", default=""),
+                    virtualHostname=dict(type="str", default=""),
+                    loadBalancerHostname=dict(
+                        type="str",
+                        required_if=[("availabilityScenario", "==", "clustering")],
+                    ),
+                ),
+            ),
+            enqueueReplicationServerDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    vmResourceId=dict(type="str", default=""),
+                    frontEndIp=dict(type="str", default=""),
+                    instanceNumber=dict(type="str", default="10"),
+                    physicalHostname=dict(type="str", default=""),
+                    virtualHostname=dict(type="str", default=""),
+                    loadBalancerHostname=dict(type="str"),
+                ),
+            ),
+            applicationServersDeployment=dict(
+                type="list",
+                elements="dict",
+                apply_defaults=True,
+                options=dict(
+                    vmResourceId=dict(type="str", default=""),
+                    instanceNumber=dict(type="str", default="00"),
+                    physicalHostname=dict(type="str", default=""),
+                    virtualHostname=dict(type="str", default=""),
+                ),
+            ),
+            fioriConfiguration=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    fioriHostname=dict(type="str", default=""),
+                    fioriHostPort=dict(type="str", default="44300"),
+                    productiveClientNumber=dict(type="str", default="500"),
+                    ossUser=dict(type="str", default=""),
+                    ossUserPassword=dict(type="str", default=""),
+                    ossUserPasswordReference=dict(type="str", default=""),
+                ),
+            ),
+            webDispatcherDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    installationType=dict(
+                        type="str",
+                        choices=["Standalone", "Embedded", "None", "External"],
+                        default="None",
+                    ),
+                    virtualHostname=dict(type="str", default=""),
+                    primaryVmResourceId=dict(type="str", default=""),
+                    primaryInstanceNumber=dict(type="str", default="00"),
+                    primaryPhysicalHostname=dict(type="str", default=""),
+                    primaryVirtualHostname=dict(type="str", default=""),
+                    userIdOfSIDAdmin=dict(type="str", default="1080"),
+                    secondaryVmResourceId=dict(type="str", default=""),
+                    loadBalancerResourceId=dict(type="str", default=""),
+                    frontEndIp=dict(type="str", default=""),
+                    secondaryInstanceNumber=dict(type="str", default="00"),
+                    secondaryPhysicalHostname=dict(type="str", default=""),
+                    secondaryVirtualHostname=dict(type="str", default=""),
+                ),
+            ),
+        ),
+    ),
+)
+
+required_together = [["clientId", "clientSecret", "tenantId"]]
+
+# Generate a key for encryption/decryption
+FERNET_KEY = os.environ.get("FERNET_KEY", Fernet.generate_key().decode())
+fernet = Fernet(FERNET_KEY.encode())
+
+
+class SAPsystem:
+    def __init__(self, params):
+        self.input_params = params
+        method = params.get("method")
+        scenario = params.get("availabilityScenario")
+        self.infrastructureParameterSet = params.get("infrastructureParameterSet")
+        self.installationParameterSets = params.get("installationParameterSets")
+        webdisp_type = self.installationParameterSets.get(
+            "webDispatcherDeployment"
+        ).get("installationType")
+        if method == "deployment":
+            self.props = self.get_nonha_deployment_params()
+            if scenario == "hana-system-replication":
+                self.props.get("installationParameterSets").update(
+                    self.get_ha_deployment_params()
+                )
+            elif scenario == "clustering":
+                self.props.get("installationParameterSets").update(
+                    self.get_ha_deployment_params()
+                )
+                self.props.get("installationParameterSets").update(
+                    self.get_cluster_deployment_params()
+                )
+            if webdisp_type != "None":
+                if webdisp_type == "Standalone":
+                    self.props["installationParameterSets"]["webDispatcherDeployment"][
+                        "parameters"
+                    ] += self.get_webdisp_deployment_standalone_params().get(
+                        "parameters"
+                    )
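+                    # Standalone installations also receive the shared Web Dispatcher/Fiori
+                    # parameters appended below.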
"parameters" + ] += self.get_webdisp_deployment_params().get("parameters") + if scenario != "NON_HA": + self.props["installationParameterSets"][ + "webDispatcherDeployment" + ]["parameters"] += self.get_webdisp_ha_deployment_params().get( + "parameters" + ) + else: + self.props["installationParameterSets"]["webDispatcherDeployment"][ + "parameters" + ] += self.get_webdisp_deployment_params().get("parameters") + elif method == "software_provisioning": + self.props = self.get_nonha_provisioning_params() + if scenario == "hana-system-replication": + self.props.get("deploymentParameterSets").update( + self.get_ha_provisioning_params() + ) + elif scenario == "clustering": + self.props.get("deploymentParameterSets").update( + self.get_ha_provisioning_params() + ) + + def clean_parameters(self, parameters): + # Filter out parameter dictionaries with value == "" or missing 'value' key + return [param for param in parameters if param.get("value") not in [None, ""]] + + def clean_structure(self, structure): + # Apply cleaning to the structure recursively + if isinstance(structure, dict): + cleaned_structure = {} + for k, v in structure.items(): + if k == "parameters" and isinstance(v, list): + cleaned_structure[k] = self.clean_parameters(v) + else: + cleaned_structure[k] = self.clean_structure(v) + return cleaned_structure + elif isinstance(structure, list): + return [self.clean_structure(item) for item in structure if item != ""] + else: + return structure + + def get_props(self): + return self.clean_structure(self.props) + + def get_nonha_deployment_params(self): + return { + "accountId": self.input_params.get("accountId"), + "productId": self.input_params.get("productId"), + "planTemplateId": self.input_params.get("planTemplateId"), + "planTemplateName": self.input_params.get("planTemplateName"), + "region": self.input_params.get("region"), + "cloudProvider": self.input_params.get("cloudProvider"), + "availabilityScenario": self.input_params.get("availabilityScenario"), + "infrastructureParameterSet": { + "operatingSystem": self.infrastructureParameterSet.get( + "operatingSystem" + ), + "privateDnsZone": self.infrastructureParameterSet.get("privateDnsZone"), + "reversePrivateDnsZone": self.infrastructureParameterSet.get( + "reversePrivateDnsZone" + ), + "transitNetwork": self.infrastructureParameterSet.get("transitNetwork"), + "workloadNetwork": self.infrastructureParameterSet.get( + "workloadNetwork" + ), + "sharedServicesNetwork": self.infrastructureParameterSet.get( + "sharedServicesNetwork" + ), + "sharedServicesSubnet": self.infrastructureParameterSet.get( + "sharedServicesSubnet" + ), + "workloadNetworkHanaSubnet": self.infrastructureParameterSet.get( + "workloadNetworkHanaSubnet" + ), + "workloadNetworkAsSubnet": self.infrastructureParameterSet.get( + "workloadNetworkAsSubnet" + ), + "hanaVmSize": self.infrastructureParameterSet.get("hanaVmSize"), + "centralServicesVmSize": self.infrastructureParameterSet.get( + "centralServicesVmSize" + ), + "enqueueReplicationServerVmSize": self.infrastructureParameterSet.get( + "enqueueReplicationServerVmSize" + ), + "applicationServerVmSize": self.infrastructureParameterSet.get( + "applicationServerVmSize" + ), + "numberOfApplicationServers": self.infrastructureParameterSet.get( + "numberOfApplicationServers" + ), + "webDispatcherVmSize": self.infrastructureParameterSet.get( + "webDispatcherVmSize" + ), + }, + "installationParameterSets": { + "downloadBinaries": { + "name": "Download Binaries", + "parameters": [ + { + "name": 
"technicalCommunicationUser", + "value": self.infrastructureParameterSet.get( + "technicalCommunicationUser" + ), + }, + { + "name": "techUserPassword", + "value": self.infrastructureParameterSet.get( + "techUserPassword" + ), + }, + { + "name": "maintenancePlannerTransaction", + "value": self.infrastructureParameterSet.get( + "maintenancePlannerTransaction" + ), + }, + ], + }, + "hanaDeployment": { + "name": "HANA Deployment", + "parameters": [ + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaPhysicalHostname"), + }, + { + "name": "primaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaVirtualHostname"), + }, + ], + }, + "s4hanaDeployment": { + "name": "S/4HANA Deployment", + "parameters": [ + { + "name": "SID", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SID"), + }, + { + "name": "SAPSysAdminUserId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminUserId"), + }, + { + "name": "SAPSysAdminGroupId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminGroupId"), + }, + { + "name": "sapGuiDefaultLanguage", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("sapGuiDefaultLanguage"), + }, + { + "name": "SAPSystemAdditionalLanguages", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSystemAdditionalLanguages"), + }, + { + "name": "numberOfDialogWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfDialogWorkProcesses"), + }, + { + "name": "numberOfBatchWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfBatchWorkProcesses"), + }, + ], + }, + "centralServicesDeployment": { + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + "primaryApplicationServerDeployment": { + "name": "Primary Application Server Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + "additionalApplicationServersDeployment": 
self.installationParameterSets.get( + "additionalApplicationServersDeployment" + ), + "webDispatcherDeployment": { + "name": "SAP Web Dispatcher and Fiori Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + } + ], + }, + }, + } + + def get_nonha_provisioning_params(self): + params = { + "productId": self.input_params.get("productId"), + "planTemplateId": self.input_params.get("planTemplateId"), + "availabilityScenario": self.input_params.get("availabilityScenario"), + "adaptiveDeployment": "false", + "dryRun": "false", + "infrastructureParameterSet": { + ### privateDnsZone or domainName is added ### + "deploymentServerSubnet": self.infrastructureParameterSet.get( + "deploymentServerSubnet" + ), + "executionEngineSubnet": self.infrastructureParameterSet.get( + "executionEngineSubnet" + ), + "osUser": self.infrastructureParameterSet.get("remoteOsUser"), + "secretStoreId": self.infrastructureParameterSet.get("secretStoreId"), + "sshPublicKeySecretName": self.infrastructureParameterSet.get( + "sshPublicKeySecretName" + ), + "sshPrivateKeySecretName": self.infrastructureParameterSet.get( + "sshPrivateKeySecretName" + ), + "deploymentServerResourceGroup": self.infrastructureParameterSet.get( + "deploymentServerResourceGroup" + ), + "parameters": [], + }, + "deploymentParameterSets": { + "downloadUser": { + "name": "Download User", + "parameters": [ + { + "name": "technicalCommunicationUser", + "value": self.infrastructureParameterSet.get( + "technicalCommunicationUser" + ), + }, + { + "name": "techUserPassword", + "value": self.infrastructureParameterSet.get( + "techUserPassword" + ), + }, + ], + }, + "hanaDeployment": { + "name": "HANA Deployment", + "parameters": [ + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVirtualHostname"), + }, + ], + }, + "s4hanaDeployment": { + "name": "S/4HANA Deployment", + "parameters": [ + { + "name": "SID", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SID"), + }, + { + "name": "SAPSysAdminUserId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminUserId"), + }, + { + "name": "SAPSysAdminGroupId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminGroupId"), + }, + { + "name": "sapGuiDefaultLanguage", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("sapGuiDefaultLanguage"), + }, + { + "name": "SAPSystemAdditionalLanguages", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSystemAdditionalLanguages"), + }, + { + "name": "numberOfDialogWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfDialogWorkProcesses"), + 
}, + { + "name": "numberOfBatchWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfBatchWorkProcesses"), + }, + ], + }, + "centralServicesDeployment": { + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("vmResourceId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + "fioriConfiguration": { + "name": "SAP Fiori Configuration", + "parameters": [ + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("productiveClientNumber"), + }, + { + "name": "ossUser", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("ossUser"), + }, + { + "name": "ossUserPassword", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("ossUserPassword"), + }, + ], + }, + "webDispatcherDeployment": { + "name": "SAP Web Dispatcher Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + }, + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + ], + }, + }, + } + + self.transform_application_servers() + params["deploymentParameterSets"]["applicationServersDeployment"] = ( + self.installationParameterSets.get("applicationServersDeployment") + ) + + # Check if privateDnsZone is provided, and add it to infrastructure parameters if true + if self.infrastructureParameterSet.get("privateDnsZone") is not None: + params["infrastructureParameterSet"]["privateDnsZone"] = ( + self.infrastructureParameterSet.get("privateDnsZone") + ) + + # Check if domainName is provided, and add it to infrastructure parameters if true + if self.infrastructureParameterSet.get("domainName") is not None: + 
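+            # domainName and privateDnsZone are declared mutually exclusive in the
+            # argument spec, so at most one of the two branches adds a value here.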
params["infrastructureParameterSet"]["domainName"] = ( + self.infrastructureParameterSet.get("domainName") + ) + + if self.infrastructureParameterSet.get("techUserPasswordReference") is not None: + new_parameter = { + "name": "passwordReference", + "value": self.infrastructureParameterSet.get( + "techUserPasswordReference" + ), + } + params["deploymentParameterSets"]["downloadUser"]["parameters"].append( + new_parameter + ) + if ( + self.installationParameterSets.get("fioriConfiguration").get( + "ossUserPasswordReference" + ) + is not None + ): + new_parameter = { + "name": "ossUserPasswordReference", + "value": self.installationParameterSets.get("fioriConfiguration").get( + "ossUserPasswordReference" + ), + } + params["deploymentParameterSets"]["fioriConfiguration"][ + "parameters" + ].append(new_parameter) + return params + + def get_ha_deployment_params(self): + return dict( + hanaDeployment={ + "name": "HANA Deployment", + "parameters": [ + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "frontendHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontendHostname"), + }, + { + "name": "primaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaPhysicalHostname"), + }, + { + "name": "primaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaVirtualHostname"), + }, + { + "name": "secondaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryHanaPhysicalHostname"), + }, + { + "name": "secondaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryHanaVirtualHostname"), + }, + ], + }, + enqueueReplicationServerDeployment={ + "name": "Enqueue Replication Server Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + ) + + def get_cluster_deployment_params(self): + return dict( + clustering={ + "name": "Service Principal for High Availability Cluster", + "parameters": [ + { + "name": "clientId", + "value": self.installationParameterSets.get("clientId"), + }, + { + "name": "clientSecret", + "value": self.installationParameterSets.get("clientSecret"), + }, + ], + } + ) + + def get_webdisp_deployment_standalone_params(self): + return dict( + parameters=( + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + 
).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + ) + ) + + def get_webdisp_ha_deployment_params(self): + return dict( + parameters=( + { + "name": "secondaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryInstanceNumber"), + }, + { + "name": "secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVirtualHostname"), + }, + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("productiveClientNumber"), + }, + ) + ) + + def get_webdisp_deployment_params(self): + return dict( + parameters=( + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("productiveClientNumber"), + }, + ) + ) + + def get_ha_provisioning_params(self): + params = dict( + hanaDeployment={ + "name": "HANA Deployment", + "parameters": [ + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "secondaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryVmResourceId"), + }, + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontEndIp"), + }, + { + "name": "frontendHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontendHostname"), + }, + { + "name": "secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + 
"hanaDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryVirtualHostname"), + }, + ], + }, + centralServicesDeployment={ + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "loadBalancerHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("loadBalancerHostname"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("frontEndIp"), + }, + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("vmResourceId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + enqueueReplicationServerDeployment={ + "name": "Enqueue Replication Server Deployment", + "parameters": [ + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("vmResourceId"), + }, + { + "name": "loadBalancerHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("loadBalancerHostname"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("frontEndIp"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + webDispatcherDeployment={ + "name": "SAP Web Dispatcher and Fiori Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + }, + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + 
"value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + { + "name": "secondaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVmResourceId"), + }, + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("frontEndIp"), + }, + { + "name": "secondaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryInstanceNumber"), + }, + { + "name": "secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVirtualHostname"), + }, + ], + }, + ) + return params + + def transform_application_servers(self): + application_servers = self.installationParameterSets.get( + "applicationServersDeployment", [] + ) + + transformed_application_servers = [] + for index, server in enumerate(application_servers, start=1): + name = f"Application Server {index} Deployment" + parameters = [ + {"name": "vmResourceId", "value": server.get("vmResourceId", "")}, + {"name": "instanceNumber", "value": server.get("instanceNumber", "")}, + { + "name": "physicalHostname", + "value": server.get("physicalHostname", ""), + }, + {"name": "virtualHostname", "value": server.get("virtualHostname", "")}, + ] + + transformed_application_servers.append( + {"name": name, "parameters": parameters} + ) + + self.installationParameterSets["applicationServersDeployment"] = ( + transformed_application_servers + ) + + +class Connection: + def __init__(self, address, outputDir, outputFile): + self._address = address.rstrip("/") + self._headers = {} + self._client = Request() + self.logLocation = f"{outputDir}/{outputFile}" + + def _request(self, method, path, payload=None): + headers = self._headers.copy() + data = None + if payload: + data = json.dumps(payload) + headers["Content-Type"] = "application/json" + + url = self._address + path + r_data = {} # Initialize r_data to avoid referencing an uninitialized variable + try: + r = self._client.open(method, url, data=data, headers=headers, timeout=60) + r_status = r.getcode() + r_headers = dict(r.headers) + data = r.read().decode("utf-8") + r_data = json.loads(data) if data else {} + except HTTPError as e: + r_status = e.code + r_headers = dict(e.headers) + try: + r_data = e.read().decode("utf-8") + + except UnicodeDecodeError: + raise AnsibleConnectionFailure(f"HTTPError {r_status}: {r_headers}") + raise AnsibleConnectionFailure( + f"HTTPError {r_status}: {r_headers} Response {r_data}" + ) + finally: + if isinstance(r_data, str): + r_data = json.loads(r_data) + file_data = r_data.copy() + with open(self.logLocation, "w") as f: + if file_data.get("access_token"): + file_data.pop("access_token") + json.dump(file_data, f, sort_keys=True, indent=4) + return r_status, r_headers, r_data + + def get(self, path): + return self._request("GET", path) + + def post(self, path, payload=None): + return self._request("POST", path, payload) + + def delete(self, path): + return self._request("DELETE", path) + + def get_full_path(self, file_name): + absolute_path = os.path.dirname(__file__) + relative_path = file_name + 
full_path = os.path.join(absolute_path, relative_path) + return full_path + + def login(self, oauthServerUrl, apiEndpoint): + self._address = oauthServerUrl + self._client.client_cert, cert_temp_file = self.create_temp_file_from_encrypted( + self.get_full_path("cert_file.pem") + ) + self._client.client_key, key_temp_file = self.create_temp_file_from_encrypted( + self.get_full_path("key_file") + ) + status, headers, data = self.post("") + try: + if status in [200, 201, 204, 206]: + token = data.get("access_token") + self._address = apiEndpoint + if token is not None: + self._headers["Authorization"] = "Bearer " + token + else: + raise AnsibleActionFail( + "Unable to fetch CAL token. Exit code %s" % status + ) + finally: + # Clean up temporary files + if self.get_full_path("cert_file.pem"): + os.remove(self.get_full_path("cert_file.pem")) + if self.get_full_path("key_file"): + os.remove(self.get_full_path("key_file")) + self._client.client_cert = None + self._client.client_key = None + + def create_temp_file_from_encrypted(self, encrypted_file_path): + with open(encrypted_file_path, "rb") as file: + encrypted_data = file.read() + decrypted_data = fernet.decrypt(encrypted_data).decode() + + fd, temp_file_path = tempfile.mkstemp() + with os.fdopen(fd, "w") as tmp: + tmp.write(decrypted_data) + + return temp_file_path, temp_file_path + + def decrypt_file(self, file_path): + with open(file_path, "rb") as file: + encrypted_data = file.read() + decrypted_data = fernet.decrypt(encrypted_data).decode() + with open(file_path, "w") as file: + file.write(decrypted_data) + + +class AzureKeyVaultManager: + def __init__(self, vault_url, client_id=None, secret=None, tenant=None): + self.vault_url = vault_url + self.client_id = client_id + self.secret = secret + self.tenant = tenant + self.token = None + self.token_acquired = False + self.get_token() + + def get_token(self): + display = Display() + token_params = { + "api-version": "2018-02-01", + "resource": "https://vault.azure.net", + } + token_headers = {"Metadata": "true"} + try: + token_res = requests.get( + "http://169.254.169.254/metadata/identity/oauth2/token", + params=token_params, + headers=token_headers, + ) + token = token_res.json().get("access_token") + if token is not None: + self.token_acquired = True + self.token = token + else: + display.v("No token was available.") + except requests.exceptions.RequestException: + display.v( + "Try using service principal if provided. Unable to fetch MSI token. " + ) + self.token_acquired = False + + def get_secrets(self, secrets): + ret = [] + if self.vault_url is None: + raise AnsibleActionFail("Failed to get a valid vault URL.") + if self.token_acquired: + secret_params = {"api-version": "2016-10-01"} + secret_headers = {"Authorization": "Bearer " + self.token} + for secret in secrets: + try: + secret_res = requests.get( + self.vault_url + "/secrets/" + secret, + params=secret_params, + headers=secret_headers, + ) + ret.append(secret_res.json()["value"]) + except requests.exceptions.RequestException: + raise AnsibleActionFail( + "Failed to fetch secret: " + secret + " via MSI endpoint." 
+ ) + except KeyError: + raise AnsibleActionFail("Failed to fetch secret " + secret + ".") + return ret + else: + return self.get_secret_non_msi(secrets) + + def get_secret_non_msi(self, secrets): + try: + credentials = ServicePrincipalCredentials( + client_id=self.client_id, secret=self.secret, tenant=self.tenant + ) + client = KeyVaultClient(credentials) + except AuthenticationError: + raise AnsibleActionFail( + "Invalid credentials for the subscription provided." + ) + + ret = [] + for secret in secrets: + try: + secret_val = client.get_secret(self.vault_url, secret, "").value + ret.append(secret_val) + except ClientRequestError: + raise AnsibleActionFail("Error occurred in the request") + return ret + + def create_certificates_files(self, client_cert, client_key): + script_dir = os.path.dirname(os.path.abspath(__file__)) + cert_file_path = os.path.join(script_dir, "cert_file.pem") + key_file_path = os.path.join(script_dir, "key_file") + # Encrypt and save the certificates + self.encrypt_and_save(client_cert, cert_file_path) + self.encrypt_and_save(client_key, key_file_path) + + def encrypt_and_save(self, data, file_path): + encrypted_data = fernet.encrypt(data.encode()) + with open(file_path, "wb") as file: + file.write(encrypted_data) + + +class ActionModule(ActionBase): + def __init__(self, *args, **kwargs): + super(ActionModule, self).__init__(*args, **kwargs) + self._supports_check_mode = False + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + # Get parameters from task arguments + method = self._task.args.get("method") + output_directory = self._task.args.get("outputDirectoryPath", "/tmp") + output_file = self._task.args.get("outputFile", "output.txt") + azure_arg_mapping = { + "calKeyvaultId": "vault_url", + "clientId": "client_id", + "clientSecret": "secret", + "tenantId": "tenant", + } + + # Extract relevant arguments and map them to AzureKeyVaultManager constructor argument names + azure_args = { + azure_arg_mapping[key]: value + for key, value in self._task.args.items() + if key in azure_arg_mapping + } + + # Retrieve secrets from Azure Key Vault + azure_mngr = AzureKeyVaultManager(**azure_args) + api_secrets = azure_mngr.get_secrets( + ["apiEndpoint", "clientCertificate", "clientPrivateKey", "oauthServerUrl"] + ) + + apiEndPoint, clientCertificate, clientPrivateKey, oathUrl = api_secrets + + # Create certificate files + azure_mngr.create_certificates_files(clientCertificate, clientPrivateKey) + + conn = Connection("", output_directory, output_file) + + if method == "get_product": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_product, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + status, _, data = conn.get("/solutions/v1/products") + result.update(status=status, response=str(data)) + elif method == "get_progress": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_progress, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + system_id = new_module_args.get("systemId") + status, _, data = conn.get( + "/workloads/v1/systems/" + system_id + "/provisioningProgress" + ) + result.update(status=status, response=str(data)) + elif method == "deployment": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_deployment, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + status, _, data = conn.get("/solutions/v1/products") + + if data is not None: + 
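+            # Index the CAL product catalogue by productId so the requested product and its "Microsoft Azure" provider constraints (allowed operating systems and VM sizes) can be looked up below.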
products_dict = {p["productId"]: p for p in data.get("products")} + product = products_dict.get(new_module_args.get("productId")) + # Validate the lookup before dereferencing product below. + if not product: + raise AnsibleActionFail( + "Product not found. Choose from the available products' list %s" + % products_dict + ) + product_constraints = [ + item + for item in product.get("availableProviders") + if "Microsoft Azure" in item["name"] + ][0] + + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "operatingSystem" + ).update({"choices": product_constraints.get("availableOperatingSystems")}) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "hanaVmSize" + ).update({"choices": product_constraints.get("availableHanaVmSizes")}) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "centralServicesVmSize" + ).update( + {"choices": product_constraints.get("availableCentralServicesVmSizes")} + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "enqueueReplicationServerVmSize" + ).update( + { + "choices": product_constraints.get( + "availableEnqueueReplicationServerVmSizes" + ) + } + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "applicationServerVmSize" + ).update( + { + "choices": product_constraints.get( + "availableApplicationServerVmSizes" + ) + } + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "webDispatcherVmSize" + ).update( + {"choices": product_constraints.get("availableWebDispatcherVmSizes")} + ) + + validation_result, new_module_args = self.validate_argument_spec( + method_spec_deployment + ) + system = SAPsystem(new_module_args) + system_request = system.get_props() + status, _, data = conn.post( + "/workloads/v1/systems/provisioning", payload=system_request + ) + result.update(status=status, response=str(data)) + elif method == "software_provisioning": + conn.login(oathUrl, apiEndPoint) + validation_result, new_module_args = self.validate_argument_spec( + method_spec_provisioning, required_together=required_together + ) + system = SAPsystem(new_module_args) + system_request = system.get_props() + status, _, data = conn.post( + "/workloads/v1/systems/softwareProvisioning", payload=system_request + ) + result.update( + status=status, response=str(data) + ) # Write response to output file + + result["changed"] = True + + return result diff --git a/deploy/ansible/configuration_menu.sh b/deploy/ansible/configuration_menu.sh index 3dfebc3e36..c23192ba15 100755 --- a/deploy/ansible/configuration_menu.sh +++ b/deploy/ansible/configuration_menu.sh @@ -61,7 +61,7 @@ export ANSIBLE_PASSWORD=$password_secret # entry associated with the specific setting.
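+# Note: the ${sap_sid%$'\r'} expansion used below strips a trailing carriage return from sap_sid, guarding against parameter files saved with Windows (CRLF) line endings.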
# export ANSIBLE_HOST_KEY_CHECKING=False -export ANSIBLE_INVENTORY="${sap_sid}_hosts.yaml" +export ANSIBLE_INVENTORY="${sap_sid%$'\r'}_hosts.yaml" export ANSIBLE_PRIVATE_KEY_FILE=sshkey export ANSIBLE_COLLECTIONS_PATHS=/opt/ansible/collections:${ANSIBLE_COLLECTIONS_PATHS:+${ANSIBLE_COLLECTIONS_PATHS}} @@ -149,7 +149,7 @@ all_playbooks=( # Set of options that will be passed to the ansible-playbook command playbook_options=( - --inventory-file="${sap_sid}_hosts.yaml" + --inventory-file="${sap_sid%$'\r'}_hosts.yaml" --private-key=${ANSIBLE_PRIVATE_KEY_FILE} --extra-vars="_workspace_directory=`pwd`" --extra-vars="@${sap_params_file}" diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index c1cfdb0980..ea7593d589 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -424,6 +424,16 @@ tags: - 0.0-agent-diskspace + - name: "0.0 Validations - Check SAP CAL variables are present and not empty" + when: enable_sap_cal is defined and enable_sap_cal + ansible.builtin.assert: + that: + - calapi_kv is defined + - calapi_kv | type_debug != 'NoneType' + - calapi_kv | trim | length > 1 + fail_msg: "Please provide the SAP CAL API key vault name in calapi_kv parameter" + tags: + - always # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 6f1d78cb60..c0c4e76705 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -91,6 +91,15 @@ tags: - always + - name: "OS configuration playbook: - Create directories" + become: true + ansible.builtin.file: + path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' + state: directory + mode: '0755' + tags: + - always + - name: "OS configuration playbook: - Set sudoers" ansible.builtin.include_role: name: roles-os/1.0-sudoers diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index b6bae6e527..cc2eec14ee 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -352,6 +352,21 @@ - name: "Run the Database installation Playbook" block: + - name: "Database Installation Playbook: - run HANA installation" + ansible.builtin.include_role: + name: roles-db/4.0.0-hdb-install + when: + - not db_scale_out + + - name: "Database Installation Playbook: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors + + # - name: "Database installation Playbook: - run HANA Scale-Out mounts" + # ansible.builtin.include_role: + # name: roles-sap-os/2.6-sap-mounts + # when: + # - db_scale_out | default(false) == true + - name: "Database Installation Playbook: - run HANA Scale-Out installation" ansible.builtin.include_role: name: roles-db/4.0.3-hdb-install-scaleout diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index adc810b7a9..3304c0cc9e 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -93,8 +93,7 @@ when: database_high_availability # +------------------------------------4--------------------------------------*/ -- hosts: "{{ sap_sid | upper }}_DB - {{ db_sid | upper }}_OBSERVER_DB" +- hosts: "{{ sap_sid | upper }}_DB" name: HANA DB HA Configuration remote_user: "{{ orchestration_ansible_user 
}}" gather_facts: true # Important to collect hostvars information @@ -152,10 +151,10 @@ main_password: "{{ hostvars.localhost.sap_password }}" - name: "Run the db/hdb-hsr role" - when: - - node_tier == 'hana' ansible.builtin.include_role: name: roles-db/4.0.1-hdb-hsr + when: + - node_tier == 'hana' tags: - 4.0.1-hdb-hsr @@ -185,7 +184,9 @@ - name: "HANA HA Setup: - run the Pacemaker role" ansible.builtin.include_role: name: roles-sap/5.5-hanadb-pacemaker - when: db_high_availability + when: + - db_high_availability or database_high_availability + - not database_scale_out tags: - 5.5-hanadb-pacemaker @@ -193,7 +194,7 @@ ansible.builtin.include_role: name: roles-sap/5.8-hanadb-scaleout-pacemaker when: - - db_high_availability + - db_high_availability or database_high_availability - database_scale_out tags: - 5.8-hanadb-scaleout-pacemaker diff --git a/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml new file mode 100644 index 0000000000..7c3a21747c --- /dev/null +++ b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml @@ -0,0 +1,71 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for SAP on Azure quality checks | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- hosts: localhost + name: "SAP on Azure quality checks: - setup deployer" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - Create Progress folder" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress" + state: directory + mode: 0755 + + - name: "SAP on Azure quality checks: - Remove sap-on-azure-quality-checks-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/sap-on-azure-quality-checks-done" + state: absent + + - name: "SAP on Azure quality checks: - setup prerequisites" + ansible.builtin.include_role: + name: "roles-misc/0.9-sap-on-azure-quality-checks" + tasks_from: "setup" + + +- hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_ERS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP" + + name: "SAP on Azure quality checks: - run checks" + remote_user: "{{ orchestration_ansible_user }}" + gather_facts: true # Important to collect hostvars information + any_errors_fatal: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - run check" + ansible.builtin.include_role: + name: "roles-misc/0.9-sap-on-azure-quality-checks" + tasks_from: "run_check" + + +- hosts: localhost + name: "SAP on Azure quality checks: - Done" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - Create sap-on-azure-quality-checks-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/sap-on-azure-quality-checks-done" + state: touch + mode: 0755 + +... 
+# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/playbook_sapcal_integration.yaml b/deploy/ansible/playbook_sapcal_integration.yaml new file mode 100644 index 0000000000..7ff35a53a6 --- /dev/null +++ b/deploy/ansible/playbook_sapcal_integration.yaml @@ -0,0 +1,127 @@ +--- + +- name: "SAP CAL Integration" + hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP" + become: true + gather_facts: true + vars_files: vars/ansible-input-api.yaml + tasks: + - name: "SAP-CAL Integration" + become: true + when: + - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" + - enable_sap_cal is defined and enable_sap_cal + block: + - name: "6.0.0-sapcal-install - Extend logical volumes" + when: ansible_os_family | upper == "REDHAT" + ansible.builtin.include_role: + name: roles-os/1.5.3-disk-setup-sapcal + + - name: "Retrieve Resource Group Name and ResourceID" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Set ResourceID for SCS" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + scs_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + scs_physical_hostname: "{{ ansible_hostname }}" + scs_virtual_hostname: "{{ virtual_host }}" + when: + - "'scs' in supported_tiers" + - not scs_high_availability + + - name: "Set ResourceID for DB" + ansible.builtin.set_fact: + db_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + db_physical_hostname: "{{ ansible_hostname }}" + db_virtual_hostname: "{{ virtual_host }}" + when: + - "'hana' in supported_tiers" + - not db_high_availability + + - name: "Set ResourceID for PAS" + ansible.builtin.set_fact: + pas_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + pas_physical_hostname: "{{ ansible_hostname }}" + pas_virtual_hostname: "{{ virtual_host }}" + when: + - "'pas' in supported_tiers" + + - name: "Set ResourceID for APP" + ansible.builtin.set_fact: + app_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + app_physical_hostname: "{{ ansible_hostname }}" + app_virtual_hostname: "{{ virtual_host }}" + when: + - "'app' in supported_tiers" + +- name: "Provision a new SAP environment" + hosts: localhost + connection: local + gather_facts: true + vars_files: vars/ansible-input-api.yaml + tasks: + + - name: "Check if Enable SAP CAL is true" + ansible.builtin.assert: + that: + - enable_sap_cal is defined + - enable_sap_cal | bool + fail_msg: "Please set enable_sap_cal to true in the sap-parameters.yaml file to enable SAP CAL integration" + + - name: Run the keyvault role + ansible.builtin.include_role: + name: roles-misc/0.2-kv-secrets + vars: + operation: sapcal + tags: + - kv-secrets + +# Once the Ansible Module is updated, this task will be moved to OS configuration playbook + - name: "SAP-CAL Integration: - Ensure azure-keyvault is installed" + become: true + when: enable_sap_cal is defined and enable_sap_cal + block: + - name: "SAP-CAL Integration: - Ensure azure-keyvault is installed" + ansible.builtin.pip: + name: + - azure-keyvault==1.1.0 + - azure-keyvault-secrets + state: present + tags: + - always + + - name: "Set facts from other 
hosts" + ansible.builtin.set_fact: + "{{ item.key }}": "{{ hostvars[groups[sap_sid | upper + '_' + item.value][0]][item.key] }}" + loop: + - { key: 'subscription_id', value: 'SCS' } + - { key: 'resource_group_name', value: 'SCS' } + - { key: 'scs_resource_id', value: 'SCS' } + - { key: 'scs_physical_hostname', value: 'SCS' } + - { key: 'scs_virtual_hostname', value: 'SCS' } + - { key: 'db_resource_id', value: 'DB' } + - { key: 'db_physical_hostname', value: 'DB' } + - { key: 'db_virtual_hostname', value: 'DB' } + - { key: 'pas_resource_id', value: 'PAS' } + - { key: 'pas_physical_hostname', value: 'PAS' } + - { key: 'pas_virtual_hostname', value: 'PAS' } + - { key: 'app_resource_id', value: 'APP' } + - { key: 'app_physical_hostname', value: 'APP' } + - { key: 'app_virtual_hostname', value: 'APP' } + + - name: "6.0.0-sapcal-install - CALL SAP CAL API" + when: enable_sap_cal is defined and enable_sap_cal + block: + - name: "Import the 6.0.0-sapcal-install role" + ansible.builtin.import_role: + name: "roles-sap/6.0.0-sapcal-install" diff --git a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml index 57b28315da..b772bba222 100644 --- a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml @@ -120,6 +120,11 @@ failed_when: hana_installation.rc > 0 when: hana_installation.rc == 1 rescue: + - name: "Fail if HANA installation failed while importing the delivery unit AHCO_INA_SERVICE" + ansible.builtin.fail: + msg: "INSTALL:0026:Execute hdblcm failed at delivery unit AHCO_INA_SERVICE." + when: hana_installation.stderr is search(".*Import of delivery units failed.*Cannot import delivery unit.*AHCO_INA_SERVICE.tgz.*") + - name: "Fail if HANA installation failed on second attempt." ansible.builtin.fail: msg: "INSTALL:0022:Execute hdblcm failed." 
diff --git a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.4-create_hana_backup.yml b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.4-create_hana_backup.yml index a2b6f54225..5502acbb23 100644 --- a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.4-create_hana_backup.yml +++ b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.4-create_hana_backup.yml @@ -43,8 +43,17 @@ - name: "HANA HSR: - Backup SYSTEMDB database for System Identifier {{ db_sid }}" ansible.builtin.shell: > {{ hdbsql_systemdb_command }} "{{ backup_cmd_for_systemdb }}" + register: backup_systemdb_result when: backup_systemdb + - name: "HANA HSR: - Backup completed SYSTEMDB database for System Identifier: {{ db_sid }}" + ansible.builtin.debug: + msg: + - "Backup results: {{ backup_systemdb_result }}" + when: + - backup_systemdb + - backup_systemdb_result.stdout is defined + - name: "HANA HSR: - Check if there is a tenant db needing backup" block: - name: "HANA HSR: - Check whether backup exists for tenant {{ hana_tenant_database_name }} database for System Identifier {{ db_sid }}" @@ -66,7 +75,17 @@ - name: "HANA HSR: - Backup {{ hana_tenant_database_name }} database for System Identifier {{ db_sid }}" ansible.builtin.shell: > {{ hdbsql_tenant_command }} "{{ backup_cmd_for_tenant }}" + register: backup_tenantdb_result when: backup_tenantdb + + - name: "HANA HSR: - Backup completed {{ hana_tenant_database_name }} database for System Identifier: {{ db_sid }}" + ansible.builtin.debug: + msg: + - "Backup results: {{ backup_tenantdb_result }}" + when: + - backup_tenantdb + - backup_tenantdb_result.stdout is defined + + when: - hana_has_tenant_db is defined - hana_has_tenant_db diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md index e69de29bb2..f048e28fdd 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/readme.md @@ -0,0 +1,67 @@ +# Task 4.0.3-HDB-install-scaleout + +This task is part of the SAP Automation project and focuses on installing SAP HANA Database in a scale-out configuration using Ansible. +The supported configurations are: +1. Scale out with Standby node. Requires HANA shared (single volume), data and log to be on NFS 4.1-compliant storage like ANF. +2. Scale out with two sites replicated via HSR and managed by Pacemaker. + +## Prerequisites + +Before running this task, make sure you have the following: + +- Ansible installed on your system +- Access to the SAP HANA installation media +- A valid SAP HANA license key +- A properly configured inventory file for your SAP HANA scale-out landscape + +For Scale out with Standby: +- HANA hosts in a single zone. +- A single HANA shared volume with access for all HANA hosts. Can be on AFS or ANF. +- Data volume on ANF, one volume for each HANA host. +- Log volume on ANF, one volume for each HANA host. +- (Optional) HANA backup on ANF/AFS. + +For Scale out in HSR replication: +- HANA hosts (even number count) distributed across two zones. +- Two HANA shared volumes, one for each site. Can be on AFS or ANF. +- Premium SSD V1 hosting data and log volumes, unique per host. +- (Optional) HANA backup on ANF/AFS. + +The Bill of Materials must have the lines below in the DBLoad template: +- "hanadb.landscape.reorg.useCheckProcedureFile = DONOTUSEFILE" +- "hanadb.landscape.reorg.useParameterFile = DONOTUSEFILE" + + +## Usage + +To use this task, follow these steps: + +1. Clone the SAP Automation repository to your local machine. +2. 
Navigate to the `ansible/roles-db/4.0.3-hdb-install-scaleout` directory. +3. Update the `inventory` file with the details of your SAP HANA scale-out landscape. +4. Modify the `group_vars` and `host_vars` files to match your specific requirements. +5. Run the Ansible playbook using the command `ansible-playbook site.yml`. + +## Configuration + +The configuration files for this task are located in the `ansible/roles-db/4.0.3-hdb-install-scaleout` directory. You can modify these files to customize the installation process according to your needs. + +## License + +This project is licensed under the [MIT License](LICENSE). +## Calling the Task + +To call the task `4.0.3-HDB-install-scaleout` with the correct parameters, follow these steps: + +1. Open the terminal or command prompt. +2. Navigate to the directory where the task code is located: `/D:/github/shsorot/sap-automation/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout`. +3. Make sure you have Ansible installed on your system. +4. Ensure that you have access to the SAP HANA installation media and a valid SAP HANA license key. +5. Verify that you have a properly configured inventory file for your SAP HANA scale-out landscape. +6. Modify the `inventory` file in the task directory to include the details of your SAP HANA scale-out landscape. +7. Customize the `group_vars` and `host_vars` files in the task directory to match your specific requirements. +8. Run the Ansible playbook using the following command: `ansible-playbook site.yml`. + +Make sure to replace `site.yml` with the actual name of the playbook file for the task. + +Please note that the specific parameters and configuration files may vary depending on your setup and requirements. diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 4c8ce68fb3..10452a2368 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -612,6 +612,19 @@ with_items: - "{{ ansible_play_hosts_all[0::2] }}" + - name: "Prepare global.ini for HANA hosts name resolution on replication network" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "system_replication_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_storage) | first | default(hostvars[item].ansible_host ) }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ groups[(sap_sid | upper)~'_DB' ] }}" + when: + - subnet_cidr_storage is defined + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" block: - name: "Stop HANA Database" @@ -658,7 +671,7 @@ # /*---------------------------------------------------------------------------8 # | Secondary site setup with Shared nothing scale out | # +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" block: - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" ansible.builtin.file: @@ -872,6 +885,19 @@ with_items: - "{{ ansible_play_hosts_all[1::2] }}" + - name: "Prepare global.ini for HANA hosts name resolution on replication network" + community.general.ini_file: + path: "/hana/shared/{{ db_sid | upper 
}}/global/hdb/custom/config/global.ini" + section: "system_replication_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_storage) | first | default(hostvars[item].ansible_host) }}" + value: "{{ hostvars[item].virtual_host }}" + with_items: + - "{{ groups[(sap_sid | upper)~'_DB' ] }}" + when: + - subnet_cidr_storage is defined + - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" block: - name: "Stop HANA Database" @@ -921,7 +947,6 @@ # /*----------------------------End of setup----------------------------------8 - - name: "HANA Install status" block: @@ -987,7 +1012,7 @@ # Scale out Supplementary tasks -# Create {{ db_sid | lower}}adm account on majority maker VM +# Create {{ db_sid | lower}}adm account on majority maker VM, only required if it is going to be added to Pacemaker - name: "4.0.3 - SAP HANA SCALE OUT: Supplementary tasks" block: - name: "Create Create SAP Groups on Observer VM" @@ -1023,7 +1048,8 @@ state: present shell: "/bin/sh" home: "/usr/sap/{{ db_sid | upper }}/home" - + when: + - database_high_availability ... # /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml b/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml new file mode 100644 index 0000000000..7371ad1f82 --- /dev/null +++ b/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml @@ -0,0 +1,36 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | 0 Set 'schema_name' fact for HDB Schema Name | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "HDB Schema: Get DEFAULT.PFL" + ansible.builtin.slurp: + src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" + register: profilefile + +- name: "HDB Schema: Get schema property" + ansible.builtin.set_fact: + schema_property: "{{ profilefile['content'] | b64decode | split('\n') | select('search', property_name ) }}" + loop: "{{ hdb_schema_property_names }}" + loop_control: + loop_var: property_name + when: + - (schema_property | default([])) | length <= 0 + +- name: "HDB Schema: Parse schema name" + ansible.builtin.set_fact: + schema_name: "{{ schema_property | first | split('=') | last | trim }}" + when: + - (schema_property | default([])) | length > 0 + +- name: "HDB Schema: Set default schema" + ansible.builtin.set_fact: + schema_name: "{{ hana_schema }}" + when: + - schema_name is not defined + +- name: "HDB Schema: Show schema name" + ansible.builtin.debug: + msg: "Schema name: {{ schema_name }}" diff --git a/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml b/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml new file mode 100644 index 0000000000..30ea3d1819 --- /dev/null +++ b/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml @@ -0,0 +1,3 @@ +hdb_schema_property_names: + - "dbs/hdb/schema" # ABAP schema + - "j2ee/dbschema" # JAVA schema diff --git a/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml index 7153423cb5..cb577f3f5a 100644 --- a/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml @@ -65,7 +65,7 @@ register: oracle_installed - name: "ORACLE: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ target_media_location }}/downloads/compat-libcap1-1.10-7.el7.x86_64.rpm" state:
present disable_gpg_check: true diff --git a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml index 66e700d7ce..660b1102e2 100644 --- a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml @@ -172,7 +172,7 @@ - name: "ORACLE ASM: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: - "/oracle/GRID/{{ ora_version }}/cv/rpm/cvuqdisk-1.0.10-1.rpm" state: present diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml index 07121188c5..8bdbf3846e 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/0.1.1-ha_clusterpasswords.yaml @@ -8,6 +8,7 @@ - name: "0.1 HA Cluster Password: - Construct SAP db cluster password secret name" ansible.builtin.set_fact: cluster_password_id: "{{ secret_prefix }}-{{ sap_sid }}-sap-db-cluster-password" + secret_expiry_date: "{{ '%Y-%m-%dT%H:%M:%SZ' | strftime(ansible_date_time.epoch | int + (60*60*24*365) )}}" - name: "0.1 HA Cluster Password: - SAP db cluster password secret name" ansible.builtin.debug: @@ -83,6 +84,7 @@ --vault-name {{ kv_name }} --name {{ cluster_password_id }} --value "{{ db_cluster_password }}" + --expires "{{ secret_expiry_date }}" when: not secret_exists - name: "0.1 HA Cluster Password: - Show SAP cluster Password" diff --git a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml index 9653b5063d..2cad66325d 100644 --- a/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.1-passwords/tasks/main.yaml @@ -8,11 +8,12 @@ - name: "0.1 Password: - Construct SAP system password secret name" ansible.builtin.set_fact: sap_password_id: "{{ secret_prefix }}-{{ sap_sid }}-sap-password" + secret_expiry_date: "{{ '%Y-%m-%dT%H:%M:%SZ' | strftime(ansible_date_time.epoch | int + (60*60*24*365) )}}" tags: - always - name: "0.1 Password: - Create Password secret" - ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ main_password }}" + ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ main_password }}" --expires "{{ secret_expiry_date }}" when: - main_password is defined - "main_password | trim | length != 0" @@ -107,7 +108,7 @@ # sap_password: "S3{{ lookup('password', '/tmp/sappasswordfile length=10 chars=ascii_lowercase,ascii_uppercase,digits') }}" - name: "0.1 Password: - Create Password secret" - ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ sap_password }}" + ansible.builtin.command: az keyvault secret set --vault-name {{ kv_name }} --name {{ sap_password_id }} --value "{{ sap_password }}" --expires "{{ secret_expiry_date }}" tags: - always diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 85940d854d..fd3334d8a3 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -40,7 +40,7 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation == "SoftwareAcquisition" + - operation == 
"SoftwareAcquisition" or operation == "sapcal" # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index 77e386572e..29b454ffcc 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -4,10 +4,6 @@ - name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: python_version: "python3" - -- name: "0.0 Validations: - Set Python version {{ distribution_id }}" - ansible.builtin.set_fact: - python_version: "python2" when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] - name: "0.5.1 acss registration: - Determine if SCS is running on {{ ansible_hostname }}" @@ -112,11 +108,35 @@ tags: - skip_ansible_lint - - name: "Create [ACSS] virtual instance" - ansible.builtin.command: "az workloads sap-virtual-instance create --sap-virtual-instance-name {{ acss_sid }} --resource-group {{ acss_resource_group }} --location {{ acss_location }} --environment {{ acss_environment }} --sap-product {{ acss_sap_product }} --configuration {{ acss_configuration }}" - when: - - ansible_hostname == primary_instance_name - - cluster_group_location.stdout != ansible_hostname + - name: "0.5.1 acss registration: - Create [ACSS] virtual instance" + ansible.builtin.uri: + url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "environment": "{{ acss_environment }}", + "sapProduct": "{{ acss_sap_product }}", + "configuration": { + "configurationType": "Discovery", + "centralServerVmId": "{{ acss_resource_id }}" + } + }, + "location": "{{ acss_location }}" + } + # status_code: [200, 201] + headers: + Authorization: "Bearer {{ acss_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" + register: create_vis_response + failed_when: create_vis_response.json.properties.provisioningState != 'Accepted' and create_vis_response.json.properties.provisioningState != 'Succeeded' + no_log: false + + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance creation response" + ansible.builtin.debug: + msg: "{{ create_vis_response }}" tags: - skip_ansible_lint diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml index ce8b332a7f..75016b6b98 100644 --- a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml @@ -8,31 +8,38 @@ ers_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_ERS') }}" ha_cluster_port_number: "{{ 9664 if ansible_os_family | upper == 'SUSE' else 44322 }}" -- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" + delegate_to: localhost ansible.builtin.shell: >- - az extension add --name workloads --yes || exit 1 + az extension add --name workloads --yes || exit 1 tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Get Access Token" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - perform az login" + 
delegate_to: localhost + ansible.builtin.command: >- + az login --identity --allow-no-subscriptions --output none + no_log: true + changed_when: false + +- name: "0.8.1 ams provider creation: - Get Access Token" + delegate_to: localhost ansible.builtin.shell: >- - az account get-access-token --resource https://management.azure.com \ - --query accessToken -o tsv - register: ams_access_token + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: ams_access_token tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" - delegate_to: localhost - ansible.builtin.command: uuidgen - register: ams_provider_guid +- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" + delegate_to: localhost + ansible.builtin.command: uuidgen + register: ams_provider_guid tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" + delegate_to: localhost when: - ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' - enable_os_monitoring diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml new file mode 100644 index 0000000000..d3b39779b5 --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml @@ -0,0 +1,108 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Run quality check | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- name: "SAP on Azure quality checks: - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + + +- name: "SAP on Azure quality checks: - Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + +# https://github.com/Azure/SAP-on-Azure-Scripts-and-Utilities/blob/main/QualityCheck/Readme.md#login-with-ssh-keys-no-password-required-for-sudo +- name: "SAP on Azure quality checks: - Set common quality check facts" + ansible.builtin.set_fact: + qc_subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + qc_az_vm_resource_group: "{{ azure_metadata.json.compute.resourceGroupName }}" + qc_az_vm_name: "{{ azure_metadata.json.compute.name }}" + qc_vm_username: "{{ ansible_user }}" + qc_vm_hostname: "{{ ansible_hostname }}.{{ sap_fqdn }}" + qc_vm_operating_system: "{{ vm_operating_system_map[ansible_os_family | upper] }}" + qc_vm_database: "{{ vm_database_map[platform | upper] }}" + qc_vm_role: "{{ vm_role_map[node_tier | upper] }}" + qc_sid: "{{ db_sid if vm_role_map[node_tier | upper] == 'DB' else sap_sid }}" + qc_high_availability: "{{ (vm_role_map[node_tier | upper] == 'DB' and database_high_availability) or (vm_role_map[node_tier | upper] == 'ASCS' and scs_high_availability) }}" + + +- name: "SAP on Azure quality checks: - Debug variables" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ qc_subscription_id }}" + - "Resource Group Name: {{ qc_az_vm_resource_group }}" 
+ - "VM Name: {{ qc_az_vm_name }}" + - "VM Username: {{ qc_vm_username }}" + - "VM Hostname: {{ qc_vm_hostname }}" + - "VM Operating System: {{ qc_vm_operating_system }}" + - "VM Database: {{ qc_vm_database }}" + - "VM Role: {{ qc_vm_role }}" + - "SSH Key path {{ _workspace_directory }}/sshkey" + - "Output Directory {{ _workspace_directory }}/quality_assurance" + - "SID: {{ qc_sid }}" + - "High Availability: {{ qc_high_availability }}" + verbosity: 2 + + +- name: "SAP on Azure quality checks: - get access token in the context of azureadm on deployer" + delegate_to: localhost + no_log: true + ansible.builtin.command: az account get-access-token --subscription {{ qc_subscription_id }} --query "accessToken" + failed_when: qc_access_token_result.stdout == "" + register: qc_access_token_result + +- name: "SAP on Azure quality checks: - retrieve client id in the context of azureadm on deployer" + delegate_to: localhost + no_log: true + ansible.builtin.command: echo $ARM_CLIENT_ID + failed_when: gz_arm_client_id_result.stdout == "" + register: gz_arm_client_id_result + +- name: "SAP on Azure quality checks: - Run quality check" + ansible.builtin.shell: + cmd: >- + Connect-AzAccount -AccountId {{ gz_arm_client_id_result.stdout }} ` + -AccessToken {{ qc_access_token_result.stdout }} ` + -Subscription {{ qc_subscription_id }} + + ./QualityCheck.ps1 -LogonWithUserSSHKey ` + -VMOperatingSystem {{ qc_vm_operating_system }} ` + -VMDatabase {{ qc_vm_database }} ` + -VMRole {{ qc_vm_role }} ` + -AzVMResourceGroup {{ qc_az_vm_resource_group }} ` + -AzVMName {{ qc_az_vm_name }} ` + -VMHostname {{ qc_vm_hostname }} ` + -VMUsername {{ qc_vm_username }} ` + -VMConnectionPort 22 ` + -SubscriptionId {{ qc_subscription_id }} ` + -SSHKey {{ _workspace_directory }}/sshkey ` + -Hardwaretype VM ` + -SID {{ qc_sid }} ` + -HighAvailability {{ '$' ~ qc_high_availability }} ` + -OutputDirName {{ _workspace_directory }}/quality_assurance + args: + executable: "/usr/local/bin/pwsh" + chdir: "/opt/microsoft/quality_check" + no_log: true + delegate_to: localhost + become_user: root + become: true + register: quality_check_result + +- name: "SAP on Azure quality checks: - Debug quality check result" + ansible.builtin.debug: + msg: "{{ quality_check_result.stdout_lines }}" + verbosity: 2 + +... 
diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml new file mode 100644 index 0000000000..5f745c8d8d --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml @@ -0,0 +1,89 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Setup quality check prerequisites | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- name: "SAP on Azure quality checks: - setup directories" + become: true + become_user: root + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + mode: 0755 + owner: "{{ item.owner }}" + loop: + - { path: "/opt/microsoft/powershell/v{{ powershell_version }}", owner: "root" } + - { path: "/opt/microsoft/quality_check", owner: "{{ orchestration_ansible_user }}" } + - { path: "{{ _workspace_directory }}/quality_assurance", owner: "{{ orchestration_ansible_user }}" } + + +- name: "SAP on Azure quality checks: - extract PowerShell binary" + become: true + become_user: root + ansible.builtin.unarchive: + src: "https://github.com/PowerShell/PowerShell/releases/download/v{{ powershell_version }}/powershell-{{ powershell_version }}-linux-x64.tar.gz" + dest: "/opt/microsoft/powershell/v{{ powershell_version }}" + creates: "/opt/microsoft/powershell/v{{ powershell_version }}/pwsh" + remote_src: true + + +- name: "SAP on Azure quality checks: - create PowerShell symbolic link" + become: true + become_user: root + ansible.builtin.file: + src: "/opt/microsoft/powershell/v{{ powershell_version }}/pwsh" + dest: "/usr/local/bin/pwsh" + state: link + mode: 0755 + + +- name: "SAP on Azure quality checks: - fetch quality check config" + become: true + become_user: root + ansible.builtin.get_url: + url: "{{ azure_utility_repo }}/main/QualityCheck/QualityCheck.json" + dest: "/opt/microsoft/quality_check/QualityCheck.json" + owner: "{{ orchestration_ansible_user }}" + mode: 0755 + timeout: 30 + register: qc_json_result + until: qc_json_result is succeeded or not qc_json_result.changed + retries: 2 + delay: 5 + + +- name: "SAP on Azure quality checks: - fetch quality check script" + become: true + become_user: root + ansible.builtin.get_url: + url: "{{ azure_utility_repo }}/main/QualityCheck/QualityCheck.ps1" + dest: "/opt/microsoft/quality_check/QualityCheck.ps1" + owner: "{{ orchestration_ansible_user }}" + mode: 0755 + timeout: 30 + register: qc_ps_result + until: qc_ps_result is succeeded or not qc_ps_result.changed + retries: 2 + delay: 5 + +- name: "SAP on Azure quality checks: - run PowerShell setup" + become: true + become_user: root + ansible.builtin.shell: >- + Update-AzConfig -EnableLoginByWam $false + + $modules = @("Az", "Az.NetAppFiles", "Posh-SSH") + + foreach ($module in $modules) { + if (-not (Get-Module -ListAvailable -Name $module)) { + Install-Module $module -Force -Scope AllUsers -Confirm:$false + } + } + register: qc_modules_result + failed_when: qc_modules_result.rc != 0 + args: + chdir: "/opt/microsoft/quality_check" + executable: "/usr/local/bin/pwsh" +... 
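The two get_url tasks above wrap the QualityCheck downloads in a small retry loop (retries: 2, delay: 5). A rough plain-Python equivalent, assuming the same upstream URL that azure_utility_repo points at in the role's vars below:

import time
import urllib.request

URL = (
    "https://raw.githubusercontent.com/Azure/SAP-on-Azure-Scripts-and-Utilities"
    "/main/QualityCheck/QualityCheck.ps1"
)
DEST = "/opt/microsoft/quality_check/QualityCheck.ps1"

for attempt in range(3):  # one initial try plus two retries
    try:
        urllib.request.urlretrieve(URL, DEST)
        break
    except OSError:
        if attempt == 2:
            raise
        time.sleep(5)  # matches the tasks' delay: 5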
diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml new file mode 100644 index 0000000000..ce6e23c1f9 --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml @@ -0,0 +1,34 @@ +azure_utility_repo: https://raw.githubusercontent.com/Azure/SAP-on-Azure-Scripts-and-Utilities +powershell_version: 7.3.12 + +# https://github.com/Azure/SAP-on-Azure-Scripts-and-Utilities/blob/main/QualityCheck/QualityCheck.ps1 +vm_operating_system_map: + SUSE: "SUSE" + REDHAT: "RedHat" + ORACLELINUX: "OracleLinux" + WINDOWS: "Windows" + +vm_database_map: + HANA: HANA + DB2: Db2 + SYBASE: ASE + SQLSERVER: MSSQL + ORACLE: Oracle + ORACLE-ASM: Oracle + +vm_role_map: + HANA: DB + DB2: DB + SYBASE: DB + SQLSERVER: DB + ORACLE: DB + ORACLE-ASM: DB + SCS: ASCS + ERS: ASCS + PAS: APP + APP: APP + +high_availability_agent_map: + AFA: FencingAgent + SBD: SBD + ISCSI: SBD diff --git a/deploy/ansible/roles-os/1.1-swap/handlers/main.yaml b/deploy/ansible/roles-os/1.1-swap/handlers/main.yaml index 15633ae609..e0849b5d3d 100644 --- a/deploy/ansible/roles-os/1.1-swap/handlers/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/handlers/main.yaml @@ -6,7 +6,20 @@ state: restarted - name: "Swap reboot" + become: true + become_user: root ansible.builtin.reboot: + reboot_timeout: 300 + post_reboot_delay: 10 + failed_when: false +# +- name: "1.1 Swap: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors +# Wait for Connection after reboot +- name: "1.1 Swap: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 # ... diff --git a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml index fb01875bc1..fc5c7c27d7 100644 --- a/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.10-networking/tasks/main.yaml @@ -196,6 +196,7 @@ - az_network_interfaces | length >= 1 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_cidr_storage | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_anf is defined # since the storage nic is the 3rd added to the VM we will assume that the device is eth2 # and the connection is 'Wired connection 2' @@ -212,6 +213,7 @@ - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_cidr_storage | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_anf is defined - name: "1.10 Networking - Print the network configuration details for client route" ansible.builtin.debug: @@ -231,6 +233,7 @@ - az_network_interfaces | length > 2 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_cidr_client | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_app is defined - name: "1.10 Networking - Add route to the application subnet via client gateway" ansible.builtin.command: nmcli connection modify "Wired connection 1" +ipv4.routes "{{ subnet_cidr_app }} {{ azif.subnet | ansible.utils.ipmath(1) }}" @@ -243,9 +246,12 @@ - az_network_interfaces | length > 1 - not azif.interfaceName in ['eth0', 'eth0:0'] - subnet_cidr_client | ansible.utils.network_in_usable( azif.ipAddress ) + - subnet_cidr_app is defined # reboot VM after the new routes are added - name: "1.10 Networking - Reboot VM and wait for 5 minutes" + become: true + become_user: root ansible.builtin.reboot: reboot_timeout: 300 when: diff --git 
a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml index ea40eb3c90..d7614f980b 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.0-set_runtime_facts.yml @@ -135,7 +135,7 @@ when: ansible_hostname == secondary_instance_name ansible.builtin.set_fact: is_primIP_defined_on_primaryNode: "{{ hostvars[primary_instance_name].primary_ip is defined }}" - retries: 30 + retries: 5 delay: 60 until: is_primIP_defined_on_primaryNode @@ -143,7 +143,7 @@ when: ansible_hostname == primary_instance_name ansible.builtin.set_fact: is_primIP_defined_on_secondaryNode: "{{ hostvars[secondary_instance_name].primary_ip is defined }}" - retries: 30 + retries: 5 delay: 60 until: is_primIP_defined_on_secondaryNode diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index 110726b4fe..4ede274714 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -44,14 +44,14 @@ when: ansible_hostname == primary_instance_name - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary - ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" + ansible.builtin.shell: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" register: primary_to_secondary_ssh_result changed_when: false failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name when: ansible_hostname == primary_instance_name - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" - ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" + ansible.builtin.shell: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" register: secondary_to_primary_ssh_result changed_when: false failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name @@ -82,7 +82,6 @@ - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined - # Clustering commands are based on the Host OS - name: "1.17 Generic Pacemaker - Cluster based on {{ ansible_os_family }} on VM {{ ansible_hostname }}" ansible.builtin.include_tasks: "1.17.2.0-cluster-{{ ansible_os_family }}.yml" diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 24ae236ba9..1030399880 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -123,7 +123,7 @@ - name: "1.17 Generic Pacemaker - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "1.17 Generic Pacemaker - Ensure STONITH timeout is raised" ansible.builtin.command: pcs property set 
stonith-timeout=900
@@ -167,18 +167,18 @@

 - name: "1.17 Generic Pacemaker - Ensure the STONTIH device is configured"
   ansible.builtin.shell: >
-    pcs stonith create rsc_st_azure fence_azure_arm
-    login="{{ fencing_spn_client_id }}"
-    passwd="{{ fencing_spn_client_pwd }}"
-    resourceGroup="{{ resource_group_name }}"
-    tenantId="{{ fencing_spn_tenant_id }}"
-    subscriptionId="{{ fencing_spn_subscription_id }}"
-    power_timeout=240
-    pcmk_reboot_timeout=900
-    pcmk_monitor_timeout=120
-    pcmk_monitor_retries=4
-    pcmk_action_limit=3
-    pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}"
+    pcs stonith create rsc_st_azure fence_azure_arm
+    username="{{ fencing_spn_client_id }}"
+    password="{{ fencing_spn_client_pwd }}"
+    resourceGroup="{{ resource_group_name }}"
+    tenantId="{{ fencing_spn_tenant_id }}"
+    subscriptionId="{{ fencing_spn_subscription_id }}"
+    power_timeout=240
+    pcmk_reboot_timeout=900
+    pcmk_monitor_timeout=120
+    pcmk_monitor_retries=4
+    pcmk_action_limit=3
+    pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}"
     {%if not is_pcmk_ver_gt_204%}"pcmk_delay_max=15"{%endif%}
   when:
     - ansible_distribution_major_version in ["8", "9"]
@@ -236,7 +236,7 @@
 - name: "1.17 Generic Pacemaker - Install fence-agents-kdump package"
   when:
     - kdump_enabled | default("disabled") == "enabled"
-  ansible.builtin.yum:
+  ansible.builtin.dnf:
     name: fence-agents-kdump
     state: present
   register: fence_agents_kdump_package
@@ -388,10 +388,10 @@
     - "{{ secondary_instance_name }}"

 - name: "1.17 Generic Pacemaker - Configure the resources in Pacemaker"
-  ansible.builtin.command: pcs resource create health-azure-events ocf:heartbeat:azure-events-az op monitor interval=10s
+  ansible.builtin.command: pcs resource create health-azure-events ocf:heartbeat:azure-events-az op monitor interval=10s timeout=240s op start timeout=10s start-delay=90s

 - name: "1.17 Generic Pacemaker - Ensure clone resource azure-events is configured"
-  ansible.builtin.command: pcs resource clone health-azure-events allow-unhealthy-nodes=true
+  ansible.builtin.command: pcs resource clone health-azure-events allow-unhealthy-nodes=true failure-timeout=120s

 - name: "1.17 Generic Pacemaker - Ensure maintenance mode is disabled"
   ansible.builtin.command: pcs property set maintenance-mode=false
diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
index 62500369ee..8b38f4ab87 100644
--- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
+++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml
@@ -6,14 +6,39 @@
 - name: "1.17 Generic Pacemaker - Ensure a list of package version is available for checking the cloud-netconfig-azure version"
   ansible.builtin.package_facts:

+- name: "1.17 Generic Pacemaker - Debug systemd version"
+  ansible.builtin.debug:
+    msg: "SystemD version {{ ansible_facts.packages['systemd'][0].version }}"
+    verbosity: 2
+
 # Pacemaker can create a large number of processes
-- name: "1.17 Generic Pacemaker - Ensure Process limit is raised"
+- name: "1.17 Generic Pacemaker - Ensure Process limit is raised (systemd < 234)"
   ansible.builtin.lineinfile:
     path: /etc/systemd/system.conf
     state: present
     regexp: "^#?\\s*DefaultTasksMax="
     line: "DefaultTasksMax=4096"
   register: raise_process_limit
+  when: ansible_facts.packages['systemd'][0].version is version('234', '<')
+
+# Create a drop-in file in the directory /etc/systemd/system.conf.d
+# and set the value of DefaultTasksMax to 4096
+- name: "1.17 Generic Pacemaker - Create directory for drop-in file (systemd >= 234)"
+  ansible.builtin.file:
+    path: /etc/systemd/system.conf.d
+    state: directory
+    mode: '0644'
+  when: ansible_facts.packages['systemd'][0].version is version('234', '>=')
+
+- name: "1.17 Generic Pacemaker - Ensure Process limit is raised (systemd >= 234)"
+  ansible.builtin.copy:
+    dest: /etc/systemd/system.conf.d/99-pacemaker.conf
+    content: |
+      [Manager]
+      DefaultTasksMax=4096
+    mode: '0644'
+  register: raise_process_limit
+  when: ansible_facts.packages['systemd'][0].version is version('234', '>=')

 # eth0 is the "db" NIC
 - name: "1.17 Generic Pacemaker - Ensure clustering can manage Virtual IPs on the Database Interface"
@@ -24,7 +49,7 @@
     line: "CLOUD_NETCONFIG_MANAGE='no'"
   when:
     - ansible_facts.packages['cloud-netconfig-azure']
-    - (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3
+    - ansible_facts.packages['cloud-netconfig-azure'][0].version is version('1.3', '<')

 - name: "1.17 Generic Pacemaker - Stop SBD service"
   ansible.builtin.systemd:
@@ -186,21 +211,21 @@

 - name: "1.17 Generic Pacemaker - Create Azure Fencing Agent (MSI)"
   ansible.builtin.shell: >
     crm configure primitive rsc_st_azure stonith:fence_azure_arm params
     subscriptionId="{{ fencing_spn_subscription_id }}"
     resourceGroup="{{ resource_group_name }}"
     msi=true
     pcmk_monitor_retries=4
     pcmk_action_limit=3
     power_timeout=240
     pcmk_reboot_timeout=900
     pcmk_delay_max=15
     pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}"
     op monitor interval=3600 timeout=120
   failed_when: crm_configure_result.rc > 1
   when:
     - use_msi_for_clusters
-    - distribution_full_id in ["sles_sap12.5", "sles_sap15.1","sles_sap15.2", "sles_sap15.3", "sles_sap15.4", "sles_sap15.5"]
+    - distribution_full_id in ["sles_sap12.5", "sles_sap15.1","sles_sap15.2", "sles_sap15.3", "sles_sap15.4", "sles_sap15.5", "sles_sap15.6"]


 - name: "1.17 Generic Pacemaker - Stonith Timeout Property"
diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml
index 1e57c00bc2..5e919a470a 100644
--- a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml
+++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml
@@ -83,6 +83,9 @@ package_versions:
   redhat8.9:
     - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
     - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"}
+  redhat8.10:
+    - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"}
   redhat9.0:
     - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"}
     - {name: "resource-agents-cloud", version: "4.10.0", compare_operator: ">=", version_type: "loose"}
@@ -107,3 +110,6 @@ package_versions:
   sles_sap15.5:
     - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
     - {name: "resource-agents", version: "4.10.0", compare_operator: ">=", version_type: "semver"}
+  sles_sap15.6:
+    - {name: "pacemaker", version: "1.1.23", compare_operator: ">=", version_type: "loose"}
+    - {name: "resource-agents", version: "4.10.0", compare_operator: ">=", version_type: "semver"}
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml
index 0afccfbf40..cdd170f0c4 100644
--- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2-provision.yml
@@ -92,8 +92,6 @@
     - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined

-
-
 # SSH access between nodes is only required on SUSE for crm_clustering
 - name: "1.18.2 Generic Pacemaker - SUSE specific network and SSH configuration"
   when: ansible_os_family | upper == "SUSE"
diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml
index 10a69446dc..fea076a0fd 100644
--- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml
+++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/main.yml
@@ -13,7 +13,6 @@
   when:
     - (database_cluster_type == 'ISCSI')

-
 - name: "1.18 Generic Pacemaker - SBD Devices"
   ansible.builtin.import_tasks: 1.18.1.2-sbd.yaml
   when:
diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml
index 2bcd2a8ef1..d04d9267bb 100644
--- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml
+++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml
@@ -14,7 +14,7 @@
     - ansible_os_family | upper == "REDHAT"
   block:
     - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package"
-      ansible.builtin.yum:
+      ansible.builtin.dnf:
         name:
           - "pcp"
           - "pcp-pmda-hacluster"
diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-Suse.yaml b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-Suse.yaml
index b37aeb2082..3e7533b7c9 100644
--- a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-Suse.yaml
+++ b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.1-repositories-Suse.yaml
@@ -33,7 +33,6 @@
     - zypresult.rc != 0
     - zypresult.rc != 4

-
 - name: "1.3 Repos: Add the repositories result"
   ansible.builtin.debug:
     var: zypresult
@@ -43,12 +42,3 @@
   ansible.builtin.debug:
     var: zypresult
     verbosity: 2
-
-- name: "1.3 Repos: Add the HA repositories for RHEL"
-  ansible.builtin.dnf:
-    enablerepo: rhel-9-for-x86_64-highavailability-rpms
-    disable_gpg_check: true
-  changed_when: false
-  when:
-    - distribution_id in ['redhat9']
-    - node_tier == 'ha'
diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml
index e69de29bb2..70854f2a01 100644
--- a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml
+++ b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml
@@ -0,0 +1,39 @@
+# Analyse the repo list for this distribution selecting only those
+# packages assigned to the active tier or 'all'.
+- name: "1.3 Repository - Determine custom repos appropriate for tier {{ distribution_full_id }}"
+  ansible.builtin.set_fact:
+    custom_repos_for_tier: "{{ custom_repos[distribution_full_id] |
+                               selectattr('tier', 'in', ['all', tier]) |
+                               list }}"
+
+# Print the list of matching repos if verbosity is 2 or greater
+- name: "1.3 Repos: Print matching repos"
+  ansible.builtin.debug:
+    var: custom_repos_for_tier
+    verbosity: 2
+  when:
+    - custom_repos_for_tier is defined
+    - custom_repos_for_tier | length > 0
+
+- name: "1.3 Repos: Add the repositories {{ ansible_os_family }}"
+  ansible.builtin.dnf:
+    name: "{{ item.url }}"
+    state: "{{ item.state }}"
+    disable_gpg_check: true
+  loop: "{{ custom_repos_for_tier }}"
+  register: custom_repos_zypresult
+  ignore_errors: true
+
+- name: "1.3 Repos: Add the repositories result"
+  ansible.builtin.debug:
+    var: custom_repos_zypresult
+    verbosity: 2
+
+- name: "1.3 Repos: Add the HA repositories for RHEL"
+  ansible.builtin.dnf:
+    enablerepo: rhel-9-for-x86_64-highavailability-rpms
+    disable_gpg_check: true
+  changed_when: false
+  when:
+    - distribution_id in ['redhat9']
+    - node_tier == 'ha'
diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
index 5b7bb12108..061468ec7f 100644
--- a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
+++ b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
@@ -65,12 +65,16 @@

 - name: "1.3 Repository: - Manage the repositories."
   ansible.builtin.include_tasks: "1.3.2-custom-repositories-Suse.yaml"
-  when: ansible_os_family | upper == 'SUSE'
+  when:
+    - custom_repos is defined
+    - ansible_os_family | upper == 'SUSE'

 # Doing it this way to handle also Oracle Distros
 - name: "1.3 Repository: - Prepare the repositories."
ansible.builtin.include_tasks: "1.3.2-custom-repositories-RedHat.yaml" - when: ansible_os_family | upper == 'REDHAT' + when: + - custom_repos is defined + - ansible_os_family | upper == 'REDHAT' # - name: "1.3 Repos: Install EPEL repo" # ansible.builtin.yum_repository: diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index d63fdbf9c4..f831e412a9 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -12,22 +12,31 @@ # For example, XX.Y where XX is the major version and Y is the minor version repos: redhat7.4: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } redhat7.6: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } redhat7.7: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } redhat7.9: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } redhat8.1: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.2: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.4: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.6: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.8: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.9: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + redhat8.10: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat9.0: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'present' } redhat9.2: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'present' } # do not have any repos that are needed for RedHat at the moment. 
sles_sap12.3: sles_sap12.4: @@ -37,6 +46,7 @@ repos: sles_sap15.3: sles_sap15.4: sles_sap15.5: + sles_sap15.6: sles15.3: sles15.4: sles15.5: @@ -53,3 +63,4 @@ repos: oraclelinux8.7: oraclelinux8.8: oraclelinux8.9: + oraclelinux8.10: diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml index adb3559c87..f7ae05130e 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml @@ -44,7 +44,6 @@ - is_rhel_90_or_newer - init_d_exists - # /*----------------------------------------------------------------------------8 # | END | # +------------------------------------4---------------------------------------*/ diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml index 773f7a09ea..b7af028a92 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml @@ -48,7 +48,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "1.4 Packages: - Show result from packages module" @@ -71,7 +72,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -93,7 +95,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: @@ -116,7 +119,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -140,7 +144,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "1.4 Packages: - Show result from packages module" @@ -163,7 +168,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -186,7 +192,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then 
remove packages that we don't want
   rescue:
@@ -208,7 +215,8 @@
                                  list }}"
     state: "{{ item.state }}"
   loop:
-    - { state: 'present' }  # First install required packages
+    - { state: 'latest' }   # Update necessary packages
+    - { state: 'present' }  # Install required packages
     - { state: 'absent' }   # Then remove packages that we don't want
   rescue:
     - name: "Print stderr before getting error code"
diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
index d21f088831..0ffbc8d64f 100644
--- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
+++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
@@ -2,6 +2,14 @@
 # |                     Update packages                                        |
 # +------------------------------------4---------------------------------------*/

+# Note: in some rare cases, the RHEL update task will fail because it expects the directory to exist before it can even check for presence of the file.
+- name: "1.4 Packages: - ensure path /etc/sap_deployment_automation/{{ sap_sid | upper }} exists"
+  ansible.builtin.file:
+    path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}"
+    state: directory
+    mode: '0755'
+
+
 - name: "1.4 Packages: - check if {{ sap_sid }} is updated"
   ansible.builtin.stat:
     path: "/etc/sap_deployment_automation/{{ sap_sid | upper }}/packages_installed.txt"
@@ -9,7 +17,7 @@

 - name: "1.4 Packages: - Update packages"
   become: true
-  ansible.builtin.yum:
+  ansible.builtin.dnf:
     name: '*'
     state: latest
     skip_broken: true
   register: reboot_output
   when:
     - tier == 'os'
+    - ansible_distribution != "OracleLinux"

 # Analyse the package list for this distribution selecting only those
 # packages assigned to the active tier or 'all'.
# - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest # become: true -# ansible.builtin.yum: +# ansible.builtin.dnf: # name: '*' # state: latest # skip_broken: true diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 9d3ef42ddb..0b01490abc 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -44,7 +44,7 @@ packages: - { tier: 'os', package: 'lvm2', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'numad', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'cifs-utils', node_tier: 'all', state: 'present' } - # - { tier: 'os', package: 'unrar', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -152,6 +152,7 @@ packages: - { tier: 'os', package: 'lvm2', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'numad', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'cifs-utils', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -159,6 +160,7 @@ packages: - { tier: 'os', package: 'mksh', node_tier: 'db2', state: 'present' } - { tier: 'os', package: 'libstdc++.so.6', node_tier: 'db2', state: 'present' } - { tier: 'os', package: 'unzip', node_tier: 'db2', state: 'present' } + - { tier: 'os', package: 'pam', node_tier: 'db2', state: 'latest' } - { tier: 'os', package: 'libpam.so.0', node_tier: 'db2', state: 'present' } - { tier: 'db2', package: 'acl', node_tier: 'db2', state: 'present' } # --------------------------- End - Packages required for DB2 -------------------------------------------8 @@ -213,6 +215,7 @@ packages: - { tier: 'os', package: 'lvm2', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'numad', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'cifs-utils', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -244,6 +247,10 @@ packages: - { tier: 'ha', package: 'nmap', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-common', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'db2', state: 'present' } # ------------------------- End - Packages required for Clustering -----------------------------------------8 # ------------------------- Begin 
- Packages required for Start/Stop ------------------------------------8 - { tier: 'ha', package: 'sap-cluster-connector', node_tier: 'hana', state: 'present' } @@ -253,7 +260,7 @@ packages: redhat8.1: redhat8.2: redhat8.4: - redhat8.6: + redhat8.6: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat8.8: @@ -262,6 +269,9 @@ packages: redhat8.9: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } + redhat8.10: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat9.0: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } @@ -327,7 +337,7 @@ packages: - { tier: 'os', package: 'tuned', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'numad', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'ntp', node_tier: 'all', state: 'absent' } - - { tier: 'os', package: 'unrar', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'unrar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -369,12 +379,6 @@ packages: - { tier: 'ha', package: 'azure-cli', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'azure-cli', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'azure-cli', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } # Added as part of documentation update - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'ers', state: 'present' } @@ -390,12 +394,24 @@ packages: # than /usr/bin/python. 
# Required to enable ansible to use /usr/bin/python on SLE 15 SP2 - { tier: 'os', package: 'python2-rpm', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } sles_sap15.2: - { tier: 'os', package: 'python-xml', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'python2-rpm', node_tier: 'all', state: 'present' } - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } sles_sap15.3: # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' } - { tier: 'os', package: 'python-xml', node_tier: 'all', state: 'present' } @@ -403,6 +419,12 @@ packages: - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } sles_sap15.4: # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' } - { tier: 'os', package: 'python3-xml', node_tier: 'all', state: 'present' } @@ -410,6 +432,10 @@ packages: - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' } + sles_sap15.5: # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' } - { tier: 'os', package: 
'python3-xml', node_tier: 'all', state: 'present' }
@@ -417,7 +443,23 @@ packages:
     - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' }
     - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' }
     - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' }
-
+    # These packages cause issues on SLES15 SP5 due to changes to the public cloud SDKs
+    # https://www.suse.com/c/incompatible-changes-ahead-for-public-cloud-sdks/
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' }
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' }
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' }
+  sles_sap15.6:
+    # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' }
+    - { tier: 'os', package: 'python3-xml', node_tier: 'all', state: 'present' }
+    # SLES15 SP4 and later removed python 2, so there is no python2-rpm package. Additionally, python-xml is now part of python-base and referenced as python3-xml
+    - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' }
+    - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' }
+    - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' }
+    # These packages cause issues on SLES15 SP5 due to changes to the public cloud SDKs
+    # https://www.suse.com/c/incompatible-changes-ahead-for-public-cloud-sdks/
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' }
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' }
+    - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' }
 # Adding packages for Oracle linux 8.4 to start with, copied the list from RHEL.
 # Adding additional Oracle linux packages as per SAP Note 2069760 - Oracle Linux 7.x SAP Installation and Upgrade. Need to add the groupinstall command.
oraclelinux8: @@ -516,3 +558,7 @@ packages: - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } # - { tier: 'os', package: 'kmod-oracleasm', node_tier: 'oracle-asm', state: 'present' } # - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } + + oraclelinux8.10: + - { tier: 'os', package: 'oracle-database-preinstall-19c', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } diff --git a/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml index 9666dc8236..228aaa3fb4 100644 --- a/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml +++ b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml @@ -47,6 +47,7 @@ lv: "{{ item.lv }}" vg: "{{ item.vg }}" size: "{{ item.size }}" + opts: "{{ lvol_opts_from_lv_item }}" active: true state: present shrink: false diff --git a/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-expand-volumes.yml b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-expand-volumes.yml new file mode 100644 index 0000000000..71b35d2728 --- /dev/null +++ b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-expand-volumes.yml @@ -0,0 +1,103 @@ +--- + +# /*---------------------------------------------------------------------------8 +# | | +# | OS Base Disk Configuration | +# | | +# +------------------------------------4--------------------------------------*/ +# -------------------------------------+---------------------------------------8 +# +# Task: 1.5 Expand volumes +# +# -------------------------------------+---------------------------------------8 + +# # Check the free size of the volume group +# Extend the logical volumes [tmplv & rootlv] to the required size and resize the FS +# + +# -------------------------------------+---------------------------------------8 +# + +- name: "Get Volume Group information" + ansible.builtin.shell: set -o pipefail && vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + register: vg_info + changed_when: false + +- name: "Extract free size of the VG" + ansible.builtin.set_fact: + vg_free_size: "{{ vg_info.stdout | float }}" + when: vg_info is defined and vg_info.stdout is defined + +- name: "Check if free size is more than 20 GB" + ansible.builtin.set_fact: + sufficient_vg_space: "{{ vg_free_size | default(0) | float > 20.0 }}" + when: + - vg_free_size is defined + - node_tier not in ['oracle', 'oracle-asm', 'ase', 'hana', 'db2'] + +- name: "Check if free size is more than 30 GB" + ansible.builtin.set_fact: + sufficient_vg_space_db: "{{ vg_free_size | default(0) | float > 30.0 }}" + when: + - vg_free_size is defined + - node_tier in ['oracle', 'oracle-asm', 'ase', 'hana', 'db2'] + + +# ------------------------------------- +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ vg_info }}" + - "vg_free_size: {{ vg_free_size }}" + - "sufficient_vg_space: {{ sufficient_vg_space | default(false)}}" + - "sufficient_vg_space_db: {{ sufficient_vg_space_db | default(false)}}" + - "host: {{ inventory_hostname }}" + verbosity: 2 +# ------------------------------------ + +- name: "Extend the logical volumes and resize the FS" + community.general.lvol: + vg: "{{ item.vg }}" + lv: "{{ item.lv }}" + size: "{{ item.size }}" + active: true + state: present + shrink: false + resizefs: true + loop: + - { vg: '{{ vg_root }}', lv: 'rootlv', size: '{{ lv_root_size }}' } + - { vg: '{{ vg_root 
}}', lv: 'tmplv', size: '{{ lv_tmp_size }}' } + when: + - sufficient_vg_space | default(false) + - node_tier not in ['oracle', 'oracle-asm', 'ase', 'hana', 'db2'] + +- name: "Extend the logical volumes and resize the FS" + community.general.lvol: + vg: "{{ item.vg }}" + lv: "{{ item.lv }}" + size: "{{ item.size }}" + active: true + state: present + shrink: false + resizefs: true + loop: + - { vg: '{{ vg_root }}', lv: 'rootlv', size: '{{ lv_root_size_db }}' } + - { vg: '{{ vg_root }}', lv: 'tmplv', size: '{{ lv_tmp_size }}' } + when: + - sufficient_vg_space_db | default(false) + - node_tier in ['oracle', 'oracle-asm', 'ase', 'hana', 'db2'] + +# ------------------------------------- +- name: "Print recent Volume Group size and Logical Volume information" + ansible.builtin.shell: | + set -o pipefail + vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + lvdisplay {{ vg_root }} + register: recent_info + +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ recent_info | to_nice_json }}" + verbosity: 2 +# ------------------------------------ diff --git a/deploy/ansible/roles-os/1.5-disk-setup/tasks/main.yml b/deploy/ansible/roles-os/1.5-disk-setup/tasks/main.yml index 8ef471b794..0890b90b8c 100644 --- a/deploy/ansible/roles-os/1.5-disk-setup/tasks/main.yml +++ b/deploy/ansible/roles-os/1.5-disk-setup/tasks/main.yml @@ -187,6 +187,10 @@ when: - custom_logical_volumes is defined +- name: "1.5 Disk setup: - Expand Volumes" + ansible.builtin.include_tasks: "1.5-expand-volumes.yml" + when: + - ansible_os_family | upper == 'REDHAT' ... # /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml b/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml index 10a273787f..9166b8a942 100644 --- a/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml +++ b/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml @@ -196,7 +196,7 @@ block: - name: "ORACLE ASM: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: - "{{ target_media_location }}/downloads/oracleasmlib-2.0.17-1.el8.x86_64.rpm" - "{{ target_media_location }}/downloads/oracleasm-support-2.1.12-1.el8.x86_64.rpm" diff --git a/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml b/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml new file mode 100644 index 0000000000..c70873d143 --- /dev/null +++ b/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml @@ -0,0 +1,75 @@ +--- + +# /*---------------------------------------------------------------------------8 +# | | +# | OS Base Disk Configuration | +# | | +# +------------------------------------4--------------------------------------*/ +# -------------------------------------+---------------------------------------8 +# +# Task: 1.5.3 - os-disk-setup SAP-CAL +# +# -------------------------------------+---------------------------------------8 + +# # Check the free size of the volume group +# Extend the logical volumes [tmplv & rootlv] to the required size and resize the FS +# + +# -------------------------------------+---------------------------------------8 +# + +- name: "Get Volume Group information" + ansible.builtin.shell: set -o pipefail && vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + register: vg_info + changed_when: false + +- name: "Extract free size of the VG" + ansible.builtin.set_fact: + vg_free_size: "{{ vg_info.stdout | float }}" + 
when: vg_info is defined and vg_info.stdout is defined + +- name: "Check if free size is more than 20 GB" + ansible.builtin.set_fact: + sufficient_vg_space: "{{ vg_free_size | default(0) | float > 20.0 }}" + when: vg_free_size is defined + failed_when: sufficient_vg_space is not defined or not sufficient_vg_space + +# ------------------------------------- +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ vg_info }}" + - "vg_free_size: {{ vg_free_size }}" + - "sufficient_vg_space: {{ sufficient_vg_space }}" + verbosity: 2 +# ------------------------------------ + +- name: "Extend the logical volumes and resize the FS" + community.general.lvol: + vg: "{{ item.vg }}" + lv: "{{ item.lv }}" + size: "{{ item.size }}" + active: true + state: present + shrink: false + resizefs: true + loop: + - { vg: '{{ vg_root }}', lv: 'rootlv', size: '{{ lv_root_size }}' } + - { vg: '{{ vg_root }}', lv: 'tmplv', size: '{{ lv_tmp_size }}' } + when: + - sufficient_vg_space is defined and sufficient_vg_space + +# ------------------------------------- +- name: "Print recent Volume Group size and Logical Volume information" + ansible.builtin.shell: | + set -o pipefail + vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + lvdisplay {{ vg_root }} + register: recent_info + +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ recent_info | to_nice_json }}" + verbosity: 2 +# ------------------------------------ diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml index 8ad0c4d8d0..11020accfd 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml @@ -2,6 +2,17 @@ - name: "2.10-sap-notes: Reboot after the selinux is configured" become: true + become_user: root ansible.builtin.reboot: reboot_timeout: 300 -# ... 
+ post_reboot_delay: 10 + failed_when: false +# +- name: "2.10-sap-notes: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors + +# Wait for Connection after reboot +- name: "2.10-sap-notes: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index b7927920b1..82734dbc0f 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -4,6 +4,8 @@ when: - bom is not defined - not is_run_with_infraCreate_only + - enable_sap_cal is not defined or not enable_sap_cal + ansible.builtin.include_role: name: roles-sap/3.3.1-bom-utility tasks_from: bom-register @@ -42,13 +44,12 @@ - name: "2.10.3 sap-notes : - Run saptune_check" ansible.builtin.shell: saptune_check register: saptune_check_result - + changed_when: false - name: "2.10.3 sap-notes : - Interrogate active saptune solution" ansible.builtin.shell: saptune solution enabled register: active_saptune_solution - when: - - saptune_check_result.rc == 0 + changed_when: false # We need to capture the first block of non-whitespace characters # output from saptune solution enabled command has an empty line followed by solution name @@ -90,6 +91,7 @@ when: - is_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2', 'sybase'] + - saptune_solution_enabled is defined - saptune_solution_enabled == 'NONE' block: - name: "2.10.3 sap-notes : - Copy sapnote 2382421 to /etc/saptune/override" @@ -107,26 +109,42 @@ net.ipv4.tcp_tw_reuse = 0 net.ipv4.tcp_tw_recycle = 0 # /usr/lib/sysctl.d/99-sysctl.conf -- name: "2.10.3 sap-notes : - Set fact for saptune solution to use" +- name: "2.10.3 sap-notes : - Set fact for saptune solution to use" ansible.builtin.set_fact: saptune_solution_to_apply: >- - {%- if 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-APP+DB' - {%- elif 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} - 'NETWEAVER+HANA' - {%- elif node_tier in ['scs', 'ers','pas','app'] and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-APPSERVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-DBSERVER' - {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} - 'NETWEAVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} - 'HANA' - {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform in ['SYBASE', 'DB2', 'ORACLE', 'ORACLE-ASM', 'SQLSERVER'] and bom.product_ids.scs is search(':NW\d{3}') -%} - 'NETWEAVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} - 'HANA' - {%- elif node_tier in ['sybase'] and platform == 'SYBASE' and bom.product_ids.scs is search(':NW\d{3}') -%} + {%- if 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' -%} + {%- if bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APP+DB' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER+HANA' + {%- else -%} + 'NETWEAVER' + {%- endif -%} + {%- else -%} + 'HANA' + {%- endif 
-%} + {%- elif node_tier == 'hana' and platform == 'HANA' -%} + {%- if bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-DBSERVER' + {%- elif bom.product_ids.scs is search(':BW4HANA') -%} + 'HANA' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'HANA' + {%- endif -%} + {%- else -%} + 'HANA' + {%- endif -%} + {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform == 'HANA' and bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APPSERVER' + {%- elif bom.product_ids.scs is search(':BW4HANA') -%} + 'NETWEAVER' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER' + {%- endif -%} + {%- elif node_tier in ['sybase'] and platform == 'SYBASE' and bom.product_ids is defined and bom.product_ids.scs is search(':NW\d{3}') -%} 'SAP-ASE' {%- else -%} 'NETWEAVER' @@ -140,6 +158,7 @@ - name: "2.10.3 sap-notes : - Run saptune solution revert if verify fails" when: + - saptune_solution_enabled is defined - saptune_solution_enabled != 'NONE' - saptune_solution_verify.rc != 0 ansible.builtin.command: "saptune solution revert {{ saptune_solution_enabled }}" @@ -162,3 +181,4 @@ - name: "2.10.3 sap-notes : - Run saptune solution verify" ansible.builtin.command: "saptune solution verify {{ saptune_solution_to_apply }}" changed_when: false + failed_when: false diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index 749adc073d..4bc892eb27 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml @@ -56,6 +56,7 @@ - name: "2.10.1 sap-notes: Reboot app VMs after selinux is configured" become: true + become_user: root ansible.builtin.reboot: reboot_timeout: 300 post_reboot_delay: 60 diff --git a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml index c14832f0b9..59c144d58d 100644 --- a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml @@ -16,6 +16,7 @@ - { node_tier: 'hana', path: '/hana', mode: '0755', owner: 'root', group: 'root', state: 'directory' } - { node_tier: 'pas', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } - { node_tier: 'app', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } + - { node_tier: 'scs', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } when: - item.node_tier == "all" or item.node_tier == node_tier - not users_created.stat.exists diff --git a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml index e92e1f5dc1..977d8c82ce 100644 --- a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml @@ -1,72 +1,10 @@ --- - -- name: "Exports: Create SAP Directories - {{ target_media_location }}" - ansible.builtin.file: - path: "{{ target_media_location }}" - state: directory - mode: 0755 +- name: "2.3 Exports: - Create SAP install export" when: - node_tier == 'scs' - - MULTI_SIDS is undefined - usr_sap_install_mountpoint is undefined - -- name: "Exports: Create SAP Directories - saptrans" - ansible.builtin.file: - path: "/usr/sap/trans" - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is 
undefined - - sap_trans is undefined - -- name: "Exports: Create SAP Directories - saptrans" - ansible.builtin.file: - path: "/sapmnt/{{ sap_sid | upper }}" - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is undefined - - sap_mnt is undefined - - -- name: "Exports: Create SAP Directories for MSIDs" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/sapmnt' } - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - -# Create Folders under sapmnt whilst using Local disk for Multi-SID installation. -- name: Create Filesystems under sapmnt block: - - name: Create Filesystems for multi-sid installation - ansible.builtin.file: - path: /sapmnt/{{ item.sid }} - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - loop: "{{ MULTI_SIDS }}" - -- name: "2.3 Exports: - Create SAP Directories (install)" - block: - - - name: "2.3 Exports: - Create SAP Directories (install)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '{{ target_media_location }}' } - - - name: "2.3 Exports: - NFS Server Config on Suse (install)" + - name: "2.3 Exports: - NFS Server Config on Suse (install)" ansible.builtin.lineinfile: path: "{{ item.path }}" regexp: "{{ item.regexp }}" @@ -76,7 +14,6 @@ mode: 0644 loop: - { path: '/etc/exports', regexp: '^{{ target_media_location }}', line: '{{ target_media_location }} *(rw,sync,no_wdelay,no_root_squash)' } - # - { tier: 'preparation', path: '/etc/sysconfig/nfs', regexp: '^NFS3_SERVER_SUPPORT=', line: 'NFS3_SERVER_SUPPORT="no"' } - { path: '/etc/sysconfig/nfs', regexp: '^NFS3_SERVER_SUPPORT=', line: 'NFS3_SERVER_SUPPORT="yes"' } - { path: '/etc/sysconfig/nfs', regexp: '^NFS4_SUPPORT=', line: 'NFS4_SUPPORT="yes"' } when: @@ -98,29 +35,19 @@ when: - distribution_id in ["redhat8", "redhat9"] +- name: "2.3 Exports: - Create SAP sapmnt export" when: - node_tier == 'scs' - - usr_sap_install_mountpoint is undefined - -- name: "2.3 Exports: - Create SAP Directories (sapmnt)" + - sap_mnt is undefined block: - - - name: "2.3 Exports: - Create SAP Directories (sapmnt)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/sapmnt/{{ sap_sid | upper }}' } - - - name: "2.3 Exports: - NFS Server Config on Suse (sapmnt)" + - name: "2.3 Exports: - NFS Server Config on Suse (sapmnt)" ansible.builtin.lineinfile: - path: "{{ item.path }}" - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - owner: root - group: root - mode: 0644 + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + owner: root + group: root + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/sysconfig/nfs', regexp: '^# RPCNFSDARGS=', line: 'RPCNFSDARGS="-N 2 -N 3 -U"' } @@ -130,12 +57,12 @@ - name: "2.3 Exports: - NFS Server Config on : {{ ansible_os_family | lower ~ ansible_distribution_major_version }} (sapmnt)" ansible.builtin.lineinfile: - path: "{{ item.path }}" - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - owner: root - group: root - mode: 0644 + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + owner: root + group: root + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { path: 
'/etc/nfs.conf', regexp: '^# vers3=', line: ' vers3=y' } @@ -144,21 +71,11 @@ when: - distribution_id == "redhat8" +- name: "2.3 Exports: - Create SAP trans export" when: - node_tier == 'scs' - - sap_mnt is undefined - -- name: "2.3 Exports: - Create SAP Directories (saptrans)" + - sap_trans is undefined block: - - - name: "2.3 Exports: - Exports: Create SAP Directories (saptrans)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/usr/sap/trans' } - - name: "Exports: NFS Server Config on : {{ ansible_distribution | lower ~ ansible_distribution_major_version }}" ansible.builtin.lineinfile: path: "{{ item.path }}" @@ -166,7 +83,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/sysconfig/nfs', regexp: '^# RPCNFSDARGS=', line: 'RPCNFSDARGS="-N 2 -N 3 -U"' } @@ -181,7 +98,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/nfs.conf', regexp: '^# vers3=', line: ' vers3=y' } @@ -190,34 +107,6 @@ when: - distribution_id == "redhat8" - when: - - node_tier == 'scs' - - sap_trans is undefined - -- name: "Exports: Create SAP Directories for MSIDs" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/sapmnt' } - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - -# Create Folders under sapmnt whilst using Local disk for Multi-SID installation. -- name: Create Filesystems under sapmnt - block: - - name: Create Filesystems for multi-sid installation - ansible.builtin.file: - path: /sapmnt/{{ item.sid }} - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - loop: "{{ MULTI_SIDS }}" - - name: "Exports: NFS Server Config on Oracle Linux 8" ansible.builtin.lineinfile: path: "{{ item.path }}" @@ -225,7 +114,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { tier: 'preparation', path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { tier: 'preparation', path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } @@ -246,7 +135,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { tier: 'preparation', path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { tier: 'preparation', path: '/etc/exports', regexp: '^{{ target_media_location }}', line: '{{ target_media_location }} *(rw,sync,no_wdelay,no_root_squash)' } @@ -266,7 +155,7 @@ line: "/sapmnt/{{ item.sid | upper }} *(rw,sync,no_wdelay,no_root_squash)" owner: root group: root - mode: 0644 + mode: '0644' loop: "{{ MULTI_SIDS }}" when: - (ansible_distribution | lower ~ ansible_distribution_major_version) == "oraclelinux8" @@ -280,25 +169,28 @@ - custom_exports is defined - name: "2.3 Exports: - Local NFS" + when: + - node_tier == 'scs' + - sap_trans is undefined or usr_sap_install_mountpoint is undefined or sap_mnt is undefined block: - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: 'nfsserver' + nfs_service: nfsserver when: "'SUSE' == ansible_os_family | 
upper" - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: "nfs-server" + nfs_service: nfs-server when: "'redhat8' == distribution_id or 'redhat9' == distribution_id" - name: "2.3 Exports: - Set the NFS Service name oracle {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: "nfs-server" + nfs_service: nfs-server when: "'oraclelinux8' == distribution_id" - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: 'nfs' + nfs_service: nfs when: "'redhat7' == distribution_id" - name: "2.3 Exports: - NFS Ensure the NFS service is started" @@ -311,7 +203,3 @@ ansible.builtin.systemd: name: "{{ nfs_service }}" state: restarted - - when: - - node_tier == 'scs' - - (sap_trans is undefined) or (usr_sap_install_mountpoint is undefined) or (sap_mnt is undefined) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index 549eaa1478..142df8be1f 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -104,7 +104,7 @@ ansible_facts. {% endif %} {% elif tier in ['hana', 'oracle', 'oracle-asm', 'db2', 'sybase'] %} {% set db_virtual_host = hostvars[host]['custom_db_virtual_hostname'] if 'custom_db_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} -{% if db_virtual_host not in virtual_host_names and not database_high_availability %} +{% if db_virtual_host not in virtual_host_names %} {% set _ = virtual_host_names.append(db_virtual_host) %} {% endif %} {% endif %} @@ -129,16 +129,23 @@ ansible_facts. {% else %} {# Loop through remaining IPs for the virtual host #} {% for ip in host_ips[1:] %} -{% if (database_scale_out) %} -{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} +{% if (database_scale_out) %} +{% if (database_high_availability) %} +{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} +{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-hsr.' + sap_fqdn) }}{{ '%-21s' | format(host + '-hsr') }} +{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} +{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-inter.' + sap_fqdn) }}{{ '%-21s' | format(host + '-inter') }} +{% endif %} +{% else %} +{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} -{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} -{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-client.' + sap_fqdn) }}{{ '%-21s' | format(host + '-client') }} -{% endif %} +{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} +{{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-hana.' + sap_fqdn) }}{{ '%-21s' | format(host + '-hana') }} +{% endif %} +{% endif %} {% else %} {% for vh_name in virtual_host_names if virtual_host_names | length >= 1 %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(vh_name + '.' 
+ sap_fqdn) }}{{ '%-21s' | format(vh_name) }} - {% endfor %} {% endif %} {% endfor %} diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml index 38fecf2c36..78c3097c94 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml @@ -12,6 +12,7 @@ when: - bom is not defined - not is_run_with_infraCreate_only + - enable_sap_cal is not defined or not enable_sap_cal # default to ASCS instance when BOM is not defined or instance type in BOM is not defined - name: "2.6 SCS HA Install: Default instance type" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index f4a1059dad..09e64271a4 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -196,7 +196,7 @@ register: set_immutable_attribute when: - tier == 'sapos' - - node_tier != 'hana' + - node_tier not in ['hana' , 'observer'] - sap_mnt is defined # /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 5352446108..8cd6cdac43 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -12,12 +12,12 @@ - name: "ANF Mount: Set the NFSmount options" ansible.builtin.set_fact: mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' - when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - name: "ANF Mount: Set the NFSmount options" ansible.builtin.set_fact: mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' - when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - name: "ANF Mount: Define this SID" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml index cd77dd9587..ad6f008fb5 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml @@ -187,7 +187,7 @@ register: set_immutable_attribute when: - tier == 'sapos' - - node_tier != 'hana' + - node_tier not in ['hana','observer'] - sap_mnt is defined - use_simple_mount is defined and use_simple_mount diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml 
b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index 8c1989c242..08400eb30a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -12,12 +12,12 @@ - name: "ANF Mount: Set the NFSmount options" ansible.builtin.set_fact: mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' - when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - name: "ANF Mount: Set the NFSmount options" ansible.builtin.set_fact: mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' - when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - name: "ANF Mount: Define this SID" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index fe9250db4d..4004d69bba 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -30,7 +30,6 @@ map(attribute='type') | sort | unique | list | length | int }}" - - name: "2.6 SAP Mounts: - choose the shared disk" ansible.builtin.set_fact: sharedpath: "{% if shareddisk == '1' %}/dev/vg_hana_shared/lv_hana_shared\ @@ -59,7 +58,6 @@ - "Shared path: {{ sharedpath }}" # Mount Filesystems - - name: "2.6 SAP Mounts: - Check if the 'sap' disk exists" ansible.builtin.set_fact: sap_disk_exists: "{{ disks | selectattr('host', 'defined') | @@ -70,9 +68,9 @@ - name: "2.6 SAP Mounts: - Mount local sap file systems" ansible.posix.mount: - src: '/dev/vg_sap/lv_usrsap' - path: '/usr/sap' - fstype: 'xfs' + src: /dev/vg_sap/lv_usrsap + path: /usr/sap + fstype: xfs opts: defaults state: mounted when: @@ -82,9 +80,9 @@ - name: "2.6 SAP Mounts: - Mount local kdump file path to save vmcore" ansible.posix.mount: - src: "/dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump" - path: '/usr/crash' - fstype: 'xfs' + src: /dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump + path: /usr/crash + fstype: xfs opts: defaults state: mounted when: @@ -95,8 +93,8 @@ - name: "2.6 SAP Mounts: - Mount local file systems (shared)" ansible.posix.mount: src: "{{ sharedpath }}" - path: '/hana/shared' - fstype: 'xfs' + path: /hana/shared + fstype: xfs opts: defaults state: mounted when: @@ -105,9 +103,9 @@ - name: "2.6 SAP Mounts: - Mount local file systems (backup)" ansible.posix.mount: - src: '/dev/vg_hana_backup/lv_hana_backup' + src: /dev/vg_hana_backup/lv_hana_backup path: '{{ hana_backup_path }}' - fstype: 'xfs' + fstype: xfs opts: defaults state: mounted when: @@ -127,7 +125,7 @@ ansible.posix.mount: src: '/dev/vg_hana_data/lv_hana_data' path: "{{ hana_data_basepath }}" - fstype: 'xfs' + fstype: xfs opts: defaults state: mounted when: @@ -138,37 +136,26 @@ ansible.posix.mount: 
src: '/dev/vg_hana_log/lv_hana_log' path: "{{ hana_log_basepath }}" - fstype: 'xfs' + fstype: xfs opts: defaults state: mounted when: - node_tier == 'hana' - hana_log_mountpoint is undefined -- name: "Exports: Create SAP Trans MSIDs" +- name: "2.6 SAP Mounts: Create SAP Trans" ansible.builtin.file: - path: '/usr/sap/trans' + path: /usr/sap/trans state: directory - mode: 0755 + mode: '0755' when: - node_tier == 'scs' - - MULTI_SIDS is defined - - sap_trans is undefined - -- name: "Exports: Create SAP Trans on PAS and APP Servers" - ansible.builtin.file: - path: '/usr/sap/trans' - state: directory - mode: 0755 - when: - - node_tier in ['pas','app'] - sap_trans is undefined - # Mount SAP TransFilesystems - name: Mount Filesystems block block: - - name: Mount SAP Transport Filesystems when not using external NFS (all app tier) + - name: "2.6 SAP Mounts: Mount SAP Transport Filesystems when not using external NFS (all app tier)" ansible.posix.mount: src: "{{ item.src }}" path: "{{ item.path }}" @@ -181,25 +168,7 @@ - tier == 'sapos' - node_tier in ['pas', 'app'] - sap_trans is undefined - - nfs_server != ansible_hostname - rescue: - - name: Re-mount Filesystems when not using external NFS (app & pas) - ansible.builtin.debug: - msg: "Trying to remount sap transport " - - name: Re-mount Filesystems when not using external NFS (app & pas) - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: defaults - state: remounted - loop: - - { type: 'nfs4', src: '{{ nfs_server }}:/usr/sap/trans', path: '/usr/sap/trans' } - when: - - tier == 'sapos' - - node_tier in ['pas', 'app'] - - sap_trans is undefined - - nfs_server != ansible_hostname + - nfs_server != ansible_hostname - name: "2.6 SAP Mounts: - Debug" ansible.builtin.debug: @@ -207,9 +176,9 @@ - name: "2.6 SAP Mounts: - Mount local install file system on SCS (when not using AFS)" ansible.posix.mount: - src: '/dev/vg_sap/lv_usrsapinstall' - path: '{{ target_media_location }}' - fstype: 'xfs' + src: /dev/vg_sap/lv_usrsapinstall + path: "{{ target_media_location }}" + fstype: xfs opts: defaults state: mounted when: @@ -225,15 +194,15 @@ ansible.builtin.file: path: "{{ tmp_directory }}" state: directory - mode: 0775 + mode: '0775' when: not tmp_dir.stat.isdir # Mount Filesystems - name: "2.6 SAP Mounts: - Mount local sapmnt on (scs) {{ ansible_hostname }}" ansible.posix.mount: - src: '/dev/vg_sap/lv_sapmnt' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'xfs' + src: /dev/vg_sap/lv_sapmnt + path: /sapmnt/{{ sap_sid | upper }} + fstype: xfs opts: defaults state: mounted when: @@ -244,25 +213,22 @@ - "'scs' in supported_tiers" - name: "2.6 SAP Mounts: - Create SAP Directories (sapmnt)" - become: true - become_user: root ansible.builtin.file: - owner: '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}' + owner: "{% if platform == 'SYBASE' %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}" group: sapsys - mode: 0755 - path: "/sapmnt/{{ sap_sid | upper }}" + mode: '0755' + path: /sapmnt/{{ sap_sid | upper }} state: directory - recurse: true when: - - node_tier not in ['oracle-asm', 'hana'] + - node_tier not in ['oracle-asm', 'hana', 'observer'] - name: "2.6 SAP Mounts: - sapmnt" block: - name: "2.6 SAP Mounts: - Mount sapmnt file system when not using external NFS (all app tier)" ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ sap_sid | 
upper }}" + path: /sapmnt/{{ sap_sid | upper }} + fstype: nfs4 opts: defaults state: mounted when: @@ -270,16 +236,16 @@ - node_tier in ['pas', 'app', 'ers', 'oracle', 'db2', 'sybase'] - sap_mnt is undefined - MULTI_SIDS is undefined - - nfs_server != ansible_hostname + - nfs_server != ansible_hostname rescue: - name: "2.6 SAP Mounts: - Re-mount File systems when not using external NFS (app & pas)" ansible.builtin.debug: - msg: "Trying to remount sap_mnt" + msg: Trying to remount sap_mnt - name: "2.6 SAP Mounts: - Mount sapmnt file system when not using external NFS (all app tier)" ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}" + path: /sapmnt/{{ sap_sid | upper }} + fstype: nfs4 opts: defaults state: remounted when: @@ -291,9 +257,9 @@ - name: "2.6 SAP Mounts: - Mount Install folder when not using AFS" ansible.posix.mount: - src: '{{ usr_sap_install_mount_point }}' - path: '{{ target_media_location }}' - fstype: 'nfs4' + src: "{{ usr_sap_install_mount_point }}" + path: "{{ target_media_location }}" + fstype: nfs4 opts: defaults state: mounted when: @@ -320,7 +286,7 @@ - name: "2.6 SAP Mounts: - Create file systems under sapmnt for oracle shared home installation" ansible.builtin.file: path: /sapmnt/{{ item.sid }} - owner: '{{ item.sidadm_uid }}' + owner: "{{ item.sidadm_uid }}" group: sapsys state: directory mode: '0644' @@ -332,12 +298,10 @@ - sap_mnt is undefined - name: "2.6 SAP Mounts: - Mount SAP File systems sapmnt for oracle shared home installation" - become: true - become_user: root ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ item.sid | upper }}' - path: '/sapmnt/{{ item.sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ item.sid | upper }}" + path: /sapmnt/{{ item.sid | upper }} + fstype: nfs4 opts: defaults state: mounted vars: @@ -366,7 +330,6 @@ - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - NFS_provider == 'AFS' - # Import this task only if the sap_mnt is defined, i.e. ANF is used - name: "2.6 SAP Mounts: - Import ANF tasks" ansible.builtin.import_tasks: 2.6.1-anf-mounts.yaml @@ -386,25 +349,20 @@ # - db_scale_out # - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - # Import this task only if the tier is ora. - name: "2.6 SAP Mounts: - Import Oracle tasks" ansible.builtin.import_tasks: "2.6.2-oracle-mounts.yaml" - when: - - node_tier == "oracle" + when: node_tier == "oracle" # Import this task only if the tier is ora for oracle-asm. - name: "2.6 SAP Mounts: - Import Oracle ASM pre-requisite tasks" ansible.builtin.import_tasks: "2.6.3-oracle-asm-prereq.yaml" - when: - - node_tier == "oracle-asm" + when: node_tier == "oracle-asm" # Import this task only if the tier is ora for oracle-asm. - name: "2.6 SAP Mounts: - Import Oracle ASM tasks" ansible.builtin.import_tasks: "2.6.3-oracle-asm-mounts.yaml" - when: - - node_tier == "oracle-asm" - # - tier == "ora" + when: node_tier == "oracle-asm" - name: "2.6 SAP Mounts: - Import Oracle observer tasks" ansible.builtin.import_tasks: "2.6.3-oracle-observer.yaml" @@ -422,14 +380,12 @@ # Import this task only if the node_tier is db2. - name: "2.6 SAP Mounts: - Import DB2 tasks" ansible.builtin.import_tasks: "2.6.4-db2-mounts.yaml" - when: - - node_tier == "db2" + when: node_tier == "db2" # Import this task only if the node_tier is ase. 
- name: "2.6 SAP Mounts: - Import SYBASE tasks" ansible.builtin.import_tasks: "2.6.6-sybase-mounts.yaml" - when: - - node_tier == "sybase" + when: node_tier == "sybase" # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 @@ -458,10 +414,7 @@ - platform != "HANA" # ensure that this is not triggered for HANA scale out with HSR which uses an observer tier VM - - name: "2.6 SAP Mounts: - Set permissions" - become: true - become_user: root when: node_tier == "hana" block: - name: "2.6 SAP Mounts: - Set permissions on hana folders" diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml index 86c88c9ad9..8d5fa0cea2 100644 --- a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml @@ -35,7 +35,7 @@ # | 03) Create dynamic extract directories | # | 04) Download media from bom | # | 05) Extract files - SAPCAR | -# | 06) Extract files - UNRAR | +# | 06) Extract files - UNRAR/UNAR | # | 07) Extract files - UNZIP | # | 08) Purge BOM file from Ansible Controller - TBD | # | @@ -188,20 +188,28 @@ # 06) Extract files - UNRAR -- name: "3.3 BoM Processing: - Extract File, UNRAR" - ansible.builtin.command: unrar x "{{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" - args: - chdir: "{{ target_media_location }}/{{ item.extractDir }}" - creates: "{{ target_media_location }}/{{ item.extractDir }}/\ - {% if item.creates is defined %}{{ item.creates }}{% else %}NOT_DEFINED{% endif %}" - loop: "{{ bom.materials.media | flatten(levels=1) }}" - when: - - item.extract is not undefined - - item.extract - - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="RAR") ) or - ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="RAR") ) or - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) or - ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) + +# Note: SAP has started to distribute Schema files packed as RAR files or spanned solid archives with self extracting EXE files against Zip files as before. +# This requires WinRAR or opensource RAR extractor. +# RHEL does not distibuted unrar anymore https://access.redhat.com/solutions/28959 +# RHEL recommends using unarchiver ( alias : unar ) in EPEL repository from Fedora as unrar from https://www.rarlab.com does not cplies with OpenSource Licenses. +# This also presents another problem as the archive files will get unpacked into /usr/sap/install/CD_EXPORT// and will cause setup to fail. 
+# This issue is now being handled by process_exe_archives.yaml + +# - name: "3.3 BoM Processing: - Extract File, UNRAR" +# ansible.builtin.command: "{% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" +# args: +# chdir: "{{ target_media_location }}/{{ item.extractDir }}" +# creates: "{{ target_media_location }}/{{ item.extractDir }}/\ +# {% if item.creates is defined %}{{ item.creates }}{% else %}NOT_DEFINED{% endif %}" +# loop: "{{ bom.materials.media | flatten(levels=1) }}" +# when: +# - item.extract is not undefined +# - item.extract +# - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="RAR") ) or +# ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="RAR") ) or +# ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) or +# ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) # 07) Extract files - UNZIP @@ -217,8 +225,10 @@ when: - item.extract is not undefined - item.extract - - ( item.filename is undefined and (item.archive | regex_search('[^.]+(?=\\.*$)') | upper=="ZIP") ) or - ( item.filename is defined and (item.filename | regex_search('[^.]+(?=\\.*$)') | upper=="ZIP") ) + - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="ZIP") ) or + ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="ZIP") ) or + ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="TGZ") ) or + ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="TGZ") ) # 07) Extract files - EXE # - name: "3.3 BoM Processing: - Extract File, UNRAR" @@ -235,6 +245,17 @@ # - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) or # ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) +# 08) Extract files - EXE + +- name: "3.3 BoM Processing: - Extract File, EXE" + ansible.builtin.include_tasks: "process_exe_archives.yaml" + loop: "{{ bom.materials.media | flatten(levels=1) }}" + when: + - item.extract is defined + - item.extract + - ( item.filename is undefined and (item.archive |regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) or + ( item.filename is defined and (item.filename|regex_search('[^.]+(?=\\.*$)')|upper=="EXE") ) + # -------------------------------------+---------------------------------------8 # diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml new file mode 100644 index 0000000000..68f8fc850b --- /dev/null +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml @@ -0,0 +1,42 @@ +--- + +- name: 'Create temporary extract directory {{ item.archive }}' + ansible.builtin.tempfile: + path: "/mnt" + state: directory + suffix: extract + register: tempdir + +- name: Show extract command + ansible.builtin.debug: + msg: + - "Extract directory: {{ tempdir.path }}" + - "Extract command: {% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename 
is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}"
+
+- name: "3.3 BoM Processing: - Extract File, exe file"
+  ansible.builtin.command: "{% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}"
+  args:
+    chdir: '{{ tempdir.path }}'
+    creates: '{{ tempdir.path }}{% if item.tempDir is defined %}/{{ item.tempDir }}{% endif %}/{{ item.creates }}'
+  register: extract_result
+
+- name: Show extract result
+  ansible.builtin.debug:
+    var: extract_result
+
+# - name: Wait for file to exist before continuing
+#   ansible.builtin.wait_for:
+#     path: '{{ tempdir.path }}{% if item.tempDir is defined %}/{{ item.tempDir }}{% endif %}/{{ item.creates }}'
+
+- name: Copy the folder
+  ansible.builtin.copy:
+    src: '{{ tempdir.path }}{% if item.tempDir is defined %}/{{ item.tempDir }}{% endif %}/'
+    dest: '{{ target_media_location }}/{{ item.extractDir }}'
+    remote_src: true
+
+- name: Remove extract directory
+  ansible.builtin.file:
+    path: '{{ tempdir.path }}'
+    state: absent
+
+...
diff --git a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml
index 495eb5c1db..00231beb09 100644
--- a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml
+++ b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml
@@ -225,7 +225,7 @@
 #
 - name: "{{ task_prefix }} Register Microsoft Supplied BOM {{ bom_name }} from archives"
   ansible.builtin.include_vars:
-    file: "{{ microsoft_supplied_bom_archive.path }}"
+    file: "{{ microsoft_supplied_bom_archive.stat.path }}"
     name: bom_temp
   when: microsoft_supplied_bom_archive.stat.exists
 # Step: 05-05-02 - END
@@ -260,6 +260,12 @@
 # Step: 06
 # Description: Validate that BoM was found
 #
+
+- name: "{{ task_prefix }} Show BOM object"
+  ansible.builtin.debug:
+    var: bom
+    verbosity: 2
+
 - name: "{{ task_prefix }} Validate that a BOM object is created"
   ansible.builtin.fail:
     msg: "Unable to find the Bill of materials file for {{ bom_name }}."
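
The process_exe_archives.yaml tasks above sidestep the CD_EXPORT problem by unpacking each self-extracting archive into a scratch directory under /mnt and only copying the result into the target media tree once extraction has finished. A minimal standalone sketch of the same pattern follows; the play header, the archive_file variable and the media_target destination are illustrative assumptions, not part of this patch:

---
# Illustrative sketch only: extract one self-extracting archive via a scratch
# directory, mirroring the pattern used by process_exe_archives.yaml above.
- hosts: scs                                                 # assumed inventory group
  become: true
  vars:
    archive_file: /usr/sap/install/downloads/ARCHIVE.exe     # hypothetical input
    media_target: /usr/sap/install/download_basket           # hypothetical destination
  tasks:
    - name: Create a scratch directory under /mnt
      ansible.builtin.tempfile:
        path: /mnt
        state: directory
        suffix: extract
      register: scratch

    - name: Unpack with unar on the RedHat family, unrar elsewhere
      ansible.builtin.command:
        cmd: "{% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ archive_file }}"
        chdir: "{{ scratch.path }}"

    - name: Copy the unpacked tree into the media location
      ansible.builtin.copy:
        src: "{{ scratch.path }}/"
        dest: "{{ media_target }}"
        remote_src: true

    - name: Remove the scratch directory
      ansible.builtin.file:
        path: "{{ scratch.path }}"
        state: absent

Deleting the scratch directory as the last step mirrors the role above and keeps half-extracted files out of the install source if the extract command fails.
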
diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 5a62483772..f71113010a 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -24,7 +24,7 @@ - { mode: '0755', path: '{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}' } - { mode: '0755', path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' } -- name: "DBLoad: - reset" +- name: "DBLoad: - reset" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_dbload.txt" state: absent @@ -334,9 +334,9 @@ - job_result.rc is defined - job_result.rc == 0 - - name: "DBLoad: results" + - name: "DBLoad: results" ansible.builtin.debug: - msg: "DBLoad succeeded" + msg: "DBLoad succeeded" when: - job_result.rc is defined - job_result.rc == 0 @@ -346,22 +346,12 @@ # when: # - node_tier in ["oracle","oracle-asm"] - - name: "DBLoad Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "DBLoad Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - when: - - platform == 'HANA' - - - name: "DBLoad Install: Installation results" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" + - name: "DBLoad Install: Set Schema Name" when: - - platform == 'HANA' + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: @@ -464,22 +454,12 @@ - db_high_availability is defined - database_high_availability is not defined - - name: "DBLoad Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "DBLoad Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - when: - - platform == 'HANA' - - - name: "DBLoad Install: Installation results" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" + - name: "DBLoad Install: Set Schema Name" when: - - platform == 'HANA' + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "DBLoad: Get hdbuserstore path" become: true diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml index 03e1d1dca0..1fdb02e444 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/oracle-postprocessing.yaml @@ -99,7 +99,7 @@ when: updatesga_results.rc == 0 # Wait for creation of HugePages -# Rebbot the VM to avoid the error "ORA-27102: out of memory" +# Reboot the VM to avoid the error "ORA-27102: out of memory" - name: "ORACLE Post Processing: DB VM reboot" block: @@ -110,6 +110,7 @@ - name: "ORACLE Post Processing: Reboot after the Enabling HugePages" become: true + become_user: root ansible.builtin.reboot: reboot_timeout: 300 failed_when: false diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml 
b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 2e60699bb0..996e0a877e 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -129,21 +129,12 @@ - platform == "HANA" - db_port_open.msg is defined -- name: "PAS Install: Set schema_name variable for HANA" - when: platform == "HANA" - block: - - name: "PAS Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "PAS Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - - - name: "PAS Install: Show schema name" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" +- name: "PAS Install: Set Schema Name" + when: + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "PAS Install" block: diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 96f7f331d1..6258bf14ae 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -143,21 +143,12 @@ - platform == "HANA" - db_port_open.msg is defined -- name: "APP Install: Set schema_name variable for HANA" - when: platform == "HANA" - block: - - name: "APP Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "APP Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - - - name: "APP Install: Show schema name" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" +- name: "APP Install: Set Schema Name" + when: + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true # *====================================4=======================================8 # SAP APP: Install diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml index 08f361e3a5..1dbce279a1 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml @@ -7,6 +7,7 @@ # | Begin: configuration for SAPHanaSR python hook | # | | # +------------------------------------4--------------------------------------*/ +# Note: Refer to https://access.redhat.com/articles/3004101 for RHEL hook installation for HSR ( scale up and scale out ) setups - name: HANA 2.0 only - Implement the Python system replication hook SAPHanaSR when: @@ -197,7 +198,7 @@ ansible.builtin.shell: | set -o pipefail awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* args: chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ hostvars[primary_instance_name]['virtual_host'] }}/trace register: saphanasr @@ -216,7 +217,7 @@ ansible.builtin.shell: | set -o pipefail awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 
1 + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* args: chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ hostvars[primary_instance_name]['virtual_host'] }}/trace register: saphanasr diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index 51a1b93169..e6f9471e0c 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -158,19 +158,19 @@ loop_var: item when: is_nfs_secondary_configured - # - name: "Configure location constraints" - # ansible.builtin.shell: > - # pcs constraint location {{ item.group_name }} - # rule score=-INFINITY resource-discovery=never \#uname eq {{ item.node }} - # register: nfs_location_constraints - # failed_when: false - # ignore_errors: true - # loop: - # - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_1', node: '{{ secondary_instance_name }}' } - # - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_2', node: '{{ primary_instance_name }}' } - # loop_control: - # loop_var: item - # when: is_nfs_secondary_configured + - name: "Configure location constraints" + ansible.builtin.shell: > + pcs constraint location {{ item.group_name }} + rule score=-INFINITY resource-discovery=never \#uname eq {{ item.node }} + register: nfs_location_constraints + failed_when: false + ignore_errors: true + loop: + - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_1', node: '{{ secondary_instance_name }}' } + - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_2', node: '{{ primary_instance_name }}' } + loop_control: + loop_var: item + when: is_nfs_secondary_configured - name: "Check if location constraints did not error on {{ primary_instance_name }}" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 89d6be3b69..d04d05764b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -29,7 +29,7 @@ - name: "5.5.4.1 HANA Cluster configuration - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - Ensure the SAP HANA Topology resource is created" ansible.builtin.shell: > @@ -158,6 +158,12 @@ - database_high_availability - NFS_provider == "ANF" - ansible_hostname == primary_instance_name + - hana_data_mountpoint is defined + - hana_data_mountpoint | length > 1 + - hana_log_mountpoint is defined + - hana_log_mountpoint | length > 1 + - hana_shared_mountpoint is defined + - hana_shared_mountpoint | length > 1 block: - name: "5.5.4.1 HANA Cluster configuration - configure constraints between SAP HANA resources and NFS mounts" ansible.builtin.shell: pcs constraint location SAPHanaTopology_{{ db_sid | upper }}_{{ db_instance_number }}-clone rule score=-INFINITY attr_hana_{{ db_sid | upper }}_NFS_1_active ne true and attr_hana_{{ db_sid | upper }}_NFS_2_active ne true @@ -168,18 +174,16 
@@ ansible.builtin.shell: pcs constraint location SAPHana_{{ db_sid | upper }}_{{ db_instance_number }}-master rule score=-INFINITY attr_hana_{{ db_sid | upper }}_NFS_1_active ne true and attr_hana_{{ db_sid | upper }}_NFS_2_active ne true register: constraint failed_when: constraint.rc > 1 - when: - - ansible_distribution_major_version == "7" + when: ansible_distribution_major_version == "7" - name: "5.5.4.1 HANA Cluster configuration - configure constraints on RHEL 8.x or 9.x" - when: - - ansible_distribution_major_version in ["8", "9"] + when: ansible_distribution_major_version in ["8", "9"] block: - name: "5.5.4.1 HANA Cluster configuration - configure location constraints on RHEL 8.x or 9.x" ansible.builtin.shell: > pcs constraint location SAPHana_{{ db_sid | upper }}_{{ db_instance_number }}-clone rule score=-INFINITY attr_hana_{{ db_sid | upper }}_NFS_1_active ne true and attr_hana_{{ db_sid | upper }}_NFS_2_active ne true - register: constraint - failed_when: constraint.rc > 1 + register: constraint + failed_when: constraint.rc > 1 - name: "5.5.4.1 HANA Cluster configuration - configure ordering constraints for SAPHana clone RHEL 8.x or 9.x" ansible.builtin.shell: > @@ -337,7 +341,8 @@ # | Systemd-Based SAP Startup Framework - BEGIN | # | | # +------------------------------------4--------------------------------------*/ -# Follow steps described in https://access.redhat.com/articles/6884531 +# Follow steps described in https://access.redhat.com/articles/6093611 + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index 4d4683f84f..6dc775c753 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -165,12 +165,9 @@ when: - database_high_availability - NFS_provider == "ANF" - - hana_data_mountpoint is defined - - hana_data_mountpoint | length > 1 - - hana_log_mountpoint is defined - - hana_log_mountpoint | length > 1 - - hana_shared_mountpoint is defined - - hana_shared_mountpoint | length > 1 + - hana_data_mountpoint | default("") | length > 1 or + hana_log_mountpoint | default("") | length > 1 or + hana_shared_mountpoint | default("") | length > 1 - inventory_hostname == primary_instance_name block: - name: "5.5.4.1 HANA Pacemaker configuration - Stop HANA System on both nodes" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index f17e29eda1..918e60f17f 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -79,7 +79,7 @@ clus_fs_mon_timeout: >- {%- set _timeoutvalue = 40 -%} {%- if (NFS_provider == "ANF") -%} - {%- if NFS_version != "NFSv3" -%} + {%- if NFS_version == "NFSv3" -%} {%- set _timeoutvalue = 40 -%} {%- elif NFS_version == "NFSv4.1" -%} {%- set _timeoutvalue = 105 -%} @@ -96,7 +96,7 @@ clus_sap_mon_timeout: >- {%- set _timeoutvalue = 60 -%} {%- if (NFS_provider == "ANF") -%} - {%- if NFS_version != "NFSv3" -%} + {%- if NFS_version == "NFSv3" -%} {%- set _timeoutvalue = 60 -%} {%- elif NFS_version == "NFSv4.1" -%} {%- set _timeoutvalue = 105 -%} diff --git 
a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml index a0570ca3e2..90da4bbaae 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml @@ -35,10 +35,10 @@ ansible.builtin.shell: > pcs resource create fs_{{ sap_sid | upper }}_{{ instance_type | upper }} Filesystem \ device='{{ ascs_filesystem_device }}' \ - directory='{{ profile_directory }}' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ + directory='{{ profile_directory }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} \ --group g-{{ sap_sid | upper }}_{{ instance_type | upper }} register: ascs_fs_resource failed_when: ascs_fs_resource.rc > 1 @@ -178,10 +178,10 @@ ansible.builtin.shell: > pcs resource create fs_{{ sap_sid | upper }}_ERS Filesystem \ device='{{ ers_filesystem_device }}' \ - directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ + directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} \ --group g-{{ sap_sid | upper }}_ERS register: ers_fs_resource failed_when: ers_fs_resource.rc > 1 diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml index 2a783a3597..b59507763c 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml @@ -24,10 +24,10 @@ ansible.builtin.shell: > crm configure primitive fs_{{ sap_sid | upper }}_{{ instance_type | upper }} Filesystem \ device='{{ ascs_filesystem_device }}' \ - directory='{{ profile_directory }}' fstype='nfs' options='sec=sys,vers=4.1' \ + directory='{{ profile_directory }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} register: ascs_fs_resource failed_when: ascs_fs_resource.rc > 1 @@ -169,10 +169,10 @@ ansible.builtin.shell: > crm configure primitive fs_{{ sap_sid | upper }}_ERS Filesystem \ device='{{ ers_filesystem_device }}' \ - directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' options='sec=sys,vers=4.1' \ + directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} register: ers_fs_resource 
failed_when: ers_fs_resource.rc > 1 diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 52f9bc0771..7db4635765 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -109,7 +109,7 @@ - name: "5.6 SCSERS - RHEL - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "5.6 SCSERS - RHEL - Set properties for two node clusters" when: @@ -171,9 +171,6 @@ # | These are common tasks | # +------------------------------------+---------------------------------------*| -# - name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" -# ansible.builtin.shell: pcs property set maintenance-mode=true - - name: "5.6 SCSERS - RHEL - Reboot and wait 5 minutes" ansible.builtin.debug: msg: "Reboot and wait 5 minutes" @@ -182,18 +179,25 @@ become: true become_user: root ansible.builtin.reboot: - reboot_timeout: 150 - post_reboot_delay: 150 + reboot_timeout: 180 + post_reboot_delay: 180 failed_when: false - name: "5.6 SCSERS - RHEL - Set the Cluster out of maintenance mode" ansible.builtin.shell: pcs property set maintenance-mode=false + run_once: true - name: "5.6 SCSERS - RHEL - Wait for 120 seconds for the cluster to stabilize" ansible.builtin.wait_for: timeout: 120 register: wait_for_connection_results +# SCS node has been put on standby and resources have moved. 
The resource move constraints need to be cleared +# Warning: Following resources have been moved and their move constraints are still in place: 'g-SID_ASCS' +- name: "5.6 SCSERS - RHEL - Clear move constraints" + ansible.builtin.shell: pcs resource clear g-{{ sap_sid | upper }}_{{ instance_type | upper }} + when: inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - RHEL ensure SAPInstance resources are started" ansible.builtin.shell: | set -o pipefail @@ -205,7 +209,6 @@ run_once: true failed_when: false - - name: "5.6 SCSERS - RHEL - SCS cluster group validation" ansible.builtin.include_tasks: file: "5.6.6-validate.yml" @@ -214,8 +217,7 @@ become_user: root tags: - "5.6.6-validate" - when: - - inventory_hostname == primary_instance_name + when: inventory_hostname == primary_instance_name - name: "5.6 SCSERS: Set Resources Flag" ansible.builtin.file: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 8d88753f42..0df7b494db 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -21,7 +21,7 @@ var: systemd_service_file_path verbosity: 2 -- name: "5.6 SCSERS - Set fact for the systemd services existance" +- name: "5.6 SCSERS - Set facts for the systemd services and files" ansible.builtin.set_fact: systemd_service_names: "{{ systemd_service_file_path.results @@ -29,6 +29,12 @@ | map(attribute='stat.path') | regex_replace('/etc/systemd/system/', '') }}" + scs_systemd_files: + - { file: "SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service", directory: /etc/systemd/system } + - { file: "10-SAP{{ sap_sid | upper }}-{{ scs_instance_number }}.rules", directory: /etc/polkit-1/rules.d } + ers_systemd_files: + - { file: "SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service", directory: /etc/systemd/system } + - { file: "10-SAP{{ sap_sid | upper }}-{{ ers_instance_number }}.rules", directory: /etc/polkit-1/rules.d } - name: "5.6 SCSERS - Show fact for the systemd services existance" ansible.builtin.debug: @@ -40,77 +46,83 @@ - systemd_service_names is defined - systemd_service_names | length > 0 block: - # - name: "5.6 SCSERS - Disable the services if they exist" - # ansible.builtin.systemd: - # name: "{{ service_name }}" - # enabled: false - # failed_when: false - # loop: "{{ systemd_service_names }}" - # loop_control: - # loop_var: service_name - - name: "5.6 SCSERS - Disable and Stop the services if they exist" - become: true - become_user: root - ansible.builtin.systemd: - name: "{{ service_name }}" - enabled: false - state: "stopped" - failed_when: false - loop: "{{ systemd_service_names }}" - loop_control: - loop_var: service_name + - name: "5.6 SCSERS - Fetch systemd files from (A)SCS node" + when: node_tier == 'scs' + ansible.builtin.fetch: + src: "{{ item.directory }}/{{ item.file }}" + dest: /tmp/{{ sap_sid }}/ + flat: true + loop: "{{ scs_systemd_files }}" + + - name: "5.6 SCSERS - Fetch systemd files from ERS node" + when: node_tier == 'ers' + ansible.builtin.fetch: + src: "{{ item.directory }}/{{ item.file }}" + dest: /tmp/{{ sap_sid }}/ + flat: true + loop: "{{ ers_systemd_files }}" - - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" - become: true - become_user: root - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true + - name: "5.6 
SCSERS - Copy ERS systemd files on (A)SCS node" + when: node_tier == 'scs' + ansible.builtin.copy: + src: /tmp/{{ sap_sid }}/{{ item.file }} + dest: "{{ item.directory }}" + mode: '0644' owner: root - group: root + group: sapinst + loop: "{{ ers_systemd_files }}" + + - name: "5.6 SCSERS - Copy (A)SCS systemd files on ERS node" + when: node_tier == 'ers' + ansible.builtin.copy: + src: /tmp/{{ sap_sid }}/{{ item.file }} + dest: "{{ item.directory }}" mode: '0644' - line: "[Service]" + owner: root + group: sapinst + loop: "{{ scs_systemd_files }}" + + - name: "5.6 SCSERS Pacemaker - Create systemd service override directory" + ansible.builtin.file: + path: "{{ override_dir }}" + state: directory + owner: root + group: root + mode: '0755' loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d loop_control: - loop_var: dropfile + loop_var: override_dir - - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" - become: true - become_user: root - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true + - name: "5.6 SCSERS Pacemaker - Create systemd HA override files" + ansible.builtin.copy: + content: |- + [Service] + Restart=no + dest: "{{ dropfile }}" owner: root group: root mode: '0644' - insertafter: '^[Service]$' - line: "Restart=no" loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf loop_control: loop_var: dropfile - register: dropinfile - - name: "5.6 SCSERS - systemd reload" + - name: "5.6 SCSERS - Disable and Stop services" ansible.builtin.systemd: + name: "{{ service_name }}" daemon_reload: true - when: - - dropinfile.changed - - # - name: "5.6 SCSERS - validate that the drop-in file is active" - # when: - # ansible.builtin.shell: >- - # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - # register: dropinfile_validation - # changed_when: false - # failed_when: dropinfile_validation.rc > 0 - + enabled: false + state: stopped + failed_when: false + loop: + - SAP{{ sap_sid | upper }}_{{ scs_instance_number }} + - SAP{{ sap_sid | upper }}_{{ ers_instance_number }} + loop_control: + loop_var: service_name # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml index 45d7f2e1fe..2ed1a9fb2d 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml @@ -142,8 +142,8 @@ - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name - name: Wait 2 minutes for SAP system to stop - ansible.builtin.pause: - seconds: 120 + ansible.builtin.wait_for: + timeout: 120 # This is not needed any more as we are going 
to use the default path of the hook script installed by the package. # - name: copy SAPHanaSR-ScaleOut.py (SUSE) @@ -265,8 +265,8 @@ register: hana_system_started - name: Wait 5 minutes for SAP system to start - ansible.builtin.pause: - seconds: 300 + ansible.builtin.wait_for: + timeout: 300 # - name: Start HANA Database # ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml @@ -277,8 +277,8 @@ # awk '/ha_dr_SAPHanaSR-ScaleOut.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* # Verify that the SAPHanaSR-ScaleOut hook script is working as expected. - name: Pause to give HANA replication time to stabilize - ansible.builtin.pause: - seconds: "{{ hsr_status_report_wait_in_s }}" + ansible.builtin.wait_for: + timeout: "{{ hsr_status_report_wait_in_s }}" # REDHAT only # This needs to be run on all the nodes where HANA is deployed. @@ -302,8 +302,8 @@ delay: 30 rescue: - name: "[Rescue] - Pause to give HANA replication time to stabilize" - ansible.builtin.pause: - seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + ansible.builtin.wait_for: + timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - name: "[Rescue] - Verify the hook Installation (REDHAT)" become_user: "{{ db_sid | lower }}adm" @@ -342,8 +342,8 @@ # when: inventory_hostname == primary_instance_name rescue: - name: "[Rescue] - Pause to give HANA replication time to stabilize" - ansible.builtin.pause: - seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + ansible.builtin.wait_for: + timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - name: "[Rescue] - Verify the hook Installation (SUSE)" become_user: "{{ db_sid | lower }}adm" @@ -385,8 +385,8 @@ # when: inventory_hostname == primary_instance_name rescue: - name: "[Rescue] - Pause to give HANA replication time to stabilize" - ansible.builtin.pause: - seconds: "{{ rescue_hsr_status_report_wait_in_s }}" + ansible.builtin.wait_for: + timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - name: "[Rescue] - Verify the hook Installation" become_user: "{{ db_sid | lower }}adm" diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml index b2a3e12132..36c8ed2ae2 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml @@ -51,8 +51,8 @@ when: ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] - name: Wait 2 minutes for SAP system to stop - ansible.builtin.pause: - seconds: 120 + ansible.builtin.wait_for: + timeout: 120 - name: Unmount /hana/shared from all cluster participating nodes block: @@ -213,8 +213,8 @@ - ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] - name: Wait 5 minutes for SAP system to stablize - ansible.builtin.pause: - seconds: 300 + ansible.builtin.wait_for: + timeout: 300 # End of HANA filesystem clustering resources # Ref : https://access.redhat.com/articles/3004101 - 4.3 Configure general cluster properties diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml index 965942442a..eb674c6f92 100644 --- 
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml
index 965942442a..eb674c6f92 100644
--- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml
@@ -48,7 +48,7 @@
 - name: Ensure the SAP HANA instance resource is created
   ansible.builtin.shell: >
     pcs resource create SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaController \
-    SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=false \
+    SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true \
     op start interval=0 timeout=3600 op stop interval=0 timeout=3600 op promote interval=0 timeout=3600 \
     op monitor interval=60 role="Master" timeout=700 op monitor interval=61 role="Slave" timeout=700
   register: sap_hana
@@ -126,7 +126,7 @@
 - name: Ensure the SAP HANA instance is created
   ansible.builtin.shell: >
     pcs resource create SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaController \
-    SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=false \
+    SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true \
     op demote interval=0s timeout=320 op methods interval=0s timeout=5 \
     op start interval=0 timeout=3600 op stop interval=0 timeout=3600 op promote interval=0 timeout=3600 \
     op monitor interval=60 role="Master" timeout=700 op monitor interval=61 role="Slave" timeout=700
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml
index 404fb4e265..4da049101f 100644
--- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml
@@ -90,7 +90,7 @@
     op monitor interval="60" role="Master" timeout="700" \
     op monitor interval="61" role="Slave" timeout="700" \
     params SID="{{ db_sid | upper }}" InstanceNumber="{{ db_instance_number }}" PREFER_SITE_TAKEOVER="true" \
-    DUPLICATE_PRIMARY_TIMEOUT="7200" AUTOMATED_REGISTER="false"
+    DUPLICATE_PRIMARY_TIMEOUT="7200" AUTOMATED_REGISTER="true"
   register: sap_hana_controller
   failed_when: sap_hana_controller.rc > 1
@@ -211,8 +211,8 @@
   register: hana_system_stopped

 - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Wait 2 minutes for SAP system to stop"
-  ansible.builtin.pause:
-    seconds: 120
+  ansible.builtin.wait_for:
+    timeout: 120

 - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled"
   ansible.builtin.command: crm configure property maintenance-mode=true
@@ -246,8 +246,8 @@
   register: hana_system_started

 - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Wait 5 minutes for SAP system to start"
-  ansible.builtin.pause:
-    seconds: 300
+  ansible.builtin.wait_for:
+    timeout: 300

 - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is disabled"
   ansible.builtin.command: crm configure property maintenance-mode=false
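The AUTOMATED_REGISTER flip from false to true in the hunks above changes the failover follow-up for both the RedHat and SUSE scale-out clusters: the SAPHanaController resource agent now re-registers a recovered former primary as the new secondary on its own, instead of leaving it stopped until an operator intervenes, while DUPLICATE_PRIMARY_TIMEOUT=7200 still guards against two sites both claiming the primary role. Conceptually the agent automates the equivalent of this manual step on the old primary (host, instance, and site names below are placeholders; the agent issues the registration itself):

    # illustrative only
    hdbnsutil -sr_register --remoteHost=<new-primary-host> \
        --remoteInstance=<db_instance_number> --replicationMode=sync \
        --operationMode=logreplay --name=<former-primary-site>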
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml
index 8787528fac..d31bef033a 100644
--- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml
@@ -6,8 +6,8 @@
 # +------------------------------------4--------------------------------------*/

 - name: Pause to give cluster time to stabilize
-  ansible.builtin.pause:
-    seconds: "{{ cluster_status_report_wait_in_s }}"
+  ansible.builtin.wait_for:
+    timeout: "{{ cluster_status_report_wait_in_s }}"

 - name: Check the post-provisioning cluster status
   ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}"
@@ -36,8 +36,8 @@
 #    awk '/ha_dr_SAPHanaSR.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_*
 # Verify that the hook script is working as expected.
 - name: Pause to give HANA replication time to stabilize
-  ansible.builtin.pause:
-    seconds: "{{ hsr_status_report_wait_in_s }}"
+  ansible.builtin.wait_for:
+    timeout: "{{ hsr_status_report_wait_in_s }}"

 - name: "Verify that the hook script is working as expected"
   when: not database_scale_out
@@ -59,8 +59,8 @@
       when: inventory_hostname == primary_instance_name
   rescue:
     - name: "[Rescue] - Pause to give HANA replication time to stabilize"
-      ansible.builtin.pause:
-        seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+      ansible.builtin.wait_for:
+        timeout: "{{ rescue_hsr_status_report_wait_in_s }}"

     - name: "[Rescue] - Verify the hook Installation"
       become_user: "{{ db_sid | lower }}adm"
@@ -99,8 +99,8 @@
         delay: 30
   rescue:
     - name: "[Rescue] - Pause to give HANA replication time to stabilize"
-      ansible.builtin.pause:
-        seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+      ansible.builtin.wait_for:
+        timeout: "{{ rescue_hsr_status_report_wait_in_s }}"

     - name: "[Rescue] - Verify the hook Installation (SUSE)"
       become_user: "{{ db_sid | lower }}adm"
@@ -136,8 +136,8 @@
         delay: 30
   rescue:
     - name: "[Rescue] - Pause to give HANA replication time to stabilize"
-      ansible.builtin.pause:
-        seconds: "{{ rescue_hsr_status_report_wait_in_s }}"
+      ansible.builtin.wait_for:
+        timeout: "{{ rescue_hsr_status_report_wait_in_s }}"

     - name: "[Rescue] - Verify the hook Installation (REDHAT)"
       become_user: "{{ db_sid | lower }}adm"
diff --git a/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml b/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml
new file mode 100644
index 0000000000..35b4d2f16c
--- /dev/null
+++ b/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# defaults file for 6.0.0-sapcal-install
+
+db_sid_admin_user_id: "1050"
+sap_sysadmin_user_id: "1079"
+sap_sysadmin_group_id: "79"
+sap_gui_default_language: "en"
+sap_additional_languages: ""
+number_of_dialog_work_processes: "10"
+number_of_batch_work_processes: "7"
+abap_message_server_port: "3600"
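These are ordinary role defaults, so they follow the usual Ansible variable precedence and can be overridden per deployment from inventory or the command line; as a sketch (the playbook name here is illustrative, not part of this change):

    ansible-playbook <sap-cal-playbook>.yaml \
      --extra-vars "sap_gui_default_language=de number_of_dialog_work_processes=12"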
"SAP-CAL Product Name : {{ sap_cal_product_name }}" + - "SAP-CAL Product Id : {{ product_id }}" + - "Domain Name : {{ sap_fqdn }}" + verbosity: 2 +# ------------------------------------ + +- name: Call provisioning API endpoint + public_api: + method: "software_provisioning" + calKeyvaultId: "https://{{ calapi_kv }}.vault.azure.net/" + outputDirectoryPath: "{{ _workspace_directory }}" + clientId: "" + clientSecret: "" + tenantId: "" + outputFile: "sapcal_provisioning.json" + productId: "{{ product_id }}" + availabilityScenario: "non-ha" + infrastructureParameterSet: + domainName: "{{ sap_fqdn }}" + remoteOsUser: "{{ orchestration_ansible_user }}" + secretStoreId: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ secret_prefix }}-INFRASTRUCTURE/providers/Microsoft.KeyVault/vaults/{{ kv_name }}" + sshPublicKeySecretName: "{{ secret_prefix }}-sid-sshkey-pub" + sshPrivateKeySecretName: "{{ secret_prefix }}-sid-sshkey" + deploymentServerResourceGroup: "{{ resource_group_name }}-SAPCAL-DS" + technicalCommunicationUser: "{{ s_user }}" + techUserPassword: "{{ s_password }}" + installationParameterSets: + hanaDeployment: + primaryVmResourceId: "{{ db_resource_id }}" + DBSID: "{{ db_sid | upper }}" + DBSIDAdminUserId: "{{ db_sid_admin_user_id }}" + instanceNumber: "{{ db_instance_number }}" + primaryPhysicalHostname: "{{ db_physical_hostname }}" + primaryVirtualHostname: "{{ db_virtual_hostname }}" + s4hanaDeployment: + SID: "{{ sap_sid | upper }}" + SAPSysAdminUserId: "{{ sap_sysadmin_user_id }}" + SAPSysAdminGroupId: "{{ sap_sysadmin_group_id }}" + sapGuiDefaultLanguage: "{{ sap_gui_default_language }}" + SAPSystemAdditionalLanguages: "{{ sap_additional_languages }}" + numberOfDialogWorkProcesses: "{{ number_of_dialog_work_processes }}" + numberOfBatchWorkProcesses: "{{ number_of_batch_work_processes }}" + centralServicesDeployment: + vmResourceId: "{{ scs_resource_id}}" + instanceNumber: "{{ scs_instance_number }}" + ABAPMessageServerPort: "{{ abap_message_server_port }}" + physicalHostname: "{{ scs_physical_hostname }}" + virtualHostname: "{{ scs_virtual_hostname }}" + applicationServersDeployment: + - vmResourceId: "{{ pas_resource_id }}" + instanceNumber: "{{ pas_instance_number }}" + physicalHostname: "{{ pas_physical_hostname }}" + virtualHostname: "{{ pas_virtual_hostname }}" + - vmResourceId: "{{ app_resource_id }}" + instanceNumber: "{{ app_instance_number }}" + physicalHostname: "{{ app_physical_hostname }}" + virtualHostname: "{{ app_virtual_hostname }}" + register: sapcal_provisioning + +- name: "Print SAP-CAL provisioning response" + ansible.builtin.debug: + var: sapcal_provisioning diff --git a/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml b/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml new file mode 100644 index 0000000000..5057996055 --- /dev/null +++ b/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml @@ -0,0 +1,14 @@ +--- +# vars file for 6.0.0-sapcal-install + +sap_cal_product: + - { name: "S/4HANA_2023-Initial_Shipment_Stack", id: "88f59e31-d776-45ea-811c-1da6577e4d25" } + - { name: "S/4HANA_2022-Initial_Shipment_Stack", id: "3b1dc287-c865-4f79-b9ed-d5ec2dc755e9" } + - { name: "S/4HANA_2021-Initial_Shipment_Stack", id: "108febf9-5e7b-4e47-a64d-231b6c4c821d" } + - { name: "S/4HANA_2022-FPS_01_022023", id: "1294f31c-2697-443c-bacc-117d5924fcb2" } + - { name: "S/4HANA_2022-FPS_02_052023", id: "c86d7a56-4130-4459-8060-ffad1a1118ce" } + - { name: "S/4HANA_2021-FPS_02_052022", id: "4d5f19a7-d3cb-4d47-9f44-0a9e133b11de" } + - { name: 
"S/4HANA_2021-FPS_01_022022", id: "1c796928-0617-490b-a87d-478568a49628" } + - { name: "S/4HANA_2021-04_052023", id: "29403c63-6504-4919-b5dd-319d7a99804e" } + - { name: "S/4HANA_2021-03_112022", id: "6921f2f8-169b-45bb-9e0b-d89b4abee1f3" } + - { name: "S/4HANA 2020-04_052022", id: "615c5c18-5226-4dcb-b0ab-19d0141baf9b" } diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 00eecfcdfc..85df0c5291 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -5,7 +5,8 @@ become_user_name: root oracle_user_name: oracle orchestration_ansible_user: azureadm # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.11.0.2" +SDAF_Version: "3.12.0.0" + # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 @@ -89,6 +90,8 @@ sybase_temp_stripe_size: 128 oracle_data_stripe_size: 256 oracle_log_stripe_size: 128 +default_stripe_size: 128 + # Custom virtual hostnames custom_db_virtual_hostname: "" custom_ers_virtual_hostname: "" @@ -258,5 +261,14 @@ enable_os_monitoring: false enable_ha_monitoring: false # ------------------- End - Azure Monitor for SAP (AMS) variables --------------8 +# ------------------- Begin - SAP CAL Integration variables --------------------8 +enable_sap_cal: false +calapi_kv: "" +sap_cal_product_name: "" +vg_root: "rootvg" +lv_root_size: 10g +lv_root_size_db: 20g +lv_tmp_size: 10g +# ------------------- End - SAP CAL Integration variables ----------------------8 python_version: "python3" diff --git a/deploy/ansible/vars/disks_config.yml b/deploy/ansible/vars/disks_config.yml index 9362c37733..6e843317ff 100644 --- a/deploy/ansible/vars/disks_config.yml +++ b/deploy/ansible/vars/disks_config.yml @@ -418,9 +418,9 @@ vg_stripecount_from_lv_item: >- # '-i -I ' only when the LV 'item' has # stripesize specified, otherwise it will be an empty string. lvol_opts_from_lv_item: >- - {{ ('stripesize' in item) | + {{ ('stripesize' in item or vg_stripecount_from_lv_item | int > 1) | ternary('-i ' ~ vg_stripecount_from_lv_item ~ - ' -I ' ~ (item.stripesize | default(0)), + ' -I ' ~ (item.stripesize | default(default_stripe_size)), '') }} # Define a dynamic expression based upon the 'item' fact that can diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index 064ddbda20..a57eb4c686 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.11.0.2 +3.12.0.0 diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index a452d7d9a8..0a3d70d335 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -71,7 +71,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 # Set Variables. 
diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt
index 064ddbda20..a57eb4c686 100644
--- a/deploy/configs/version.txt
+++ b/deploy/configs/version.txt
@@ -1 +1 @@
-3.11.0.2
+3.12.0.0
diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml
index a452d7d9a8..0a3d70d335 100644
--- a/deploy/pipelines/01-deploy-control-plane.yaml
+++ b/deploy/pipelines/01-deploy-control-plane.yaml
@@ -71,7 +71,7 @@ stages:
           clean: all
         steps:
           - template: templates\download.yaml
-          - task: PostBuildCleanup@3
+          - task: PostBuildCleanup@4
           # Set Variables.
           - task: AzureCLI@2
             continueOnError: false
@@ -251,6 +251,7 @@ stages:
                   --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \
                   --subscription $ARM_SUBSCRIPTION_ID --auto-approve --ado --only_deployer --msi
               else
+                export ARM_CLIENT_ID="$CP_ARM_CLIENT_ID"
                 export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET
                 $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \
                   --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \
@@ -400,7 +401,7 @@ stages:
         workspace:
           clean: all
         steps:
-          - task: PostBuildCleanup@3
+          - task: PostBuildCleanup@4
           - template: templates\download.yaml
             parameters:
               getLatestFromBranch: true
@@ -619,6 +620,10 @@ stages:
              else
                if [ $USE_MSI != "true" ]; then
                  echo -e "$cyan--- Using SPN ---$reset"
+                 export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID
+                 export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET
+                 export ARM_TENANT_ID=$CP_ARM_TENANT_ID
+                 export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID
                  export ARM_USE_MSI=false
                  az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none
@@ -893,7 +898,8 @@ stages:
             TF_VAR_agent_pool: $(POOL)
             TF_VAR_agent_ado_url: $(System.CollectionUri)
             TF_VAR_tf_version: $(tf_version)
-            AZURE_DEVOPS_EXT_PAT: $(System.AccessToken)
+            TF_VAR_agent_pat: $(PAT)
+            AZURE_DEVOPS_EXT_PAT: $(PAT)
             IS_PIPELINE_DEPLOYMENT: true
             WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET)
             APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID)
@@ -941,7 +947,7 @@ stages:
           clean: all
         steps:
           - template: templates\download.yaml
-          - task: PostBuildCleanup@3
+          - task: PostBuildCleanup@4
           - task: DotNetCoreCLI@2
             displayName: "Build the Configuration Web Application"
             inputs:
diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml
index ba5ef4385f..52b54b6e58 100644
--- a/deploy/pipelines/02-sap-workload-zone.yaml
+++ b/deploy/pipelines/02-sap-workload-zone.yaml
@@ -112,91 +112,91 @@ stages:
           clean: all
         steps:
           - template: templates\download.yaml
-          - task: PostBuildCleanup@3
+          - task: PostBuildCleanup@4
- bash: | #!/bin/bash green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" echo "##vso[build.updatebuildnumber]Deploying the SAP Workload zone defined in $(workload_zone_folder)" - # Check if running on deployer - if [ ! -f /etc/profile.d/deploy_server.sh ]; then + # Check if running on deployer + if [ ! -f /etc/profile.d/deploy_server.sh ]; then echo -e "$green --- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix - else + else source /etc/profile.d/deploy_server.sh - fi + fi - if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then + if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then echo -e "$boldred--- $(workload_zone_configuration_file) was not found ---$reset" echo "##vso[task.logissue type=error]File $(workload_zone_configuration_file) was not found."
exit 2 - fi + fi - echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" + echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" - cd $CONFIG_REPO_PATH - mkdir -p .sap_deployment_automation - git checkout -q $(Build.SourceBranchName) + cd $CONFIG_REPO_PATH + mkdir -p .sap_deployment_automation + git checkout -q $(Build.SourceBranchName) - echo -e "$green--- Validations ---$reset" + echo -e "$green--- Validations ---$reset" - if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then + if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." exit 2 - fi - if [ $USE_MSI != "true" ]; then + fi + if [ $USE_MSI != "true" ]; then if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." 
+ exit 2 fi - fi + fi echo -e "$green--- Convert config file to UX format ---$reset" - dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) + dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) echo -e "$green--- Read details ---$reset" - ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} - echo Network: ${NETWORK} + ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') + NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + echo Environment: ${ENVIRONMENT} + echo Location: ${LOCATION} + echo Network: ${NETWORK} - ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) - LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) - case "$LOCATION_CODE" in + ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) + LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) + case "$LOCATION_CODE" in "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; @@ -253,418 +253,418 @@ stages: "WUS2") LOCATION_IN_FILENAME="westus2" ;; "WUS3") LOCATION_IN_FILENAME="westus3" ;; *) LOCATION_IN_FILENAME="westeurope" ;; - esac - - NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - echo "Network(filename): $NETWORK_IN_FILENAME" - echo "Deployer Environment $(deployer_environment)" - echo "Deployer Region $(deployer_region)" - echo "Workload TFvars $workload_zone_configuration_file" - echo "" - - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: $(System.TeamProject)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version - - - if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then + esac + + NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Network(filename): $NETWORK_IN_FILENAME" + echo "Deployer Environment $(deployer_environment)" + echo "Deployer Region $(deployer_region)" + echo "Workload TFvars $workload_zone_configuration_file" + echo "" + + echo "Agent: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "" + echo "Azure CLI version:" + echo "-------------------------------------------------" + az --version + + + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in 
$(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
                exit 2
-             fi
+             fi

-             if [ $LOCATION != $LOCATION_IN_FILENAME ]; then
+             if [ $LOCATION != $LOCATION_IN_FILENAME ]; then
                echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
                exit 2
-             fi
+             fi

-             if [ $NETWORK != $NETWORK_IN_FILENAME ]; then
+             if [ $NETWORK != $NETWORK_IN_FILENAME ]; then
                echo "##vso[task.logissue type=error]The network_logical_name setting in $(workload_zone_configuration_file) '$NETWORK' does not match the $(workload_zone_configuration_file) file name '$NETWORK_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
                exit 2
-             fi
+             fi

              echo -e "$green--- Configure devops CLI extension ---$reset"
-             az config set extension.use_dynamic_install=yes_without_prompt --output none
+             az config set extension.use_dynamic_install=yes_without_prompt --output none

-             az extension add --name azure-devops --output none
+             az extension add --name azure-devops --output none

-             az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none
+             az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none

-             export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]")
-             echo '$(parent_variable_group) id: ' $PARENT_VARIABLE_GROUP_ID
-             if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then
+             export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]")
+             echo '$(parent_variable_group) id: ' $PARENT_VARIABLE_GROUP_ID
+             if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then
                echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found."
                exit 2
-             fi
+             fi

-             export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
-             echo '$(variable_group) id: ' $VARIABLE_GROUP_ID
-             if [ -z ${VARIABLE_GROUP_ID} ]; then
+             export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
+             echo '$(variable_group) id: ' $VARIABLE_GROUP_ID
+             if [ -z ${VARIABLE_GROUP_ID} ]; then
                echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found."
exit 2 - fi + fi - echo "Agent Pool: " $(this_agent) + echo "Agent Pool: " $(this_agent) echo -e "$green--- Set CONFIG_REPO_PATH variable ---$reset" - deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) ; echo 'Deployer Environment File' $deployer_environment_file_name - workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} ; echo 'Workload Environment File' $workload_environment_file_name - dos2unix -q ${deployer_environment_file_name} - dos2unix -q ${workload_environment_file_name} + deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) ; echo 'Deployer Environment File' $deployer_environment_file_name + workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} ; echo 'Workload Environment File' $workload_environment_file_name + dos2unix -q ${deployer_environment_file_name} + dos2unix -q ${workload_environment_file_name} - if [ ! -f ${deployer_environment_file_name} ]; then + if [ ! -f ${deployer_environment_file_name} ]; then echo -e "$boldred--- $(deployer_environment)$(deployer_region) was not found ---$reset" echo "##vso[task.logissue type=error]Control plane configuration file $(deployer_environment)$(deployer_region) was not found." exit 2 - fi + fi echo -e "$green--- Read parameter values ---$reset" - if [ "true" == $(inherit) ]; then + if [ "true" == $(inherit) ]; then az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" | tr -d \") if [ -z ${az_var} ]; then - deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key else - deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") if [ -z ${az_var} ]; then - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} else - key_vault=${az_var}; echo 'Deployer Key Vault' ${key_vault} + key_vault=${az_var}; echo 'Deployer Key Vault' ${key_vault} fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") if [ -z ${az_var} ]; then - REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA else - REMOTE_STATE_SA=${az_var}; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=${az_var}; echo 'Terraform state file storage account' $REMOTE_STATE_SA 
fi az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \") if [ -z ${az_var} ]; then - STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION else - STATE_SUBSCRIPTION=${az_var}; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=${az_var}; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ARM_SUBSCRIPTION_ID.value" | tr -d \") if [ -z ${az_var} ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." + exit 2 else - echo 'Target subscription' $WL_ARM_SUBSCRIPTION_ID + echo 'Target subscription' $WL_ARM_SUBSCRIPTION_ID fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Workload_Key_Vault.value" | tr -d \") if [ -z ${az_var} ]; then - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - fi + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + fi else - export workload_key_vault=$(Workload_Key_Vault) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_key_vault=$(Workload_Key_Vault) ; echo 'Workload Key Vault' ${workload_key_vault} fi - else + else deployer_tfstate_key=$(cat ${workload_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} REMOTE_STATE_SA=$(cat ${workload_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA STATE_SUBSCRIPTION=$(cat ${workload_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - fi + fi secrets_set=1 if [ ! -f /etc/profile.d/deploy_server.sh ]; then - echo -e "$green --- Install terraform ---$reset" + echo -e "$green --- Install terraform ---$reset" - wget -q $(tf_url) - return_code=$? - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 - fi - unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ - rm -f terraform_$(tf_version)_linux_amd64.zip + wget -q $(tf_url) + return_code=$? + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." 
+ exit 2 + fi + unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ + rm -f terraform_$(tf_version)_linux_amd64.zip - if [ $USE_MSI != "true" ]; then - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - - echo -e "$green--- az login ---$reset" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + if [ $USE_MSI != "true" ]; then + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + + echo -e "$green--- az login ---$reset" + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi else - echo -e "$green--- az login ---$reset" + echo -e "$green--- az login ---$reset" if [ $LOGON_USING_SPN == "true" ]; then - echo "Using SPN" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + echo "Using SPN" + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none else - az login --identity --allow-no-subscriptions --output none + az login --identity --allow-no-subscriptions --output none fi return_code=$? if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code fi - if [ $USE_MSI != "true" ]; then - echo -e "$green --- Set secrets ---$reset" + if [ $USE_MSI != "true" ]; then + echo -e "$green --- Set secrets ---$reset" - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ - --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ - --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION - secrets_set=$? ; echo -e "$cyan Set Secrets returned $secrets_set $reset" - az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none - fi + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ + --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ + --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION + secrets_set=$? 
; echo -e "$cyan Set Secrets returned $secrets_set $reset" + az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --subscription $STATE_SUBSCRIPTION --output none + fi fi debug_variable='--output none' debug_variable='' if [ $USE_MSI != "true" ]; then - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) + isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) - tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) + tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) - if [ -n "${isUserAccessAdmin}" ]; then + if [ -n "${isUserAccessAdmin}" ]; then - echo -e "$green--- Set permissions ---$reset" - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo -e "$green --- Assign subscription permissions to $perms ---$reset" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none - fi + echo -e "$green--- Set permissions ---$reset" + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo -e "$green --- Assign subscription permissions to $perms ---$reset" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none + fi - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --output none - fi + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage 
Account Contributor" --scope "${tfstate_resource_id}" --output none + fi - resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) + resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) - if [ -n ${resource_group_name} ]; then - for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none - fi - done + if [ -n ${resource_group_name} ]; then + for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none fi + done + fi - resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) + resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) - if [ -n ${resource_group_name} ]; then - resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) + if [ -n ${resource_group_name} ]; then + resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) - vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") - if [ -n "${vnet_resource_id}" ]; then - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) + vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") + if [ -n "${vnet_resource_id}" ]; then + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role 
"Network Contributor" --scope $vnet_resource_id --output none - fi - fi + if [ -z "$perms" ]; then + echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" + az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope $vnet_resource_id --output none + fi fi - else - echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" fi + else + echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" + fi fi echo -e "$green--- Deploy the workload zone ---$reset" - cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) - if [ -f /etc/profile.d/deploy_server.sh ]; then + cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) + if [ -f /etc/profile.d/deploy_server.sh ]; then if [ $LOGON_USING_SPN == "true" ]; then - echo "Logon Using SPN" - - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + echo "Logon Using SPN" + + az logout --output none + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi else - export ARM_USE_MSI=true - az login --identity --allow-no-subscriptions --output none + export ARM_USE_MSI=true + az login --identity --allow-no-subscriptions --output none fi - else - export ARM_USE_AZUREAD=true + else if [ $USE_MSI != "true" ]; then - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." 
- exit $return_code - fi + az logout --output none + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi fi - fi + fi - if [ $USE_MSI != "true" ]; then + if [ $USE_MSI != "true" ]; then $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado - else + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado + else $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi - fi - return_code=$? - - echo "Return code: ${return_code}" - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} + --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi + fi + return_code=$? 
+ + echo "Return code: ${return_code}" + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} export landscape_tfstate_key=$(cat ${workload_environment_file_name} | grep landscape_tfstate_key= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Zone State File' $landscape_tfstate_key - fi + fi + + expiry_date=$(date -d "+365 days" +%Y-%m-%d) - az logout --output none - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") - if [ -z ${az_var} ]; then + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") + if [ -z ${az_var} ]; then echo "##vso[task.logissue type=warning]Variable FENCING_SPN_ID is not set. Required for highly available deployments" - else - export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) + else + export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --subscription $STATE_SUBSCRIPTION --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) if [ -z "$fencing_id" ]; then - az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none fi - fi - + fi + az logout --output none echo -e "$green--- Add & update files in the DevOps Repository ---$reset" - cd $(Build.Repository.LocalPath) - git pull + cd $(Build.Repository.LocalPath) + git pull - echo -e "$green--- Pull latest ---$reset" - cd $CONFIG_REPO_PATH - git pull + echo -e "$green--- Pull latest ---$reset" + cd $CONFIG_REPO_PATH + git pull - added=0 - if [ -f ${workload_environment_file_name} ]; then + added=0 + if [ -f ${workload_environment_file_name} ]; then git add ${workload_environment_file_name} added=1 - fi - if [ -f ${workload_environment_file_name}.md ]; then + fi + if [ -f ${workload_environment_file_name}.md ]; then git add ${workload_environment_file_name}.md added=1 - fi - if [ -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then + fi + if [ -f 
$(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then git add -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate added=1 - fi - if [ 1 == $added ]; then + fi + if [ 1 == $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) - fi + fi - if [ -f ${workload_environment_file_name}.md ]; then + if [ -f ${workload_environment_file_name}.md ]; then echo "##vso[task.uploadsummary]${workload_environment_file_name}.md" - fi + fi echo -e "$green--- Adding variables to the variable group" $(variable_group) "---$reset" - if [ -n $VARIABLE_GROUP_ID ]; then + if [ -n $VARIABLE_GROUP_ID ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Account_Name.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Subscription.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines 
variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Zone_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines 
variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors fi - fi + fi - if [ 0 != $return_code ]; then + if [ 0 != $return_code ]; then echo "##vso[task.logissue type=error]Return code from install_workloadzone $return_code." if [ -f ${workload_environment_file_name}.err ]; then - error_message=$(cat ${workload_environment_file_name}.err) - echo "##vso[task.logissue type=error]Error message: $error_message." + error_message=$(cat ${workload_environment_file_name}.err) + echo "##vso[task.logissue type=error]Error message: $error_message." fi - fi + fi exit $return_code diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 164d2248af..719fd60763 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -44,7 +44,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - script: | #!/bin/bash echo "##vso[build.updatebuildnumber]Deploying the SAP System defined in $(sap_system_folder)" @@ -70,7 +70,6 @@ stages: else source /etc/profile.d/deploy_server.sh fi - export AZURE_DEVOPS_EXT_PAT=$PAT HOME_CONFIG=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path) cd $HOME_CONFIG; mkdir -p .sap_deployment_automation @@ -256,16 +255,6 @@ stages: fi fi - if [ -z $USE_MSI ]; then - USE_MSI="false" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query USE_MSI.value --output table) - if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors - else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors - fi - fi - if [ $USE_MSI != "true" ]; then echo "Using SPN" @@ -393,6 +382,11 @@ stages: added=1 fi + if [ -f ${SID}_virtual_machines.json ]; then + git add ${SID}_virtual_machines.json + added=1 + fi + if [ 1 == $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" diff --git a/deploy/pipelines/04-sap-software-download.yaml b/deploy/pipelines/04-sap-software-download.yaml index ce58cbbbae..0966999505 100644 --- a/deploy/pipelines/04-sap-software-download.yaml +++ b/deploy/pipelines/04-sap-software-download.yaml @@ -138,7 +138,7 @@ stages: echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsernamefromVault" else echo -e "$green--- Setting the S username in key vault ---$reset" - az keyvault secret set --name "S-Username" --vault-name $kv_name --value="${SUsername}" --subscription "${ARM_SUBSCRIPTION_ID}" --output none + az keyvault secret set --name "S-Username" --vault-name $kv_name --value="${SUsername}" --subscription "${ARM_SUBSCRIPTION_ID}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsername" fi @@ -148,7 +148,7 @@ stages: echo -e "$green--- Password present in keyvault. 
In case of download errors check that user and password are correct ---$reset" else echo -e "$green--- Setting the S user name password in key vault ---$reset" - az keyvault secret set --name "S-Password" --vault-name $kv_name --value "${SPassword}" --subscription "${ARM_SUBSCRIPTION_ID}" --output none + az keyvault secret set --name "S-Password" --vault-name $kv_name --value "${SPassword}" --subscription "${ARM_SUBSCRIPTION_ID}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none echo "##vso[task.setvariable variable=SPASSWORD;isOutput=true]${SPassword}" fi displayName: Prepare download diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 1effad8bfd..96fdd975e2 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -82,6 +82,11 @@ parameters: type: boolean default: false + - name: sap_on_azure_quality_checks + displayName: SAP on Azure Quality Checks + type: boolean + default: false + - name: ams_provider displayName: Configure AMS Provider type: boolean @@ -137,7 +142,7 @@ stages: - template: templates\download.yaml parameters: getLatestFromBranch: true - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - bash: | #!/bin/bash # Exit immediately if a command exits with a non-zero status. @@ -257,13 +262,6 @@ stages: new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi - if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then - new_parameters=$PIPELINE_EXTRA_PARAMETERS - else - echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" - new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" - fi - echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" @@ -550,6 +548,24 @@ stages: azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sap_on_azure_quality_checks, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: SAP on Azure quality checks + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.acss_registration, true) }}: - template: templates\acss-registration.yaml parameters: @@ -587,6 +603,8 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) - - template: templates\collect-log-files.yaml + - template: templates\collect-log-files.yaml parameters: - logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ 
parameters.sap_system_configuration_name }}/logs + logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs + qualityAssuranceResultsPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/quality_assurance + collectQualityChecks: ${{ parameters.sap_on_azure_quality_checks }} diff --git a/deploy/pipelines/06-post-installation-tooling.yaml b/deploy/pipelines/06-post-installation-tooling.yaml index 34eb9338c0..dbb4dcd1b7 100644 --- a/deploy/pipelines/06-post-installation-tooling.yaml +++ b/deploy/pipelines/06-post-installation-tooling.yaml @@ -76,7 +76,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true submodules: true @@ -408,7 +408,7 @@ stages: parameters_folder: $[ stageDependencies.Preparation_for_Ansible.Preparation_step.outputs['Preparation.FOLDER'] ] DEPLOYMENT_REPO_PATH: $[ stageDependencies.Preparation_for_Ansible.Preparation_step.outputs['Preparation.DEPLOYMENT_REPO_PATH'] ] steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - script: | #!/bin/bash echo "nothing to do here right now" diff --git a/deploy/pipelines/07-sap-cal-installation.yaml b/deploy/pipelines/07-sap-cal-installation.yaml new file mode 100644 index 0000000000..61fe7edd8f --- /dev/null +++ b/deploy/pipelines/07-sap-cal-installation.yaml @@ -0,0 +1,404 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | This pipeline performs the software installation | +# | and must run on a self hosted deployment agent | +# | due to long run time. | +# | | +# +------------------------------------4--------------------------------------*/ + +parameters: + - name: sap_system_configuration_name + displayName: "SAP System configuration name, use the following syntax: ENV-LOCA-VNET-SID" + type: string + default: DEV-WEEU-SAP01-X00 + + - name: environment + displayName: Workload Environment (DEV, QUA, PRD, ...) 
+ type: string + default: DEV + + - name: sap_cal_product_name + displayName: SAP CAL Product Name + type: string + + - name: extra_params + displayName: Extra Parameters + type: string + default: "" + + - name: base_os_configuration + displayName: Core Operating System Configuration + type: boolean + default: true + + - name: sap_os_configuration + displayName: SAP Operating System Configuration + type: boolean + default: true + + - name: sapcal_integration + displayName: SAP CAL Integration + type: boolean + default: false + +# 20220929 MKD - ACSS Registration + - name: acss_registration + displayName: Register System in ACSS + type: boolean + default: true + + - name: acss_environment + displayName: ACSS Prod/NonProd + type: string + values: + - NonProd + - Prod + + - name: acss_sap_product + displayName: System Type + type: string + values: + - S4HANA + - ECC + - Other + # 20220929 MKD - ACSS Registration + + - name: sap_automation_repo_path + displayName: The local path on the agent where the sap_automation repo can be found + type: string + + - name: config_repo_path + displayName: The local path on the agent where the config repo can be found + type: string + +stages: + - stage: Preparation_for_Ansible + condition: and(not(failed()), not(canceled())) + variables: + - template: variables/07-sap-cal-installation-variables.yaml + parameters: + environment: ${{ parameters.environment }} + displayName: OS Configuration and SAP Installation + jobs: + - job: Installation_step + displayName: OS Configuration and SAP Installation + timeoutInMinutes: 0 + workspace: + clean: all + steps: + - template: templates\download.yaml + parameters: + getLatestFromBranch: true + - task: PostBuildCleanup@4 + - bash: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + set -e + + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" + if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then + echo -e "$green --- Install dos2unix ---$reset" + sudo apt-get -qq install dos2unix + fi + echo -e "$green--- Convert config file to UX format ---$reset" + echo -e "$green--- Update .sap_deployment_automation/config as DEPLOYMENT_REPO_PATH can change on devops agent ---$reset" + export HOME=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path) + cd $HOME + + echo -e "$green--- Configure devops CLI extension ---$reset" + az config set extension.use_dynamic_install=yes_without_prompt --output none + + az extension add --name azure-devops --output none + + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + + echo -e "$green--- Validations ---$reset" + ENVIRONMENT=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $1}' | xargs) ; echo Environment $ENVIRONMENT + LOCATION=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $2}' | xargs) ; echo Location $LOCATION + NETWORK=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $3}' | xargs) ; echo Virtual network logical name $NETWORK + SID=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $4}' | xargs) ; echo SID $SID + + environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION$NETWORK ; echo configuration_file $environment_file_name + params_file=$HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml ; echo sap_parameters_file $params_file + + if [ "azure pipelines" = "$(this_agent)" ]; then + echo "##vso[task.logissue type=error]Please use a self hosted agent for this playbook. 
Define it in the SDAF-${ENVIRONMENT} variable group using the 'POOL' variable." + exit 2 + fi + + if [ ! -f $environment_file_name ]; then + echo -e "$boldred--- $environment_file_name was not found ---$reset" + echo "##vso[task.logissue type=error]Workload zone configuration file $environment_file_name was not found." + exit 2 + fi + + if [ ! -f $params_file ]; then + echo -e "$boldred--- $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml was not found ---$reset" + echo "##vso[task.logissue type=error]File $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml was not found." + exit 2 + else + dos2unix -q $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml + fi + + if [ ! -n ${SID} ]; then + echo "##vso[task.logissue type=error]SID was not found in ${SAP_SYSTEM_CONFIGURATION_NAME}." + exit 2 + fi + + if [ ! -f $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml ]; then + echo -e "$boldred--- $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml was not found ---$reset" + echo "##vso[task.logissue type=error]File $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml was not found." + exit 2 + fi + dos2unix -q $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml + + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") + echo '$(variable_group) id: ' $VARIABLE_GROUP_ID + if [ -z ${VARIABLE_GROUP_ID} ]; then + echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." + exit 2 + fi + + echo "##vso[build.updatebuildnumber]Deploying ${SAP_SYSTEM_CONFIGURATION_NAME} using SAP CAL" + + echo "##vso[task.setvariable variable=SID;isOutput=true]${SID}" + echo "##vso[task.setvariable variable=SAP_PARAMETERS;isOutput=true]sap-parameters.yaml" + echo "##vso[task.setvariable variable=FOLDER;isOutput=true]$HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}" + echo "##vso[task.setvariable variable=HOSTS;isOutput=true]${SID}_hosts.yaml" + + echo -e "$green--- Get Files from the DevOps Repository ---$reset" + cd ${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path)/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME} + sap_params_updated=0 + + fqdn="$(grep -m1 "$sap_fqdn:" sap-parameters.yaml | cut -d':' -f2- | tr -d ' ' | tr -d '"')" + if [ -z $fqdn ] ; then + sed -i 's|sap_fqdn:.*|sap_fqdn: '"$(sap_fqdn)"'|' sap-parameters.yaml + fi + + if [[ -n "${sapcalProductName}" ]]; then + echo -e "$green--- Add SAP CAL Product Name $sapcalProductName to sap-parameters.yaml ---$reset" + sed -i 's|sap_cal_product_name:.*|sap_cal_product_name: '"$sapcalProductName"'|' sap-parameters.yaml + fi + + echo -e "$green--- Get connection details ---$reset" + mkdir -p artifacts + + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value --output tsv) + if [ -z ${az_var} ]; then + export workload_key_vault=$(cat "${environment_file_name}" | grep workloadkeyvault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + else + export workload_key_vault=${az_var} ; echo 'Workload Key Vault' ${workload_key_vault} ; + fi + + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output tsv) + if [ -z ${az_var} ]; then + export workload_prefix=$(cat "${environment_file_name}" | grep workload_zone_prefix | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} + else + export 
workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}; + fi + + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Subscription.value --output tsv) + if [ -z ${az_var} ]; then + export control_plane_subscription=$(cat "${environment_file_name}" | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Control Plane Subscription' ${control_plane_subscription} + else + export control_plane_subscription=${az_var} ; echo 'Control Plane Subscription' ${control_plane_subscription} + fi + + if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then + new_parameters=$PIPELINE_EXTRA_PARAMETERS + else + echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" + new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" + fi + + echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" + echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" + echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" + echo "##vso[task.setvariable variable=USERNAME_KEY_NAME;isOutput=true]${workload_prefix}-sid-username" + echo "##vso[task.setvariable variable=NEW_PARAMETERS;isOutput=true]${new_parameters}" + echo "##vso[task.setvariable variable=CP_SUBSCRIPTION;isOutput=true]${control_plane_subscription}" + + + echo -e "$green--- az login ---$reset" + # If the deployer_file exists we run on a deployer configured by the framework instead of a azdo hosted one + deployer_file=/etc/profile.d/deploy_server.sh + if [ "$USE_MSI" = "true" ]; then + echo "Using MSI" + source /etc/profile.d/deploy_server.sh + az account set --subscription $control_plane_subscription + + else + if [ ! -n $AZURE_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." + exit 2 + fi + + if [ ! -n $AZURE_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." + exit 2 + fi + + if [ ! -n $AZURE_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." + exit 2 + fi + az login --service-principal --username $AZURE_CLIENT_ID --password=${AZURE_CLIENT_SECRET} --tenant $AZURE_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + az account set --subscription $control_plane_subscription + fi + + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + if [ -n ${az_var} ]; then + kv_name=${az_var}; echo "Key Vault="$kv_name + else + kv_name=$(cat .sap_deployment_automation/$(environment_code)$(location_code) | grep keyvault |awk -F'=' '{print $2}'); echo "Key Vault="$kv_name + fi + + echo "##vso[task.setvariable variable=KV_NAME;isOutput=true]$kv_name" + + if [ "your S User" == "${SUsername}" ]; then + echo "##vso[task.logissue type=error]Please define the S-Username variable." + exit 2 + fi + + if [ "your S user password" == "${SPassword}" ]; then + echo "##vso[task.logissue type=error]Please define the S-Password variable." 
+ exit 2 + fi + + echo -e "$green--- Set S-Username and S-Password in the key_vault if not yet there ---$reset" + + export SUsernamefromVault=$(az keyvault secret list --vault-name "${kv_name}" --subscription "${ARM_SUBSCRIPTION_ID}" --query "[].{Name:name} | [? contains(Name,'S-Username')] | [0]" -o tsv) + if [ $SUsernamefromVault == $SUsername ]; then + echo -e "$green--- $SUsername present in keyvault. In case of download errors check that user and password are correct ---$reset" + echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsernamefromVault" + else + echo -e "$green--- Setting the S username in key vault ---$reset" + az keyvault secret set --name "S-Username" --vault-name $kv_name --value="${SUsername}" --subscription "${ARM_SUBSCRIPTION_ID}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsername" + fi + + export SPasswordfromVault=$(az keyvault secret list --vault-name "${kv_name}" --subscription "${ARM_SUBSCRIPTION_ID}" --query "[].{Name:name} | [? contains(Name,'S-Password')] | [0]" -o tsv) + if [ ${SPassword} == $SPasswordfromVault ]; then + echo "##vso[task.setvariable variable=SPASSWORD;isOutput=true]${SPasswordfromVault}" + echo -e "$green--- Password present in keyvault. In case of download errors check that user and password are correct ---$reset" + else + echo -e "$green--- Setting the S user name password in key vault ---$reset" + az keyvault secret set --name "S-Password" --vault-name $kv_name --value "${SPassword}" --subscription "${ARM_SUBSCRIPTION_ID}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + echo "##vso[task.setvariable variable=SPASSWORD;isOutput=true]${SPassword}" + fi + + az keyvault secret show --name ${workload_prefix}-sid-sshkey --vault-name $workload_key_vault --subscription $AZURE_SUBSCRIPTION_ID --query value -o tsv > artifacts/${SAP_SYSTEM_CONFIGURATION_NAME}_sshkey + cp sap-parameters.yaml artifacts/. + cp ${SID}_hosts.yaml artifacts/. 
+ + 2> >(while read line; do (>&2 echo "STDERROR: $line"); done) + name: Preparation + displayName: Preparation for Ansible + env: + SCRIPT_PATH: $${{ parameters.sap_automation_repo_path }}/deploy/pipelines/templates/*.sh + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + ANSIBLE_HOST_KEY_CHECKING: false + AZURE_CLIENT_ID: $(ARM_CLIENT_ID) + AZURE_CLIENT_SECRET: $(ARM_CLIENT_SECRET) + AZURE_TENANT_ID: $(ARM_TENANT_ID) + AZURE_SUBSCRIPTION_ID: $(Terraform_Remote_Storage_Subscription) + ANSIBLE_COLLECTIONS_PATHS: /opt/ansible/collections + CONFIG_REPO_PATH: ${{ parameters.config_repo_path }} + SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sap_system_configuration_name }} + EXTRA_PARAMETERS: $(EXTRA_PARAMETERS) + PIPELINE_EXTRA_PARAMETERS: ${{ parameters.extra_params }} + USE_MSI: $(USE_MSI) + SUsername: $(S-Username) + SPassword: $(S-Password) + sapcalProductName: ${{ parameters.sap_cal_product_name }} + + - template: templates\run-ansible.yaml + parameters: + displayName: "Parameter validation" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_00_validate_parameters.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.base_os_configuration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "Operating System Configuration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_01_os_base_config.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sap_os_configuration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "SAP Specific Operating System Configuration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_02_os_sap_specific_config.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: 
"$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sapcal_integration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "SAPCAL Integration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_sapcal_integration.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + sapcalProductName: ${{ parameters.sap_cal_product_name }} + USE_MSI: $(USE_MSI) + - template: templates\collect-calapi-file.yaml + parameters: + filePath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }} diff --git a/deploy/pipelines/12-remove-control-plane.yaml b/deploy/pipelines/12-remove-control-plane.yaml index f2bbd87f43..747cd637cd 100644 --- a/deploy/pipelines/12-remove-control-plane.yaml +++ b/deploy/pipelines/12-remove-control-plane.yaml @@ -59,7 +59,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - bash: | #!/bin/bash # Treat unset variables as an error when substituting. 
@@ -407,7 +407,7 @@ stages: - template: templates\download.yaml parameters: getLatestFromBranch: true - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - task: AzureCLI@2 continueOnError: false inputs: diff --git a/deploy/pipelines/21-deploy-web-app.yaml b/deploy/pipelines/21-deploy-web-app.yaml index c62a9028b0..46edd5d0f8 100644 --- a/deploy/pipelines/21-deploy-web-app.yaml +++ b/deploy/pipelines/21-deploy-web-app.yaml @@ -78,7 +78,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true - bash: | diff --git a/deploy/pipelines/22-sample-deployer-config.yaml b/deploy/pipelines/22-sample-deployer-config.yaml index e9ff87dd91..c7e8e7bcbc 100644 --- a/deploy/pipelines/22-sample-deployer-config.yaml +++ b/deploy/pipelines/22-sample-deployer-config.yaml @@ -90,7 +90,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true - task: PowerShell@2 diff --git a/deploy/pipelines/23-levelup-configuration.yaml b/deploy/pipelines/23-levelup-configuration.yaml index 60b0ae3065..7c8cd801e1 100644 --- a/deploy/pipelines/23-levelup-configuration.yaml +++ b/deploy/pipelines/23-levelup-configuration.yaml @@ -54,7 +54,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true submodules: true diff --git a/deploy/pipelines/templates/collect-calapi-file.yaml b/deploy/pipelines/templates/collect-calapi-file.yaml new file mode 100644 index 0000000000..b250fff633 --- /dev/null +++ b/deploy/pipelines/templates/collect-calapi-file.yaml @@ -0,0 +1,37 @@ +parameters: + filePath: "" +steps: + - script: | + #!/bin/bash + set -eu + echo "Collecting sapcal_provisioning.json ${{ parameters.filePath }}" + cd ${FILE_PATH} + if [ -f "sapcal_provisioning.json" ]; then + echo "Found sapcal_provisioning.json" + git config --global user.email "${USER_EMAIL}" + git config --global user.name "${USER_NAME}" + echo "Checking out ${SOURCE_BRANCH} branch..." + git checkout -q ${SOURCE_BRANCH} + echo "Pulling last changes..." + git pull + echo "Adding sapcal_provisioning.json..." + git add sapcal_provisioning.json + if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then + echo "Committing changes..." + git commit -m "Adding sapcal_provisioning.json" + echo "Pushing changes..." + git push + else + echo "No changes to commit for sapcal_provisioning.json" + fi + else + echo "sapcal_provisioning.json not found" + fi + displayName: Store SAP-CAL API response in repository + enabled: true + env: + USER_EMAIL: $(Build.RequestedForEmail) + USER_NAME: $(Build.RequestedFor) + SOURCE_BRANCH: $(Build.SourceBranchName) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + FILE_PATH: ${{ parameters.filePath }} diff --git a/deploy/pipelines/templates/collect-log-files.yaml b/deploy/pipelines/templates/collect-log-files.yaml index 6fc4996df8..0155e60604 100644 --- a/deploy/pipelines/templates/collect-log-files.yaml +++ b/deploy/pipelines/templates/collect-log-files.yaml @@ -1,5 +1,7 @@ parameters: logPath: "" + qualityAssuranceResultsPath: "" + collectQualityChecks: false steps: - script: | #!/bin/bash @@ -80,3 +82,48 @@ steps: condition: always() env: LOG_PATH: ${{ parameters.logPath }} + + - script: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + # Treat unset variables as an error when substituting. 
+ set -eu + + echo "Collecting quality assurance results files from ${{ parameters.qualityAssuranceResultsPath }}" + + if [ -d ${QUALITY_ASSURANCE_RESULTS_PATH} ] && [ $(ls ${QUALITY_ASSURANCE_RESULTS_PATH}/*.html | wc -l ) -gt 0 ]; then + echo "Found new quality assurance results files in ${QUALITY_ASSURANCE_RESULTS_PATH}" + + cd ${QUALITY_ASSURANCE_RESULTS_PATH} + ls -ltr + + git config --global user.email "${USER_EMAIL}" + git config --global user.name "${USER_NAME}" + + echo "Checking out ${SOURCE_BRANCH} branch..." + git checkout -q ${SOURCE_BRANCH} + echo "Pulling last changes..." + git pull + + echo "Adding new quality assurance files..." + git add --ignore-errors *.html + if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then + echo "Committing changes..." + git commit -m "Adding new quality assurance files" + echo "Pushing changes..." + git push + else + echo "No changes to commit" + fi + else + echo No quality assurance files found in "${QUALITY_ASSURANCE_RESULTS_PATH}" + fi + displayName: Store quality assurance files in repository + enabled: true + condition: ${{ eq(parameters.collectQualityChecks, true) }} + env: + USER_EMAIL: $(Build.RequestedForEmail) + USER_NAME: $(Build.RequestedFor) + SOURCE_BRANCH: $(Build.SourceBranchName) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + QUALITY_ASSURANCE_RESULTS_PATH: ${{ parameters.qualityAssuranceResultsPath }} diff --git a/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml b/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml new file mode 100644 index 0000000000..94f39757d8 --- /dev/null +++ b/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml @@ -0,0 +1,25 @@ +#--------------------------------------+------------------------------------------------8 +# | +# Defines the parameters and variables for the SAP Software Install using SAP CAL | +# | +#--------------------------------------+------------------------------------------------8 + +parameters: + environment: "" + +variables: + - group: "SDAF-General" + + - group: SDAF-${{ parameters.environment }} + + - name: agent_name + value: $[coalesce(variables['POOL'], variables['Agent'])] + + - name: this_agent + value: $[lower(coalesce(variables['POOL'], variables['Agent']))] + + - name: variable_group + value: SDAF-${{ parameters.environment }} + + - name: key_vault + value: $[variables['Deployer_Key_Vault']] diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 115a8bd8f8..b9687df43e 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -25,7 +25,7 @@ $ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } #endregion -$versionLabel = "v3.11.0.2" +$versionLabel = "v3.11.0.3" # az logout @@ -38,8 +38,6 @@ $versionLabel = "v3.11.0.2" # az login --output none --tenant $ARM_TENANT_ID --only-show-errors --scope https://graph.microsoft.com//.default # } -Write-Host "" -Write-Host "" # Check if access to the Azure DevOps organization is available and prompt for PAT if needed # Exact permissions required, to be validated, and included in the Read-Host text. 
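
The new quality-assurance collection step above gates on 'ls ${QUALITY_ASSURANCE_RESULTS_PATH}/*.html | wc -l', which writes an error to stderr when no report exists, because an unmatched glob stays literal. A null-glob-safe sketch of the same check, under the assumption that counting matching files is all that is needed; RESULTS_DIR is an illustrative placeholder:

    #!/bin/bash
    # With nullglob set, a glob with no matches expands to nothing instead
    # of the literal pattern, so the count is clean and nothing errors.
    # RESULTS_DIR is an illustrative placeholder.
    RESULTS_DIR="quality_assurance"

    shopt -s nullglob
    reports=("${RESULTS_DIR}"/*.html)
    shopt -u nullglob

    if [ -d "${RESULTS_DIR}" ] && [ "${#reports[@]}" -gt 0 ]; then
      echo "Found ${#reports[@]} quality assurance report(s) in ${RESULTS_DIR}"
    else
      echo "No quality assurance files found in ${RESULTS_DIR}"
    fi
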
@@ -65,21 +63,8 @@ else { Write-Host "" Write-Host "" -if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } - -if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { - $Title = "Select the authentication method to use" - $data = @('Service Principal', 'Managed Identity') - Show-Menu($data) - $selection = Read-Host $Title - $authenticationMethod = $data[$selection - 1] -} -else { - $authenticationMethod = $Env:SDAF_AuthenticationMethod -} - -Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor Yellow +if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { $Title = "Select the authentication method to use" @@ -224,7 +209,6 @@ else { Write-Host "Using an existing project" - $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --output tsv) @@ -557,7 +541,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.4" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.9.5" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } @@ -643,6 +627,18 @@ $this_pipeline_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Proj $log = ("[" + $pipeline_name + "](" + $this_pipeline_url + ")") Add-Content -Path $fname -Value $log +$pipeline_name = 'SAP installation using SAP-CAL' +$sapcal_installation_pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") +if ($sapcal_installation_pipeline_id.Length -eq 0) { + az pipelines create --name $pipeline_name --branch main --description 'Configures the Operating System and installs the SAP application using SAP CAL' --skip-run --yaml-path "/pipelines/07-sap-cal-installation.yml" --repository $repo_id --repository-type tfsgit --output none --only-show-errors + $sapcal_installation_pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") +} +$pipelines.Add($sapcal_installation_pipeline_id) + +$this_pipeline_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_build?definitionId=" + $sapcal_installation_pipeline_id +$log = ("[" + $pipeline_name + "](" + $this_pipeline_url + ")") +Add-Content -Path $fname -Value $log + $pipeline_name = 'Remove System or Workload Zone' $pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") if ($pipeline_id.Length -eq 0) { @@ -762,25 +758,47 @@ if 
($WebApp) {
   $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json
   $APP_REGISTRATION_ID = $ExistingData.appId
+  $APP_REGISTRATION_OBJECTID = $ExistingData.id
-  $confirmation = Read-Host "Reset the app registration secret y/n?"
-  if ($confirmation -eq 'y') {
-    $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors)
-  }
-  else {
-    $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret"
-  }
+  # $confirmation = Read-Host "Reset the app registration secret y/n?"
+  # if ($confirmation -eq 'y') {
+  #   $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF")
+  # }
+  # else {
+  #   $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret"
+  # }
 }
 else {
   Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green
+  if ($IsWindows) { $manifestPath = ".\manifest.json" } else { $manifestPath = "./manifest.json" }
   Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]'
-  $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access ".${pathSeparator}manifest.json" --query "appId").Replace('"', "")
+  $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access $manifestPath --query "appId" --output tsv)
+  $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json
+  $APP_REGISTRATION_OBJECTID = $ExistingData.id
+
+  if (Test-Path $manifestPath) { Write-Host "Removing manifest.json" ; Remove-Item $manifestPath }
+
+
+  # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF")
+  }
+
+  if ($MSI_objectId -ne $null) {
+    $configureAuth = Read-Host "Configuring authentication for the App Registration (y/n)?"
+    if ($configureAuth -eq 'y') {
+      az rest --method POST --uri "https://graph.microsoft.com/beta/applications/$APP_REGISTRATION_OBJECTID/federatedIdentityCredentials" --body "{'name': 'ManagedIdentityFederation', 'issuer': 'https://login.microsoftonline.com/$ARM_TENANT_ID/v2.0', 'subject': '$MSI_objectId', 'audiences': [ 'api://AzureADTokenExchange' ]}"
-  if (Test-Path ".${pathSeparator}manifest.json") { Write-Host "Removing manifest.json" ; Remove-Item ".${pathSeparator}manifest.json" }
+      $API_URL = "https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationMenuBlade/~/ProtectAnAPI/appId/$APP_REGISTRATION_ID/isMSAApp~/false"
+
+      Write-Host "The browser will now open, Please Add a new scope, by clicking the '+ Add a new scope link', accept the default name and click 'Save and Continue'"
+      Write-Host "In the Add a scope page enter the scope name 'user_impersonation'.
Choose 'Admins and Users' in the who can consent section, next provide the Admin consent display name 'Access the SDAF web application' and 'Use SDAF' as the Admin consent description, accept the changes by clicking the 'Add scope' button" + + Start-Process $API_URL + Read-Host -Prompt "Once you have created and validated the scope, Press any key to continue" + } - $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors) } + } #endregion @@ -815,117 +833,14 @@ if ($authenticationMethod -eq "Service Principal") { $CP_ARM_OBJECT_ID = $ExistingData.Id $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" - if ($confirmation -eq 'y') { - - $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") - } - } -} -else { - - if ($Env:MSI_OBJECT_ID.Length -ne 0) { - $MSI_objectId = $Env:MSI_OBJECT_ID - } - else { - - $Title = "Choose the subscription that contains the Managed Identity" - $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) - Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) - $selection = Read-Host $Title - - $subscription = $subscriptions[$selection - 1] - - $Title = "Choose the Managed Identity" - $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output table | Sort-Object) - Show-Menu($identities[2..($identities.Length - 1)]) - $selection = Read-Host $Title - $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - - $identity = $identities[$selectionOffset] - Write-Host "Using Managed Identity:" $identity - - $id = $(az identity list --query "[?name=='$identity'].id" --subscription $subscription --output tsv) - $MSI_objectId = $(az identity show --ids $id --query "principalId" --output tsv) - - $postBody = [PSCustomObject]@{ - accessLevel = @{ - accountLicenseType = "Basic" - } - projectEntitlements = @([ordered]@{ - group = @{ - groupType = "projectAdministrator" - } - projectRef = @{ - id = $Project_ID - } - - }) - servicePrincipal = @{ - origin = "aad" - originId = $id - subjectKind = "servicePrincipal" - } - - } - - Set-Content -Path "user.json" -Value ($postBody | ConvertTo-Json -Depth 6) - - az devops invoke --area MemberEntitlementManagement --resource ServicePrincipalEntitlements --in-file user.json --api-version "7.1-preview" --http-method POST - - } -} - - -#region App registration -if ($WebApp) { - Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green - - $found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) - - if ($found_appRegistration.Length -ne 0) { - Write-Host "Found an existing App Registration:" $ApplicationName - $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - - $APP_REGISTRATION_ID = $ExistingData.appId - $APP_REGISTRATION_OBJECTID = $ExistingData.id - - # $confirmation = Read-Host "Reset the app registration secret y/n?" + #$confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" 
# if ($confirmation -eq 'y') { - # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + + # $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") # } # else { - # $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" + $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal $spn_name password" # } - } - else { - Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green - if ($IsWindows) { $manifestPath = ".\manifest.json" } else { $manifestPath = "./manifest.json" } - Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' - - $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access $manifestPath --query "appId" --output tsv) - $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - $APP_REGISTRATION_OBJECTID = $ExistingData.id - - if (Test-Path $manifestPath) { Write-Host "Removing manifest.json" ; Remove-Item $manifestPath } - - - # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") - } - - az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none - - az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none - - $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) - if ($Control_plane_groupID.Length -eq 0) { - Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true - } - else { - $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal password" - } } else { @@ -944,13 +859,12 @@ if ($WebApp) { az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none -} -else { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) 
if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green + if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' PAT='Enter your personal access token here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true } else { az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true @@ -990,14 +904,20 @@ else { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID WEB_APP_CLIENT_SECRET=$WEB_APP_CLIENT_SECRET PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + if ($WebApp) { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id 
SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + } + else { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + } + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) } Write-Host Write-Host "" - Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'." + Write-Host "The browser will now open, Please create an 'Azure Resource Manager' service connection with the name 'Control_Plane_Service_Connection'." $connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices" Write-Host "URL: " $connections_url @@ -1009,8 +929,6 @@ else { $groups.Add($Control_plane_groupID) -az pipelines variable-group variable update --group-id $Control_plane_groupID --name "WEB_APP_CLIENT_SECRET" --value $WEB_APP_CLIENT_SECRET --secret true --output none --only-show-errors - #endregion @@ -1104,7 +1022,7 @@ if (!$AlreadySet -or $ResetPAT ) { accessLevel = @{ accountLicenseType = "stakeholder" } - user = @{ + user = @{ origin = "aad" originId = $MSI_objectId subjectKind = "servicePrincipal" @@ -1147,7 +1065,7 @@ Write-Host "" Write-Host "The browser will now open, Select the '"$ADO_PROJECT "Build Service' user and ensure that it has 'Allow' in the Contribute section." 
$permissions_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/repositories?_a=permissions" - +Write-Host "URL: " $permissions_url Start-Process $permissions_url Read-Host -Prompt "Once you have verified the permission, Press any key to continue" @@ -1213,4 +1131,4 @@ else { } -Write-Host "The script has completed" -ForegroundColor Green +Write-Host "The script has completed" -ForegroundColor Green \ No newline at end of file diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index 6928d25560..30ab069ed0 100644 --- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -212,7 +212,7 @@ if ($authenticationMethod -eq "Service Principal") { Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_SECRET = $Data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_ID = $ExistingData.appId $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId $ARM_OBJECT_ID = $ExistingData.Id @@ -224,7 +224,7 @@ if ($authenticationMethod -eq "Service Principal") { $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors ) if ($GroupID.Length -eq 0) { Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green - az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false--output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project + az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false --output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors) } diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1 index d5bd34fabb..4c59353e6d 100644 --- a/deploy/scripts/Test-SDAFReadiness.ps1 +++ b/deploy/scripts/Test-SDAFReadiness.ps1 @@ -13,21 +13,11 @@ function Show-Menu($data) { $rnd = $(Get-Random -Minimum 1 -Maximum 1000).ToString() -if (Test-Path $LogFileDir) { - $LogFileName = "SDAF-" + $(Get-Date -Format 
"yyyyMMdd-HHmm") + ".md" - $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName -} -else { - Write-Host "The directory does not exist" - return -} - $LogFileDir = $Env:LogFileDir if ($null -eq $LogFileDir -or $LogFileDir -eq "") { $LogFileDir = Read-Host "Please enter the directory to save the log file" } - if (Test-Path $LogFileDir) { $LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md" $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName @@ -70,7 +60,8 @@ else { } if ($null -eq $ARM_CLIENT_SECRET -or $ARM_CLIENT_SECRET -eq "") { - $ARM_CLIENT_SECRET = Read-Host "Please enter the Service Principals App ID Password" -AsSecureString + $SecureString = Read-Host "Please enter the Service Principals App ID Password" -AsSecureString + $ARM_CLIENT_SECRET = ConvertFrom-SecureString $SecureString } $VM_password = $ARM_CLIENT_SECRET @@ -260,9 +251,9 @@ if ($selection.ToUpper() -eq "Y") { $OutputString = "Creating Key vault: " + $kvName Write-Host $OutputString -foregroundcolor Yellow Add-Content -Path $LogFileName $OutputString - az vault create --name $kvName --resource-group $resourceGroupName --location $Location --query "provisioningState" --enable-purge-protection false --retention-days 7 + az keyvault create --name $kvName --resource-group $resourceGroupName --location $Location --query "provisioningState" --enable-purge-protection false --retention-days 7 - az vault secret set --vault-name $kvName --name "sdaftestsecret" --value "sdaftestsecretvalue" --query "id" + az keyvault secret set --vault-name $kvName --name "sdaftestsecret" --value "sdaftestsecretvalue" --query "id" } $vmssName = "SDAF-VmssFlex" diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh index 7d1c79b452..610c592e5b 100755 --- a/deploy/scripts/deploy_controlplane.sh +++ b/deploy/scripts/deploy_controlplane.sh @@ -547,10 +547,10 @@ if [ 2 == $step ]; then then v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --query value | tr -d \") if [ "${v}" != "${TF_VAR_sa_connection_string}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --value "${TF_VAR_sa_connection_string}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --value "${TF_VAR_sa_connection_string}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --value "${TF_VAR_sa_connection_string}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --value "${TF_VAR_sa_connection_string}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi cd "${curdir}" || exit diff --git a/deploy/scripts/prepare_region.sh b/deploy/scripts/prepare_region.sh index 9432195f9b..5206c0f371 100755 --- a/deploy/scripts/prepare_region.sh +++ b/deploy/scripts/prepare_region.sh @@ -457,7 +457,7 @@ if [ 2 == $step ]; then echo "#########################################################################################" terraform_module_directory="${DEPLOYMENT_REPO_PATH}"/deploy/terraform/bootstrap/sap_library/ TF_VAR_sa_connection_string=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw sa_connection_string | tr -d \") - az keyvault secret set --vault-name "${keyvault}" --name "sa-connection-string" --value "${TF_VAR_sa_connection_string}" + az keyvault secret set --vault-name 
"${keyvault}" --name "sa-connection-string" --value "${TF_VAR_sa_connection_string} --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" fi cd "${curdir}" || exit diff --git a/deploy/scripts/set_secrets.sh b/deploy/scripts/set_secrets.sh index 2044dbc7ef..046929bedf 100755 --- a/deploy/scripts/set_secrets.sh +++ b/deploy/scripts/set_secrets.sh @@ -320,7 +320,7 @@ if [ "${deleted}" == "${secretname}" ]; then v=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query [].name | tee grep "${secretname}") if [ "${v}" != "${subscription}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi else exists=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) @@ -328,10 +328,10 @@ else v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) if [ "${v}" != "${subscription}" ] ; then echo -e "\t $cyan Setting secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" >stdout.az 2>&1 + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" >stdout.az 2>&1 + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 fi fi @@ -385,10 +385,10 @@ if [ 0 = "${deploy_using_msi_only:-}" ]; then then v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) if [ "${v}" != "${client_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi secretname="${environment}"-tenant-id @@ -404,10 +404,10 @@ if [ 0 = "${deploy_using_msi_only:-}" ]; then then v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) if [ "${v}" != 
"${tenant_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi secretname="${environment}"-client-secret @@ -424,10 +424,10 @@ if [ 0 = "${deploy_using_msi_only:-}" ]; then then v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) if [ "${v}" != "${client_secret}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --only-show-errors --output none + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none fi fi exit $return_code diff --git a/deploy/scripts/update_sas_token.sh b/deploy/scripts/update_sas_token.sh index 138a1dc5ae..ab01dcb420 100755 --- a/deploy/scripts/update_sas_token.sh +++ b/deploy/scripts/update_sas_token.sh @@ -24,4 +24,4 @@ end=`date -u -d "90 days" '+%Y-%m-%dT%H:%MZ'` sas=?$(az storage container generate-sas --permissions rl --account-name $saplib --name sapbits --https-only --expiry $end -o tsv --account-key "${key}") -az keyvault secret set --vault-name $kv_name --name "sapbits-sas-token" --value "${sas}" +az keyvault secret set --vault-name $kv_name --name "sapbits-sas-token" --value "${sas}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index fb0d867adf..9beee66193 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -31,7 +31,6 @@ module "sap_deployer" { configure = false deployer = local.deployer deployer_vm_count = var.deployer_count - dns_zone_names = var.dns_zone_names enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults firewall_deployment = local.firewall_deployment @@ -39,8 +38,6 @@ module "sap_deployer" { firewall_allowed_ipaddresses = local.firewall_allowed_ipaddresses infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = var.management_dns_resourcegroup_name - management_dns_subscription_id = 
var.management_dns_subscription_id options = local.options place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = var.public_network_access_enabled @@ -51,11 +48,11 @@ module "sap_deployer" { ssh-timeout = var.ssh-timeout subnets_to_add = var.subnets_to_add_to_firewall_for_keyvaults_and_storage tf_version = var.tf_version - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint use_webapp = var.use_webapp webapp_client_secret = var.webapp_client_secret + dns_settings = local.dns_settings } module "sap_namegenerator" { diff --git a/deploy/terraform/bootstrap/sap_deployer/providers.tf b/deploy/terraform/bootstrap/sap_deployer/providers.tf index 464f38bc6c..c0b52989cf 100644 --- a/deploy/terraform/bootstrap/sap_deployer/providers.tf +++ b/deploy/terraform/bootstrap/sap_deployer/providers.tf @@ -57,6 +57,15 @@ provider "azurerm" { alias = "dnsmanagement" } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id), null) + alias = "privatelinkdnsmanagement" + skip_provider_registration = true + storage_use_azuread = true + } + + terraform { required_version = ">= 1.0" required_providers { diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 594e40b6f6..68876b134e 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -381,7 +381,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.8.0" + default = "1.9.5" } variable "name_override_file" { @@ -429,13 +429,13 @@ variable "use_custom_dns_a_registration" { variable "management_dns_subscription_id" { description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null + default = "" type = string } variable "management_dns_resourcegroup_name" { description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null + default = "" type = string } @@ -452,6 +452,18 @@ variable "dns_zone_names" { } } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } + ######################################################################################### # # diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 49ca8fa23b..02f3280995 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -224,4 +224,14 @@ locals { app_id = var.app_registration_app_id client_secret = var.webapp_client_secret } + + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = 
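# Why the " " sentinel appears in the coalesce() calls below: Terraform's coalesce()
# skips null and empty-string arguments and raises an error when every argument is
# empty, so a single-space fallback guarantees a result and the wrapping trimspace()
# collapses it back to "". A self-contained illustration:
locals {
  sentinel_empty    = trimspace(coalesce("", " "))       # => ""
  sentinel_nonempty = trimspace(coalesce("rg-dns", " ")) # => "rg-dns"
}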
trimspace(var.management_dns_subscription_id) + privatelink_dns_subscription_id = trimspace(coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id, " ")) + privatelink_dns_resourcegroup_name = trimspace(coalesce(var.privatelink_dns_resourcegroup_name, var.management_dns_resourcegroup_name, " ")) + } + } diff --git a/deploy/terraform/bootstrap/sap_library/module.tf b/deploy/terraform/bootstrap/sap_library/module.tf index f015481b40..deb16748ba 100644 --- a/deploy/terraform/bootstrap/sap_library/module.tf +++ b/deploy/terraform/bootstrap/sap_library/module.tf @@ -8,27 +8,24 @@ module "sap_library" { azurerm.main = azurerm.main azurerm.deployer = azurerm.deployer azurerm.dnsmanagement = azurerm.dnsmanagement + azurerm.privatelinkdnsmanagement = azurerm.privatelinkdnsmanagement } Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" bootstrap = true deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) - dns_label = var.dns_label - dns_zone_names = var.dns_zone_names infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) - management_dns_subscription_id = trimspace(var.management_dns_subscription_id) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming place_delete_lock_on_resources = var.place_delete_lock_on_resources service_principal = var.use_deployer ? local.service_principal : local.account short_named_endpoints_nics = var.short_named_endpoints_nics storage_account_sapbits = local.storage_account_sapbits storage_account_tfstate = local.storage_account_tfstate - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_webapp = var.use_webapp + dns_settings = local.dns_settings } module "sap_namegenerator" { diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 165b85580b..688be3bf6c 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -66,6 +66,17 @@ provider "azurerm" { storage_use_azuread = true } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, local.spn.subscription_id), null) + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? 
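# Counterpart declaration a child module needs before the new alias can be passed to
# it; a sketch only, since the providers map in module.tf above already shows the
# caller side of the wiring:
terraform {
  required_providers {
    azurerm = {
      source                = "hashicorp/azurerm"
      configuration_aliases = [azurerm.privatelinkdnsmanagement]
    }
  }
}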
local.spn.tenant_id : null + alias = "privatelinkdnsmanagement" + skip_provider_registration = true + storage_use_azuread = true + } + provider "azuread" { client_id = local.spn.client_id client_secret = local.spn.client_secret diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 6653d9f325..37db9ba236 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -39,7 +39,7 @@ variable "place_delete_lock_on_resources" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } #######################################4#######################################8 @@ -256,19 +256,31 @@ variable "add_Agent_IP" { ######################################################################################### variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" + description = "Boolean value indicating if a custom DNS A record should be created when using private endpoints" default = false type = bool } variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" + description = "String value giving the possibility to register custom DNS A records in a separate subscription" default = "" type = string } variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" + description = "String value giving the possibility to register custom DNS A records in a separate resourcegroup" + default = "" + type = string + } + +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" default = "" type = string } @@ -285,3 +297,14 @@ variable "dns_zone_names" { } } +variable "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" + default = true + type = bool + } + +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + default = true + type = bool + } diff --git a/deploy/terraform/bootstrap/sap_library/transform.tf b/deploy/terraform/bootstrap/sap_library/transform.tf index fa4dc57114..a49eb28725 100644 --- a/deploy/terraform/bootstrap/sap_library/transform.tf +++ b/deploy/terraform/bootstrap/sap_library/transform.tf @@ -118,4 +118,17 @@ locals { public_network_access_enabled = var.public_network_access_enabled } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_label = var.dns_label + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = trimspace(var.management_dns_subscription_id) + privatelink_dns_subscription_id = trimspace(var.privatelink_dns_subscription_id) + privatelink_dns_resourcegroup_name = 
trimspace(var.privatelink_dns_resourcegroup_name) + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + } + + } diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf index 37c7b63032..8a1967650d 100644 --- a/deploy/terraform/run/sap_deployer/module.tf +++ b/deploy/terraform/run/sap_deployer/module.tf @@ -31,7 +31,6 @@ module "sap_deployer" { configure = true deployer = local.deployer deployer_vm_count = var.deployer_count - dns_zone_names = var.dns_zone_names enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults firewall_deployment = local.firewall_deployment @@ -39,8 +38,6 @@ module "sap_deployer" { firewall_allowed_ipaddresses = local.firewall_allowed_ipaddresses infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = var.management_dns_resourcegroup_name - management_dns_subscription_id = var.management_dns_subscription_id options = local.options place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = var.public_network_access_enabled @@ -51,11 +48,11 @@ module "sap_deployer" { ssh-timeout = var.ssh-timeout subnets_to_add = var.subnets_to_add_to_firewall_for_keyvaults_and_storage tf_version = var.tf_version - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint use_webapp = var.use_webapp webapp_client_secret = var.webapp_client_secret + dns_settings = local.dns_settings } diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 07199ae105..4588670d4b 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -88,7 +88,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } } } diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index cfed10f8a4..5bd696982b 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -378,7 +378,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.8.0" + default = "1.9.5" } variable "name_override_file" { @@ -432,13 +432,13 @@ variable "use_custom_dns_a_registration" { variable "management_dns_subscription_id" { description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null + default = "" type = string } variable "management_dns_resourcegroup_name" { description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null + default = "" type = string } variable "dns_zone_names" { @@ -453,6 +453,19 @@ variable "dns_zone_names" { } } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records 
in a separate resourcegroup" + default = "" + type = string + } + + ######################################################################################### # # # ADO definitioms # @@ -565,7 +578,7 @@ variable "add_system_assigned_identity" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index a59d4634b7..2744368f99 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -222,4 +222,15 @@ locals { client_secret = var.webapp_client_secret } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + + management_dns_resourcegroup_name = trimspace(coalesce(var.management_dns_resourcegroup_name,local.saplib_resource_group_name, " ")) + management_dns_subscription_id = trimspace(coalesce(var.management_dns_subscription_id, local.saplib_subscription_id, " ")) + + privatelink_dns_subscription_id = trimspace(coalesce(var.privatelink_dns_subscription_id,var.management_dns_subscription_id, local.saplib_subscription_id," ")) + privatelink_dns_resourcegroup_name = trimspace(coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name,local.saplib_resource_group_name, " ")) + } + } diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index ab1ae59733..13a5f7782c 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -20,9 +20,6 @@ module "sap_landscape" { create_transport_storage = var.create_transport_storage deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) diagnostics_storage_account = local.diagnostics_storage_account - dns_label = var.dns_label - dns_server_list = var.dns_server_list - dns_zone_names = var.dns_zone_names enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults enable_rbac_authorization_for_keyvault = var.enable_rbac_authorization_for_keyvault @@ -33,11 +30,6 @@ module "sap_landscape" { install_volume_size = var.install_volume_size key_vault = local.key_vault keyvault_private_endpoint_id = var.keyvault_private_endpoint_id - management_dns_subscription_id = try(var.management_dns_subscription_id, local.saplib_subscription_id) - management_dns_resourcegroup_name = lower(length(var.management_dns_resourcegroup_name) > 0 ? ( - var.management_dns_resourcegroup_name) : ( - local.saplib_resource_group_name - )) naming = length(var.name_override_file) > 0 ? ( local.custom_names) : ( module.sap_namegenerator.naming @@ -47,8 +39,6 @@ module "sap_landscape" { peer_with_control_plane_vnet = var.peer_with_control_plane_vnet place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = var.public_network_access_enabled - register_endpoints_with_dns = var.register_endpoints_with_dns - register_virtual_network_to_dns = var.register_virtual_network_to_dns service_principal = var.use_spn ? 
local.service_principal : local.account soft_delete_retention_days = var.soft_delete_retention_days storage_account_replication_type = var.storage_account_replication_type @@ -58,12 +48,12 @@ module "sap_landscape" { transport_storage_account_id = var.transport_storage_account_id transport_volume_size = var.transport_volume_size use_AFS_for_shared_storage = var.use_AFS_for_shared_storage - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_deployer = length(var.deployer_tfstate_key) > 0 use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint vm_settings = local.vm_settings witness_storage_account = local.witness_storage_account + dns_settings = local.dns_settings } diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf index 1a5a88b9ab..5312417571 100644 --- a/deploy/terraform/run/sap_landscape/output.tf +++ b/deploy/terraform/run/sap_landscape/output.tf @@ -200,9 +200,19 @@ output "management_dns_resourcegroup_name" { output "management_dns_subscription_id" { description = "Subscription ID for the public Private DNS Zone" - value = var.management_dns_subscription_id + value = coalesce(var.management_dns_subscription_id, local.saplib_subscription_id) } +output "privatelink_dns_resourcegroup_name" { + value = coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name, local.saplib_resource_group_name) + } + +output "privatelink_dns_subscription_id" { + description = "Subscription ID for the PrivateLink Private DNS Zones" + value = coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id, local.saplib_subscription_id) + } + + output "privatelink_file_id" { description = "Azure resource identifier for the zone for the file resources" value = module.sap_landscape.privatelink_file_id @@ -213,6 +223,11 @@ output "register_virtual_network_to_dns" { value = var.register_virtual_network_to_dns } +output "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean flag to indicate if the storage accounts and key vaults are registered to DNS" + value = var.register_storage_accounts_keyvaults_with_dns + } + output "use_custom_dns_a_registration" { description = "Defines if custom DNS is used" value = var.use_custom_dns_a_registration diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index ef3f031b6a..5838805ab4 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -118,7 +118,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } azapi = { source = "Azure/azapi" diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 62f59a5901..1db3605fb9 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -376,6 +376,12 @@ variable "soft_delete_retention_days" { default = 7 } +variable "set_secret_expiry" { + description = "Set expiry date for secrets" + default = false + type = bool + } + ######################################################################################### # # # Authentication variables # # # ######################################################################################### @@ -404,7 +410,7 @@ variable "automation_path_to_private_key" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } variable 
"user_assigned_identity_id" { @@ -526,6 +532,18 @@ variable "management_dns_resourcegroup_name" { type = string } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } + variable "dns_server_list" { description = "DNS server list" @@ -557,6 +575,13 @@ variable "register_endpoints_with_dns" { type = bool } +variable "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" + default = true + type = bool + } + + ######################################################################################### # # # ANF variables # @@ -781,6 +806,18 @@ variable "utility_vm_nic_ips" { default = [] } +variable "patch_mode" { + description = "If defined, define the patch mode for the virtual machines" + default = "ImageDefault" + } + +variable "patch_assessment_mode" { + description = "If defined, define the patch mode for the virtual machines" + default = "ImageDefault" + } + + + ######################################################################################### # # # Tags # @@ -876,3 +913,26 @@ variable "nat_gateway_public_ip_tags" { type = map(string) default = null } + +#######################################4#######################################8 +# # +# Terraform variables # +# # +#######################################4#######################################8 + +variable "tfstate_resource_id" { + description = "Resource id of tfstate storage account" + validation { + condition = ( + length(split("/", var.tfstate_resource_id)) == 9 + ) + error_message = "The Azure Resource ID for the storage account containing the Terraform state files must be provided and be in correct format." 
+ } + } + +variable "deployer_tfstate_key" { + description = "The name of deployer's remote tfstate file" + type = string + default = "" + } + diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 9507d6f04c..cb13262968 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -186,6 +186,8 @@ locals { deploy_monitoring_extension = var.deploy_monitoring_extension deploy_defender_extension = var.deploy_defender_extension user_assigned_identity_id = var.user_assigned_identity_id + patch_mode = var.patch_mode + patch_assessment_mode = var.patch_assessment_mode } authentication = { @@ -198,9 +200,10 @@ locals { enable_secure_transfer = true use_spn = var.use_spn || try(var.options.use_spn, true) } - key_vault_temp = { - exists = length(var.user_keyvault_id) > 0 - } + key_vault_temp = { + exists = length(var.user_keyvault_id) > 0 + set_secret_expiry = var.set_secret_expiry + } user_keyvault_specified = length(var.user_keyvault_id) > 0 @@ -625,4 +628,21 @@ locals { } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_label = var.dns_label + dns_zone_names = var.dns_zone_names + dns_server_list = var.dns_server_list + + management_dns_resourcegroup_name = coalesce(var.management_dns_resourcegroup_name,local.saplib_resource_group_name) + management_dns_subscription_id = coalesce(var.management_dns_subscription_id, local.saplib_subscription_id) + + privatelink_dns_subscription_id = coalesce(var.privatelink_dns_subscription_id,var.management_dns_subscription_id, local.saplib_subscription_id) + privatelink_dns_resourcegroup_name = coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name,local.saplib_resource_group_name) + + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + register_virtual_network_to_dns = var.register_virtual_network_to_dns + } + } diff --git a/deploy/terraform/run/sap_landscape/variables_global.tf b/deploy/terraform/run/sap_landscape/variables_global.tf index 634cba4217..d545c790e7 100644 --- a/deploy/terraform/run/sap_landscape/variables_global.tf +++ b/deploy/terraform/run/sap_landscape/variables_global.tf @@ -4,17 +4,6 @@ # # #######################################4#######################################8 -variable "tfstate_resource_id" { - description = "Resource id of tfstate storage account" - validation { - condition = ( - length(split("/", var.tfstate_resource_id)) == 9 - ) - error_message = "The Azure Resource ID for the storage account containing the Terraform state files must be provided and be in correct format." - } - } - -variable "deployer_tfstate_key" { description = "The key of deployer's remote tfstate file" } variable "NFS_provider" { type = string diff --git a/deploy/terraform/run/sap_landscape/variables_local.tf b/deploy/terraform/run/sap_landscape/variables_local.tf index 3e1cc00b5c..2efe0b1642 100644 --- a/deploy/terraform/run/sap_landscape/variables_local.tf +++ b/deploy/terraform/run/sap_landscape/variables_local.tf @@ -27,10 +27,12 @@ locals { "") ) - deployer_subscription_id = length(local.spn_key_vault_arm_id) > 0 ? ( - split("/", local.spn_key_vault_arm_id)[2]) : ( - "" - ) + deployer_subscription_id = coalesce( + try(data.terraform_remote_state.deployer[0].outputs.created_resource_group_subscription_id,""), + length(local.spn_key_vault_arm_id) > 0 ? 
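# Why the [2] index used below: splitting an ARM resource ID on "/" puts the
# subscription GUID at position 2, because position 0 is the empty string before the
# leading slash (the ID shown is a placeholder):
#   split("/", "/subscriptions/<guid>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<kv>")[2]   # => "<guid>"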
( + split("/", local.spn_key_vault_arm_id)[2]) : ( + "" + )) spn = { subscription_id = data.azurerm_key_vault_secret.subscription_id.value, diff --git a/deploy/terraform/run/sap_library/module.tf b/deploy/terraform/run/sap_library/module.tf index e55874592e..eac1cf6372 100644 --- a/deploy/terraform/run/sap_library/module.tf +++ b/deploy/terraform/run/sap_library/module.tf @@ -8,17 +8,14 @@ module "sap_library" { azurerm.main = azurerm.main azurerm.deployer = azurerm.deployer azurerm.dnsmanagement = azurerm.dnsmanagement + azurerm.privatelinkdnsmanagement = azurerm.privatelinkdnsmanagement } Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" bootstrap = false deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) - dns_label = var.dns_label - dns_zone_names = var.dns_zone_names infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = var.management_dns_resourcegroup_name - management_dns_subscription_id = var.management_dns_subscription_id naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming place_delete_lock_on_resources = var.place_delete_lock_on_resources service_principal = var.use_deployer ? local.service_principal : local.account @@ -28,6 +25,8 @@ module "sap_library" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_webapp = var.use_webapp || length(try(data.terraform_remote_state.deployer[0].outputs.webapp_id,"")) > 0 + dns_settings = local.dns_settings + } module "sap_namegenerator" { diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index cbc8b786d8..7e6a6a8edd 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -68,6 +68,18 @@ provider "azurerm" { use_msi = var.use_spn ? false : true } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, local.spn.subscription_id), null) + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null + alias = "privatelinkdnsmanagement" + skip_provider_registration = true + storage_use_azuread = true + } + + provider "azuread" { client_id = local.use_spn ? local.spn.client_id : null client_secret = local.use_spn ? 
local.spn.client_secret : null @@ -96,7 +108,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } } } diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index b2785da311..0e4fbd75f5 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -45,7 +45,7 @@ variable "short_named_endpoints_nics" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } #######################################4#######################################8 @@ -300,3 +300,26 @@ variable "dns_zone_names" { } } +variable "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" + default = true + type = bool + } + +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + default = true + type = bool + } + +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } diff --git a/deploy/terraform/run/sap_library/transform.tf b/deploy/terraform/run/sap_library/transform.tf index 284266a49c..e344d7f942 100644 --- a/deploy/terraform/run/sap_library/transform.tf +++ b/deploy/terraform/run/sap_library/transform.tf @@ -120,4 +120,15 @@ locals { public_network_access_enabled = var.public_network_access_enabled } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_label = var.dns_label + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = trimspace(var.management_dns_subscription_id) + privatelink_dns_subscription_id = trimspace(var.privatelink_dns_subscription_id) + privatelink_dns_resourcegroup_name = trimspace(var.privatelink_dns_resourcegroup_name) + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + } } diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index d579ef8d38..a30e70a745 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -66,7 +66,6 @@ module "common_infrastructure" { deploy_application_security_groups = var.deploy_application_security_groups deployer_tfstate = length(var.deployer_tfstate_key) > 0 ? data.terraform_remote_state.deployer[0].outputs : null deployment = var.deployment - dns_zone_names = var.dns_zone_names enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults ha_validator = format("%d%d-%s", local.application_tier.scs_high_availability ? 
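# Sketch of the string this format() call produces, assuming the remaining arguments
# (only partially visible in this hunk) are the database HA flag and the NFS provider:
#   format("%d%d-%s", 1, 0, "AFS")   # => "10-AFS"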
1 : 0, @@ -79,8 +78,6 @@ module "common_infrastructure" { key_vault = local.key_vault landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options @@ -90,11 +87,10 @@ module "common_infrastructure" { service_principal = var.use_spn ? local.service_principal : local.account tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, true) use_private_endpoint = var.use_private_endpoint use_random_id_for_storageaccounts = var.use_random_id_for_storageaccounts use_scalesets_for_deployment = var.use_scalesets_for_deployment - register_endpoints_with_dns = var.register_endpoints_with_dns + dns_settings = local.dns_settings } #------------------------------------------------------------------------------- @@ -139,13 +135,10 @@ module "hdb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options ppg = module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) resource_group = module.common_infrastructure.resource_group sap_sid = local.sap_sid scale_set_id = length(var.scaleset_id) > 0 ? 
var.scaleset_id : module.common_infrastructure.scale_set_id @@ -157,13 +150,12 @@ module "hdb_node" { storage_subnet = module.common_infrastructure.storage_subnet tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, false) use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_observer = var.database_HANA_use_ANF_scaleout_scenario && local.database.high_availability use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips - register_endpoints_with_dns = var.register_endpoints_with_dns + dns_settings = local.dns_settings } ######################################################################################### @@ -194,16 +186,12 @@ module "app_tier" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming network_location = module.common_infrastructure.network_location network_resource_group = module.common_infrastructure.network_resource_group options = local.options order_deployment = null ppg = var.use_app_proximityplacementgroups ? module.common_infrastructure.app_ppg : module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) - register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group route_table_id = module.common_infrastructure.route_table_id sap_sid = local.sap_sid @@ -215,11 +203,11 @@ module "app_tier" { storage_bootdiag_endpoint = module.common_infrastructure.storage_bootdiag_endpoint tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, false) use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips + dns_settings = local.dns_settings } ######################################################################################### @@ -259,8 +247,6 @@ module "anydb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming options = local.options order_deployment = local.enable_db_deployment ? 
( @@ -269,8 +255,6 @@ module "anydb_node" { ) : (null) ) : (null) ppg = module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) - register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group sap_sid = local.sap_sid scale_set_id = try(module.common_infrastructure.scale_set_id, null) @@ -281,12 +265,12 @@ module "anydb_node" { storage_bootdiag_endpoint = module.common_infrastructure.storage_bootdiag_endpoint tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_observer = var.use_observer use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips + dns_settings = local.dns_settings } ######################################################################################### @@ -404,6 +388,7 @@ module "output_files" { scs_instance_number = var.scs_instance_number scs_server_loadbalancer_ip = module.app_tier.scs_server_loadbalancer_ip scs_server_ips = module.app_tier.scs_server_ips + scs_server_vm_resource_ids = module.app_tier.scs_vm_ids scs_server_secondary_ips = module.app_tier.scs_server_secondary_ips scs_vm_names = module.app_tier.scs_vm_names use_local_credentials = module.common_infrastructure.use_local_credentials @@ -468,4 +453,11 @@ module "output_files" { ams_resource_id = try(coalesce(var.ams_resource_id, try(data.terraform_remote_state.landscape.outputs.ams_resource_id, "")),"") enable_ha_monitoring = var.enable_ha_monitoring enable_os_monitoring = var.enable_os_monitoring + + ######################################################################################### + # SAP CAL # + ######################################################################################### + enable_sap_cal = var.enable_sap_cal + calapi_kv = var.calapi_kv + sap_cal_product_name = var.sap_cal_product_name } diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index a85a7a55fc..8703fc8eb8 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -119,7 +119,7 @@ variable "app_proximityplacementgroup_arm_ids" { variable "use_private_endpoint" { description = "Boolean value indicating if private endpoint should be used for the deployment" - default = false + default = true type = bool } @@ -351,7 +351,7 @@ variable "automation_path_to_private_key" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } @@ -1063,6 +1063,18 @@ variable "management_dns_resourcegroup_name" { type = string } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } + variable "dns_zone_names" { description = "Private DNS zone names" @@ -1087,6 +1099,12 @@ variable 
"register_endpoints_with_dns" { type = bool } +variable "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" + default = true + type = bool + } + ######################################################################################### # # # NFS and Shared Filed settings # @@ -1259,6 +1277,11 @@ variable "ANF_usr_sap_throughput" { # /sapmnt +variable "ANF_sapmnt" { + description = "If defined, will create ANF volumes for /sapmnt" + default = false + } + variable "ANF_sapmnt_use_existing" { description = "Use existing sapmnt volume" default = false @@ -1269,11 +1292,6 @@ variable "ANF_sapmnt_use_clone_in_secondary_zone" { default = false } -variable "ANF_sapmnt" { - description = "Use existing sapmnt volume" - default = false - } - variable "ANF_sapmnt_volume_name" { description = "sapmnt volume name" default = "" @@ -1406,6 +1424,11 @@ variable "patch_mode" { default = "ImageDefault" } +variable "patch_assessment_mode" { + description = "If defined, define the patch mode for the virtual machines" + default = "ImageDefault" + } + ######################################################################################### # # # Scaleout variables # @@ -1426,3 +1449,21 @@ variable "stand_by_node_count" { description = "The number of standby nodes" default = 0 } +######################################################################################### +# # +# SAP CAL Integration variables # +# # +######################################################################################### + +variable "enable_sap_cal" { + description = "If true, will enable the SAP CAL integration" + default = false + } +variable "calapi_kv" { + description = "The SAP CAL API Key Vault" + default = "" + } +variable "sap_cal_product_name" { + description = "If defined, will use SAP CAL for system installation" + default = "" + } diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index 1e9dc360a5..f236e23afb 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -12,6 +12,7 @@ locals { deploy_monitoring_extension = var.deploy_monitoring_extension deploy_defender_extension = var.deploy_defender_extension patch_mode = var.patch_mode + patch_assessment_mode = var.patch_assessment_mode } @@ -753,5 +754,19 @@ locals { } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = coalesce(var.management_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name)) + management_dns_subscription_id = coalesce(var.management_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null)) + + privatelink_dns_resourcegroup_name = coalesce(var.privatelink_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.privatelink_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name))) + privatelink_dns_subscription_id = coalesce(var.privatelink_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.privatelink_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null))) + + 
register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + + register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, false) + } } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 6f81524ed7..4913fc0c71 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -143,10 +143,13 @@ resource "azurerm_windows_web_app" "webapp" { key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id identity { - type = length(var.deployer.user_assigned_identity_id) == 0 ? ( - "SystemAssigned") : ( - "SystemAssigned, UserAssigned" - ) + # type = length(var.deployer.user_assigned_identity_id) == 0 ? ( + # "SystemAssigned") : ( + # "SystemAssigned, UserAssigned" + # ) + # for now set the identity type to "SystemAssigned, UserAssigned" as assigning identities + # is not supported by the provider when type is "SystemAssigned" + type = "SystemAssigned, UserAssigned" identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] } connection_string { diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 7d3799dc88..52bff83978 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -88,6 +88,12 @@ resource "azurerm_storage_account" "deployer" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false shared_access_key_enabled = var.deployer.shared_access_key_enabled + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = [azurerm_subnet.subnet_mgmt[0].id] + } + cross_tenant_replication_enabled = false + depends_on = [ azurerm_subnet.subnet_mgmt ] } data "azurerm_storage_account" "deployer" { diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 355bf50961..20866dba8b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -809,6 +809,7 @@ else ;; esac else + echo "NO TOKEN specified" echo export "PATH=$${ansible_bin}:$${tf_bin}:"'$${PATH}'::"$${DOTNET_ROOT}":'$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts' | sudo tee -a /etc/profile.d/deploy_server.sh echo "export SAP_AUTOMATION_REPO_PATH='$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation'" | sudo tee -a /etc/profile.d/deploy_server.sh diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf index f11374a850..0e7fb90abf 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf @@ 
-77,34 +77,10 @@ variable "subnets_to_add" { # # ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } +variable "dns_settings" { + description = "DNS details for the deployment" + default = {} + } ############################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf index 712b034e31..9cfb588d7b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf @@ -219,7 +219,9 @@ resource "azurerm_virtual_machine_extension" "configure" { count = var.auto_configure_deployer ? var.deployer_vm_count : 0 depends_on = [ - time_sleep.wait_for_VM + time_sleep.wait_for_VM, + azurerm_virtual_machine_extension.monitoring_extension_deployer_lnx, + azurerm_virtual_machine_extension.monitoring_defender_deployer_lnx ] name = "configure_deployer" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index 7a082b6751..d123757e2c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -7,6 +7,18 @@ data "azurerm_subnet" "ams" { resource_group_name = split("/", local.ams_subnet_arm_id)[4] # Get RG name from actual arm_id } +resource "azurerm_subnet_route_table_association" "ams" { + provider = azurerm.main + count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 + depends_on = [ + azurerm_route_table.rt, + azurerm_subnet.ams + ] + subnet_id = local.ams_subnet_existing ? 
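# Per the count expression above, this association is created only when the AMS
# instance is requested, the subnet is defined, both the virtual network and the
# subnet are created by this deployment, and no NAT gateway is used (the NAT gateway
# path bypasses the route table).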
var.infrastructure.vnets.sap.subnet_ams.arm_id : azurerm_subnet.ams[0].id + route_table_id = azurerm_route_table.rt[0].id +} + + # Created AMS instance if log analytics workspace is NOT defined resource "azapi_resource" "ams_instance" { type = "Microsoft.Workloads/monitors@2023-04-01" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf index cc857475b5..0da138aa69 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf @@ -49,12 +49,12 @@ data "azurerm_virtual_network" "vnet_sap" { resource "azurerm_virtual_network_dns_servers" "vnet_sap_dns_servers" { provider = azurerm.main - count = local.SAP_virtualnetwork_exists && length(var.dns_server_list) > 0 ? 1 : 0 + count = local.SAP_virtualnetwork_exists && length(var.dns_settings.dns_server_list) > 0 ? 1 : 0 virtual_network_id = local.SAP_virtualnetwork_exists ? ( data.azurerm_virtual_network.vnet_sap[0].id) : ( azurerm_virtual_network.vnet_sap[0].id ) - dns_servers = var.dns_server_list + dns_servers = var.dns_settings.dns_server_list } # // Peers management VNET to SAP VNET @@ -136,7 +136,7 @@ resource "azurerm_virtual_network_peering" "peering_sap_management" { //Route table resource "azurerm_route_table" "rt" { provider = azurerm.main - count = local.SAP_virtualnetwork_exists ? 0 : 1 + count = local.SAP_virtualnetwork_exists ? 0 : (local.create_nat_gateway ? 0 : 1) depends_on = [ azurerm_virtual_network.vnet_sap ] @@ -154,13 +154,12 @@ resource "azurerm_route_table" "rt" { data.azurerm_virtual_network.vnet_sap[0].location) : ( azurerm_virtual_network.vnet_sap[0].location ) - disable_bgp_route_propagation = false tags = var.tags } resource "azurerm_route" "admin" { provider = azurerm.main - count = length(local.firewall_ip) > 0 ? local.SAP_virtualnetwork_exists ? 0 : 1 : 0 + count = length(local.firewall_ip) > 0 ? local.SAP_virtualnetwork_exists ? 0 : (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt ] @@ -183,7 +182,7 @@ resource "azurerm_route" "admin" { resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap" { provider = azurerm.dnsmanagement - count = local.use_Azure_native_DNS && var.use_private_endpoint && var.register_virtual_network_to_dns ? 1 : 0 + count = local.use_Azure_native_DNS && var.use_private_endpoint && var.dns_settings.register_virtual_network_to_dns ? 
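# The scattered DNS variables are consolidated into a single dns_settings
# object throughout this patch; an illustrative tfvars-style value, with field
# names taken from the references in these hunks and example values:
dns_settings = {
  dns_label                                    = "azure.contoso.net"
  dns_server_list                              = []
  use_custom_dns_a_registration                = false
  register_virtual_network_to_dns              = true
  register_endpoints_with_dns                  = true
  register_storage_accounts_keyvaults_with_dns = true
  management_dns_resourcegroup_name            = "MGMT-WEEU-DNS" # example
  privatelink_dns_resourcegroup_name           = "MGMT-WEEU-DNS" # example
  dns_zone_names = {
    file_dns_zone_name  = "privatelink.file.core.windows.net"
    blob_dns_zone_name  = "privatelink.blob.core.windows.net"
    table_dns_zone_name = "privatelink.table.core.windows.net"
    vault_dns_zone_name = "privatelink.vaultcore.azure.net"
  }
}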
1 : 0 depends_on = [ azurerm_virtual_network.vnet_sap ] @@ -194,9 +193,9 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name + resource_group_name = var.dns_settings.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_label + private_dns_zone_name = var.dns_settings.dns_label virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = true } @@ -214,18 +213,18 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap_file" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.file_dns_zone_name + private_dns_zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = false } data "azurerm_private_dns_zone" "file" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_private_dns_zone_virtual_network_link" "storage" { @@ -241,16 +240,16 @@ resource "azurerm_private_dns_zone_virtual_network_link" "storage" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name + private_dns_zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id } data "azurerm_private_dns_zone" "storage" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_management_lock" "vnet_sap" { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index c864edf0db..6b58678af4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -59,6 +59,19 @@ data "azurerm_network_security_group" "iscsi" { resource_group_name = split("/", local.sub_iscsi_nsg_arm_id)[4] } + + +resource "azurerm_subnet_route_table_association" "iscsi" { + provider = azurerm.main + count = local.enable_iscsi && !local.SAP_virtualnetwork_exists && !local.sub_iscsi_exists ? (local.create_nat_gateway ? 0 : 1) : 0 + depends_on = [ + azurerm_route_table.rt, + azurerm_subnet.iscsi + ] + subnet_id = local.sub_iscsi_exists ? 
var.infrastructure.vnets.sap.sub_iscsi.arm_id : azurerm_subnet.iscsi[0].id + route_table_id = azurerm_route_table.rt[0].id +} + // TODO: Add nsr to iSCSI's nsg /* @@ -102,6 +115,37 @@ resource "azurerm_network_interface" "iscsi" { } } +// Add SSH network security rule +resource "azurerm_network_security_rule" "nsr_controlplane_iscsi" { + provider = azurerm.main + count = local.enable_sub_iscsi ? local.sub_iscsi_nsg_exists ? 0 : 1 : 0 + depends_on = [ + azurerm_network_security_group.iscsi + ] + name = "ConnectivityToISCSISubnetFromControlPlane-ssh-rdp-winrm" + resource_group_name = local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].resource_group_name + ) : ( + azurerm_virtual_network.vnet_sap[0].resource_group_name + ) + network_security_group_name = try(azurerm_network_security_group.iscsi[0].name, azurerm_network_security_group.app[0].name) + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = [22, 443, 3389, 5985, 5986, 2049, 111] + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) + destination_address_prefixes = local.sub_iscsi_exists ? data.azurerm_subnet.iscsi[0].address_prefixes : azurerm_subnet.iscsi[0].address_prefixes +} + + // Manages the association between NIC and NSG resource "azurerm_network_interface_security_group_association" "iscsi" { provider = azurerm.main @@ -145,6 +189,11 @@ resource "azurerm_linux_virtual_machine" "iscsi" { //custom_data = try(data.template_cloudinit_config.config_growpart.rendered, "Cg==") + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + os_disk { name = format("%s%s%s%s%s", var.naming.resource_prefixes.osdisk, @@ -215,6 +264,11 @@ resource "azurerm_key_vault_secret" "iscsi_ppk" { name = local.iscsi_ppk_name value = local.iscsi_private_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) + } resource "azurerm_key_vault_secret" "iscsi_pk" { @@ -230,6 +284,10 @@ resource "azurerm_key_vault_secret" "iscsi_pk" { name = local.iscsi_pk_name value = local.iscsi_public_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_secret" "iscsi_username" { @@ -245,6 +303,10 @@ resource "azurerm_key_vault_secret" "iscsi_username" { name = local.iscsi_username_name value = local.iscsi_auth_username key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? 
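# Secret expiry is driven by one shared time_offset resource (declared in
# key_vault_sap_landscape.tf later in this patch) instead of per-secret
# timestamps; the recurring shape, condensed with illustrative names:
resource "time_offset" "secret_expiry_date" {
  offset_months = 12
}

resource "azurerm_key_vault_secret" "sketch" {
  name            = "sketch-secret"
  value           = "sketch-value"
  key_vault_id    = var.key_vault_id # illustrative
  expiration_date = var.set_secret_expiry ? time_offset.secret_expiry_date.rfc3339 : null
}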
( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_secret" "iscsi_password" { @@ -260,6 +322,10 @@ resource "azurerm_key_vault_secret" "iscsi_password" { name = local.iscsi_pwd_name value = local.iscsi_auth_password key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } // Generate random password if password is set as authentication type and user doesn't specify a password, and save in KV @@ -327,6 +393,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { @@ -341,6 +408,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index 26b796de47..593fcfd9fd 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -135,6 +135,10 @@ resource "random_password" "created_password" { min_numeric = 2 } +## Add an expiry date to the secrets +resource "time_offset" "secret_expiry_date" { + offset_months = 12 +} // Key pair/password will be stored in the existing KV if specified, otherwise will be stored in a newly provisioned KV resource "azurerm_key_vault_secret" "sid_ppk" { @@ -150,6 +154,10 @@ resource "azurerm_key_vault_secret" "sid_ppk" { name = local.sid_ppk_name value = local.sid_private_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_ppk" { @@ -175,6 +183,10 @@ resource "azurerm_key_vault_secret" "sid_pk" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_pk" { @@ -202,6 +214,10 @@ resource "azurerm_key_vault_secret" "sid_username" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_username" { @@ -227,6 +243,10 @@ resource "azurerm_key_vault_secret" "sid_password" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_password" { @@ -268,6 +288,10 @@ resource "azurerm_key_vault_secret" "witness_access_key" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? 
( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } //Witness access key @@ -301,6 +325,10 @@ resource "azurerm_key_vault_secret" "witness_name" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_access_policy" "kv_user_msi" { @@ -350,6 +378,10 @@ resource "azurerm_key_vault_secret" "deployer_keyvault_user_name" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } @@ -439,9 +471,9 @@ resource "azurerm_private_endpoint" "kv_user" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_settings.dns_zone_names.vault_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.keyvault[0].id] } } @@ -450,9 +482,9 @@ resource "azurerm_private_endpoint" "kv_user" { data "azurerm_private_dns_zone" "keyvault" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_private_dns_a_record" "keyvault" { @@ -461,8 +493,8 @@ resource "azurerm_private_dns_a_record" "keyvault" { name = lower( format("%s", local.user_keyvault_name) ) - zone_name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.keyvault_private_endpoint_id) > 0 ? ( @@ -487,19 +519,12 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vault" { var.naming.separator, "vault" ) - resource_group_name = var.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name + private_dns_zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = false } -data "azurerm_private_dns_zone" "vault" { - provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.register_endpoints_with_dns ? 
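# range(0) is empty and range(1) has one element, so a dynamic block guarded
# with range(condition ? 1 : 0) is rendered at most once; the DNS zone group
# pattern used in these hunks, reduced to its core (zone id is illustrative):
dynamic "private_dns_zone_group" {
  for_each = range(var.register_endpoints_with_dns ? 1 : 0)
  content {
    name                 = "privatelink.vaultcore.azure.net"
    private_dns_zone_ids = [var.vault_private_dns_zone_id]
  }
}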
1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name -} - ############################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index 1fbb471183..35722baebe 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering] - version = ">= 3.3" + version = "~> 3.23" } azapi = { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 26c7dfa9e5..331d4d139e 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -28,17 +28,17 @@ resource "azurerm_storage_account" "storage_bootdiag" { enable_https_traffic_only = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - + cross_tenant_replication_enabled = false tags = var.tags } resource "azurerm_private_dns_a_record" "storage_bootdiag" { provider = azurerm.dnsmanagement - count = var.use_custom_dns_a_registration ? 0 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 0 : 0 name = lower(local.storageaccount_name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = local.resource_group_exists ? ( data.azurerm_resource_group.resource_group[0].name) : ( azurerm_resource_group.resource_group[0].name @@ -108,9 +108,9 @@ resource "azurerm_private_endpoint" "storage_bootdiag" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] } } @@ -145,7 +145,7 @@ resource "azurerm_storage_account" "witness_storage" { enable_https_traffic_only = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled tags = var.tags @@ -173,10 +173,10 @@ resource "azurerm_storage_account" "witness_storage" { resource "azurerm_private_dns_a_record" "witness_storage" { provider = azurerm.dnsmanagement - count = var.use_custom_dns_a_registration ? 0 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 0 : 0 name = lower(local.witness_storageaccount_name) - zone_name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 3600 records = [data.azurerm_network_interface.witness_storage[count.index].ip_configuration[0].private_ip_address] @@ -250,9 +250,9 @@ resource "azurerm_private_endpoint" "witness_storage" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 
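# "~> 3.23" is a pessimistic constraint: any 3.x release from 3.23 onwards is
# accepted, 4.0 is not, which shields the module from the argument renames
# that ship with a new major version; equivalently:
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">= 3.23, < 4.0" # same range as "~> 3.23"
    }
  }
}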
1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] } } @@ -294,6 +294,7 @@ resource "azurerm_storage_account" "transport" { allow_nested_items_to_be_public = false # shared_access_key_enabled = false + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled network_rules { @@ -322,7 +323,7 @@ resource "azurerm_storage_account" "transport" { resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 + count = var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 name = replace( lower( format("%s", local.landscape_shared_transport_storage_account_name) @@ -330,8 +331,8 @@ resource "azurerm_private_dns_a_record" "transport" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.transport_private_endpoint_id) > 0 ? ( @@ -351,8 +352,8 @@ data "azurerm_private_dns_a_record" "transport" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } @@ -448,9 +449,9 @@ resource "azurerm_private_endpoint" "transport" { ] } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] } } @@ -512,6 +513,7 @@ resource "azurerm_storage_account" "install" { allow_nested_items_to_be_public = false enable_https_traffic_only = false min_tls_version = "TLS1_2" + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled tags = var.tags # shared_access_key_enabled = false @@ -564,8 +566,8 @@ resource "azurerm_private_dns_a_record" "install" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.install_private_endpoint_id) > 0 ? 
( @@ -589,8 +591,8 @@ data "azurerm_private_dns_a_record" "install" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } @@ -675,9 +677,9 @@ resource "azurerm_private_endpoint" "install" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index cf33a1823a..956b0f1b19 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -7,7 +7,7 @@ resource "azurerm_subnet" "admin" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.admin_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -34,7 +34,7 @@ resource "azurerm_subnet" "db" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.database_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] ) : ( @@ -59,7 +59,7 @@ resource "azurerm_subnet" "app" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.application_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -86,7 +86,7 @@ resource "azurerm_subnet" "web" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.web_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -114,7 +114,7 @@ resource "azurerm_subnet" "storage" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.subnet_cidr_storage] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? 
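# Every workload subnet follows the same conditional shape: service endpoints
# only when requested, private-link policy enforcement tied to private
# endpoint usage; a trimmed sketch (names and prefix are illustrative):
resource "azurerm_subnet" "sketch" {
  name                 = "sketch-subnet"
  resource_group_name  = "sketch-rg"
  virtual_network_name = "sketch-vnet"
  address_prefixes     = ["10.110.0.0/24"]

  enforce_private_link_endpoint_network_policies = var.use_private_endpoint
  service_endpoints                              = var.use_service_endpoint ? ["Microsoft.Storage", "Microsoft.KeyVault"] : null
}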
( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -177,12 +177,11 @@ resource "azurerm_subnet" "ams" { } } - #Associate the subnets to the route table resource "azurerm_subnet_route_table_association" "admin" { provider = azurerm.main - count = local.admin_subnet_defined && !local.SAP_virtualnetwork_exists && !local.admin_subnet_existing ? 1 : 0 + count = local.admin_subnet_defined && !local.SAP_virtualnetwork_exists && !local.admin_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.admin @@ -193,7 +192,7 @@ resource "azurerm_subnet_route_table_association" "admin" { resource "azurerm_subnet_route_table_association" "db" { provider = azurerm.main - count = local.database_subnet_defined && !local.SAP_virtualnetwork_exists && !local.database_subnet_existing ? 1 : 0 + count = local.database_subnet_defined && !local.SAP_virtualnetwork_exists && !local.database_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.db @@ -204,7 +203,7 @@ resource "azurerm_subnet_route_table_association" "db" { resource "azurerm_subnet_route_table_association" "app" { provider = azurerm.main - count = local.application_subnet_defined && !local.SAP_virtualnetwork_exists && !local.application_subnet_existing ? 1 : 0 + count = local.application_subnet_defined && !local.SAP_virtualnetwork_exists && !local.application_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.db @@ -215,7 +214,7 @@ resource "azurerm_subnet_route_table_association" "app" { resource "azurerm_subnet_route_table_association" "web" { provider = azurerm.main - count = local.web_subnet_defined && !local.SAP_virtualnetwork_exists && !local.web_subnet_existing ? 1 : 0 + count = local.web_subnet_defined && !local.SAP_virtualnetwork_exists && !local.web_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.web @@ -224,17 +223,6 @@ resource "azurerm_subnet_route_table_association" "web" { route_table_id = azurerm_route_table.rt[0].id } -resource "azurerm_subnet_route_table_association" "ams" { - provider = azurerm.main - count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? 1 : 0 - depends_on = [ - azurerm_route_table.rt, - azurerm_subnet.ams - ] - subnet_id = local.ams_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_ams.arm_id : azurerm_subnet.ams[0].id - route_table_id = azurerm_route_table.rt[0].id -} - # Creates network security rule to allow internal traffic for SAP db subnet resource "azurerm_network_security_rule" "nsr_internal_db" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index 4451866759..e77fa7fe3b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -218,43 +218,7 @@ variable "place_delete_lock_on_resources" { description = "If def ######################################################################################### -variable "dns_label" { description = "DNS label for the system, for example azure.contoso.net" } - -variable "dns_server_list" { - description = "The list of DNS Servers to associate with the VNet" - default = [] - } - -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool - } - -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a records should be created for private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { description = "String value giving the possibility to register custom dns a records in a separate subscription" } - -variable "management_dns_resourcegroup_name" { description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } +variable "dns_settings" { description = "DNS settings for the deployment" } variable "use_private_endpoint" { description = "Boolean value indicating if private endpoint should be used for the deployment" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 9facd0f913..10f71190f4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -802,7 +802,7 @@ locals { )] ) - use_Azure_native_DNS = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && !local.SAP_virtualnetwork_exists + use_Azure_native_DNS = length(var.dns_settings.dns_label) > 0 && !var.dns_settings.use_custom_dns_a_registration && !local.SAP_virtualnetwork_exists use_AFS_for_shared = (var.NFS_provider == "ANF" && var.use_AFS_for_shared_storage) || var.NFS_provider == "AFS" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 7260e24f00..ddd5224d2f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -67,6 +67,14 @@ 
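# azurerm has no "ImageDefault" patch orchestration mode on Windows, so the
# configured value is translated to "Manual" and automatic updates are
# disabled in that case; the mapping below, restated as illustrative locals:
locals {
  windows_patch_mode       = var.patch_mode == "ImageDefault" ? "Manual" : var.patch_mode
  enable_automatic_updates = var.patch_mode != "ImageDefault"
  bypass_safety_checks     = var.patch_mode == "AutomaticByPlatform"
}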
resource "azurerm_windows_virtual_machine" "utility_vm" { admin_username = local.input_sid_username admin_password = local.input_sid_password + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + os_disk { name = format("%s%s%s%s%s", var.naming.resource_prefixes.osdisk, @@ -130,6 +138,11 @@ resource "azurerm_linux_virtual_machine" "utility_vm" { admin_password = local.input_sid_password disable_password_authentication = true + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + dynamic "admin_ssh_key" { for_each = range(1) content { @@ -180,6 +193,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_lnx" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -193,6 +207,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -205,6 +220,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -224,6 +240,8 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true + settings = jsonencode( { "enableGenevaUpload" = true, diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index 5eae5a3a7a..8bc9cd9720 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -10,7 +10,7 @@ resource "azurerm_private_dns_zone" "dns" { depends_on = [ azurerm_resource_group.library ] - name = var.dns_label + name = var.dns_settings.dns_label resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -18,11 +18,11 @@ resource "azurerm_private_dns_zone" "dns" { } resource "azurerm_private_dns_zone" "blob" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = local.resource_group_exists ? 
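# coalesce() returns its first argument that is neither null nor an empty
# string, so the privatelink resource group wins when set, then the management
# one, and finally the library's own resource group; for example:
output "dns_rg_sketch" {
  value = coalesce("", "MGMT-WEEU-DNS", "SAP-LIBRARY") # => "MGMT-WEEU-DNS"
}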
( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -31,11 +31,11 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.table_dns_zone_name + name = var.dns_settings.dns_zone_names.table_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -44,11 +44,11 @@ resource "azurerm_private_dns_zone" "table" { resource "azurerm_private_dns_zone" "file" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -57,11 +57,11 @@ resource "azurerm_private_dns_zone" "file" { resource "azurerm_private_dns_zone" "vault" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_settings.dns_zone_names.vault_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -69,10 +69,16 @@ resource "azurerm_private_dns_zone" "vault" { } data "azurerm_private_dns_zone" "vault" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, + local.resource_group_exists ? ( + split("/", var.infrastructure.resource_group.arm_id)[4]) : ( + azurerm_resource_group.library[0].name + )) + } diff --git a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf index 48a6e30322..5537939900 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf @@ -31,7 +31,7 @@ data "azurerm_resource_group" "library" { resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 
1 : 0 + count = length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_private_dns_zone.dns ] @@ -42,21 +42,19 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name - )) : ( - var.management_dns_resourcegroup_name - ) - private_dns_zone_name = var.dns_label + )) + private_dns_zone_name = var.dns_settings.dns_label virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = true } resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt_blob" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_storage_account.storage_tfstate, azurerm_private_dns_zone.blob @@ -68,14 +66,13 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt_blob" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name - )) : ( - var.management_dns_resourcegroup_name - ) - private_dns_zone_name = var.dns_zone_names.blob_dns_zone_name + )) + private_dns_zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = false } diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 7470f37608..9a402c5636 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -83,10 +83,14 @@ resource "azurerm_key_vault_secret" "tfstate" { resource "azurerm_private_dns_a_record" "kv_user" { provider = azurerm.deployer - count = var.use_private_endpoint && var.use_custom_dns_a_registration ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 name = lower(split("/", var.key_vault.kv_spn_id)[8]) - zone_name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, + local.resource_group_name + ) ttl = 3600 records = [azurerm_private_endpoint.kv_user[0].private_service_connection[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf index 5b41d25b88..da8de606d1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf @@ -38,10 +38,10 @@ resource "azurerm_private_endpoint" "kv_user" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0) content { - name = var.dns_zone_names.vault_dns_zone_name - private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.vault[0].id : data.azurerm_private_dns_zone.vault[0].id] + name = var.dns_settings.dns_zone_names.vault_dns_zone_name + private_dns_zone_ids = [local.use_local_privatelink_dns ? azurerm_private_dns_zone.vault[0].id : data.azurerm_private_dns_zone.vault[0].id] } } @@ -49,7 +49,7 @@ resource "azurerm_private_endpoint" "kv_user" { resource "azurerm_private_dns_zone_virtual_network_link" "vault" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_private_dns_zone.vault ] @@ -60,14 +60,14 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vault" { var.naming.separator, "vault" ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = length(var.dns_settings.privatelink_dns_subscription_id) == 0 ? ( local.resource_group_exists ? 
( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name )) : ( - var.management_dns_resourcegroup_name + var.dns_settings.privatelink_dns_resourcegroup_name ) - private_dns_zone_name = var.dns_zone_names.vault_dns_zone_name + private_dns_zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = false } diff --git a/deploy/terraform/terraform-units/modules/sap_library/providers.tf b/deploy/terraform/terraform-units/modules/sap_library/providers.tf index d3398211b5..e08192b874 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/providers.tf @@ -2,7 +2,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] + configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] version = "~> 3.0" } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 4b36ff4f80..5ead7bdc78 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -31,6 +31,7 @@ resource "azurerm_storage_account" "storage_tfstate" { enable_https_traffic_only = true + cross_tenant_replication_enabled = false shared_access_key_enabled = var.storage_account_sapbits.shared_access_key_enabled blob_properties { @@ -105,14 +106,15 @@ resource "azurerm_role_assignment" "storage_tfstate_contributor_ssi" { resource "azurerm_private_dns_a_record" "storage_tfstate_pep_a_record_registry" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_tfstate_exists ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_tfstate_exists ? 1 : 0 depends_on = [ azurerm_private_dns_zone.blob ] name = lower(azurerm_storage_account.storage_tfstate[0].name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = coalesce( - var.management_dns_resourcegroup_name, + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( data.azurerm_resource_group.library[0].name ) : ( @@ -184,9 +186,9 @@ resource "azurerm_private_endpoint" "storage_tfstate" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.blob[0].id : data.azurerm_private_dns_zone.storage[0].id] } } @@ -242,9 +244,9 @@ resource "azurerm_private_endpoint" "table_tfstate" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration && var.use_webapp ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_webapp ? 
1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.table[0].id : data.azurerm_private_dns_zone.table[0].id] } } @@ -304,6 +306,7 @@ resource "azurerm_storage_account" "storage_sapbits" { allow_nested_items_to_be_public = false + cross_tenant_replication_enabled = false public_network_access_enabled = var.storage_account_sapbits.public_network_access_enabled routing { @@ -346,9 +349,10 @@ resource "azurerm_private_dns_a_record" "storage_sapbits_pep_a_record_registry" ] name = lower(azurerm_storage_account.storage_sapbits[0].name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = coalesce( - var.management_dns_resourcegroup_name, + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( data.azurerm_resource_group.library[0].name) : ( azurerm_resource_group.library[0].name) @@ -416,7 +420,7 @@ resource "azurerm_private_endpoint" "storage_sapbits" { dynamic "private_dns_zone_group" { for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.blob[0].id : data.azurerm_private_dns_zone.storage[0].id] } @@ -486,18 +490,24 @@ resource "azurerm_role_assignment" "storage_sapbits_contributor_ssi" { data "azurerm_private_dns_zone" "storage" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_private_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name + ) } data "azurerm_private_dns_zone" "table" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.table_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_private_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 + name = var.dns_settings.dns_zone_names.table_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name + ) } diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf index 01b3d1aad2..11a885c230 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf @@ -19,6 +19,10 @@ variable "infrastructure" { variable "storage_account_sapbits" {} variable "storage_account_tfstate" {} +variable "dns_settings" { + description = "DNS details for the deployment" + default = {} + } variable "deployer" { description = "Details of deployer" default = {} @@ -59,24 +63,6 @@ variable "key_vault" { } -variable "dns_label" { - description = "DNS label for the deployment" - default = "" - - } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } - - variable "naming" { description = "naming convention data structure" } @@ -101,18 +87,6 @@ variable "use_custom_dns_a_registration" { type = bool } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - variable "enable_purge_control_for_keyvaults" { description = "Allow the deployment to control the purge protection" type = bool diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index 2a2940cb97..015338c91b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -56,7 +56,8 @@ locals { enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) - use_local_private_dns = (length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.management_dns_resourcegroup_name)) == 0) + use_local_private_dns = (length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.management_dns_resourcegroup_name)) == 0) + use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) == 0 keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf index 3960773b27..d113ad9d07 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf @@ -352,6 +352,7 @@ variable "resource_prefixes" { "scs_fs_rule" = "" "scs_scs_rule" = "" 
"sdu_rg" = "" + "sdu_secret" = "" "tfstate" = "" "transport_volume" = "" "vm" = "" @@ -372,6 +373,7 @@ variable "resource_prefixes" { "witness_accesskey" = "" "witness_name" = "" "ams_subnet" = "" + "nat_gateway" = "" } } @@ -484,6 +486,7 @@ variable "resource_suffixes" { "scs_fs_rule" = "scsFs-rule" "scs_scs_rule" = "scsScs-rule" "sdu_rg" = "" + "sdu_secret" = "" "tfstate" = "tfstate" "transport_volume" = "transport" "usrsap" = "usrsap" @@ -506,6 +509,7 @@ variable "resource_suffixes" { "witness_name" = "-witness-name" "ams_subnet" = "ams-subnet" "ams_instance" = "-AMS" + "nat_gateway" = "-nat-gateway" } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf index 091336fdcf..844f14527b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf @@ -151,9 +151,9 @@ data "azurerm_availability_set" "anydb" { resource "azurerm_private_dns_a_record" "db" { provider = azurerm.dnsmanagement - count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%s%sdb%scl", var.sap_sid, local.anydb_sid, "00")) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.anydb[0].frontend_ip_configuration[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf index 08de04649a..e814c7ff26 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf @@ -51,29 +51,8 @@ variable "use_secondary_ips" { # # ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool +variable "dns_settings" { + description = "DNS Settings" } ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf 
b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index f1f8d5e25f..b5f7b8ec0d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -18,7 +18,7 @@ resource "azurerm_network_interface" "anydb_db" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags dynamic "ip_configuration" { iterator = pub @@ -77,7 +77,7 @@ resource "azurerm_network_interface" "anydb_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags ip_configuration { @@ -161,8 +161,6 @@ resource "azurerm_linux_virtual_machine" "dbserver" { size = local.anydb_sku source_image_id = var.database.os.type == "custom" ? var.database.os.source_image_id : null license_type = length(var.license_type) > 0 ? var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode admin_username = var.sid_username admin_password = local.enable_auth_key ? null : var.sid_password @@ -170,6 +168,11 @@ resource "azurerm_linux_virtual_machine" "dbserver" { custom_data = var.deployment == "new" ? var.cloudinit_growpart_config : null + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + tags = merge(local.tags, var.tags) dynamic "admin_ssh_key" { @@ -301,9 +304,14 @@ resource "azurerm_windows_virtual_machine" "dbserver" { size = local.anydb_sku source_image_id = var.database.os.type == "custom" ? var.database.os.source_image_id : null license_type = length(var.license_type) > 0 ? var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? 
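# Both the Azure Monitor and Defender extensions now enable automatic (major)
# upgrades on top of minor-version auto-upgrade; the recurring shape, with an
# illustrative VM id:
resource "azurerm_virtual_machine_extension" "sketch_ama" {
  name                       = "AzureMonitorLinuxAgent"
  virtual_machine_id         = var.vm_id # illustrative
  publisher                  = "Microsoft.Azure.Monitor"
  type                       = "AzureMonitorLinuxAgent"
  type_handler_version       = "1.0"
  auto_upgrade_minor_version = true
  automatic_upgrade_enabled  = true
}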
false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") admin_username = var.sid_username admin_password = var.sid_password @@ -714,6 +722,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -729,6 +738,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -745,6 +755,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -767,6 +778,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf index 084cf3e463..5c42c95ad1 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf @@ -17,7 +17,7 @@ resource "azurerm_network_interface" "observer" { ) resource_group_name = var.resource_group[0].name location = var.resource_group[0].location - enable_accelerated_networking = false + accelerated_networking_enabled = false tags = var.tags ip_configuration { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index 7ca4ec7ba4..ebb7fb2eee 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -198,7 +198,7 @@ resource "azurerm_lb_rule" "scs" { backend_address_pool_ids = [azurerm_lb_backend_address_pool.scs[0].id] probe_id = azurerm_lb_probe.scs[0].id enable_floating_ip = true - enable_tcp_reset = true + enable_tcp_reset = false idle_timeout_in_minutes = var.idle_timeout_scs_ers } @@ -230,7 +230,7 @@ resource "azurerm_lb_rule" "ers" { backend_address_pool_ids = [azurerm_lb_backend_address_pool.scs[0].id] probe_id = azurerm_lb_probe.scs[1].id enable_floating_ip = true - enable_tcp_reset = true + enable_tcp_reset = false idle_timeout_in_minutes = var.idle_timeout_scs_ers } @@ -431,12 +431,12 @@ resource "azurerm_subnet_route_table_association" "subnet_sap_web" { resource "azurerm_private_dns_a_record" "scs" { provider = azurerm.dnsmanagement - count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 
1 : 0 name = lower(format("%sscs%scl1", local.sid, var.application_tier.scs_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = var.landscape_tfstate.dns_label ttl = 300 records = [azurerm_lb.scs[0].frontend_ip_configuration[0].private_ip_address] @@ -444,12 +444,12 @@ resource "azurerm_private_dns_a_record" "scs" { resource "azurerm_private_dns_a_record" "ers" { provider = azurerm.dnsmanagement - count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%sers%scl2", local.sid, local.ers_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.scs[0].frontend_ip_configuration[1].private_ip_address] @@ -457,11 +457,11 @@ resource "azurerm_private_dns_a_record" "ers" { resource "azurerm_private_dns_a_record" "web" { provider = azurerm.dnsmanagement - count = local.enable_web_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_web_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%sweb%s", local.sid, var.application_tier.web_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.web[0].frontend_ip_configuration[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf index 656d956576..360b72eb09 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf @@ -29,8 +29,6 @@ variable "network_resource_group" { description = "Resourc variable "options" { description = "Dictionary of miscallaneous parameters" } variable "order_deployment" { description = "psuedo condition for ordering deployment" } variable "ppg" { description = "Details of the proximity placement group" } -variable "register_virtual_network_to_dns" { description = "Boolean value indicating if the vnet should be registered to the dns zone" } -variable "register_endpoints_with_dns" { description = "Boolean value indicating if endpoints should be registered to the dns zone" } variable "resource_group" { description = "Details of the resource group" } variable "route_table_id" { description = "Route table (if any) id" } variable "sap_sid" { description = "The SID of the application" } @@ -56,20 +54,8 @@ variable "use_msi_for_clusters" { description = "If true # # ######################################################################################### -variable 
"use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string +variable "dns_settings" { + description = "DNS Settings" } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 47041e1856..826df1ec25 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -17,7 +17,7 @@ resource "azurerm_network_interface" "app" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.app_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { @@ -76,7 +76,7 @@ resource "azurerm_network_interface" "app_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.app_sizing.compute.accelerated_networking tags = var.tags ip_configuration { @@ -144,6 +144,12 @@ resource "azurerm_linux_virtual_machine" "app" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + //If length of zones > 1 distribute servers evenly across zones zone = var.application_tier.app_use_avset ? null : try(local.app_zones[count.index % max(local.app_zone_count, 1)], null) @@ -289,6 +295,15 @@ resource "azurerm_windows_virtual_machine" "app" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + //If length of zones > 1 distribute servers evenly across zones zone = var.application_tier.app_use_avset ? null : try(local.app_zones[count.index % max(local.app_zone_count, 1)], null) @@ -305,10 +320,7 @@ resource "azurerm_windows_virtual_machine" "app" { admin_username = var.sid_username admin_password = var.sid_password - #ToDo: Remove once feature is GA patch_mode = "Manual" license_type = length(var.license_type) > 0 ? 
var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode tags = merge(var.application_tier.app_tags, var.tags) @@ -511,6 +523,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -526,6 +539,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -540,6 +554,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -561,6 +576,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 1e172ef4fb..b380dcc93d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "scs" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.scs_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { @@ -79,7 +79,7 @@ resource "azurerm_network_interface" "scs_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.scs_sizing.compute.accelerated_networking ip_configuration { name = "IPConfig1" @@ -140,6 +140,11 @@ resource "azurerm_linux_virtual_machine" "scs" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true //If length of zones > 1 distribute servers evenly across zones zone = local.use_scs_avset ? null : try(local.scs_zones[count.index % max(local.scs_zone_count, 1)], null) network_interface_ids = var.application_tier.dual_nics ? ( @@ -327,6 +332,13 @@ resource "azurerm_windows_virtual_machine" "scs" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? 
false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") //If length of zones > 1 distribute servers evenly across zones zone = local.use_scs_avset ? ( null) : ( @@ -357,9 +369,7 @@ resource "azurerm_windows_virtual_machine" "scs" { admin_username = var.sid_username admin_password = var.sid_password - #ToDo: Remove once feature is GA patch_mode = "Manual" license_type = length(var.license_type) > 0 ? var.license_type : null - patch_mode = var.infrastructure.patch_mode tags = merge(var.application_tier.scs_tags, var.tags) @@ -709,6 +719,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -723,6 +734,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -737,6 +749,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -758,6 +771,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 69f8ce36a0..b7747a5fea 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "web" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.web_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.web_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { @@ -84,7 +84,7 @@ resource "azurerm_network_interface" "web_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.web_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.web_sizing.compute.accelerated_networking ip_configuration { name = "IPConfig1" @@ -136,6 +136,11 @@ resource "azurerm_linux_virtual_machine" "web" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true //If length of zones > 1 distribute servers evenly across zones zone = local.use_web_avset ? null : try(local.web_zones[count.index % max(local.web_zone_count, 1)], null) @@ -284,6 +289,14 @@ resource "azurerm_windows_virtual_machine" "web" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? 
var.scale_set_id : null + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + //If length of zones > 1 distribute servers evenly across zones zone = local.use_web_avset ? ( null) : ( @@ -639,6 +652,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -653,6 +667,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { @@ -666,6 +681,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -687,6 +703,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf index c1dcaeeb6f..4aa763ad93 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf @@ -103,12 +103,11 @@ resource "random_password" "password" { override_special = "_%@" } - // Store the logon username in KV when authentication type is password resource "azurerm_key_vault_secret" "auth_username" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-username", local.prefix) + name = format("%s-username", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_auth_username key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id tags = var.tags @@ -118,7 +117,7 @@ resource "azurerm_key_vault_secret" "auth_username" { resource "azurerm_key_vault_secret" "auth_password" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-password", local.prefix) + name = format("%s-password", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_auth_password key_vault_id = length(local.user_key_vault_id) > 0 ? 
data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id tags = var.tags @@ -134,12 +133,11 @@ resource "tls_private_key" "sdu" { rsa_bits = 2048 } - // By default the SSH keys are stored in landscape key vault. By defining the authenticationb block the SDU keyvault resource "azurerm_key_vault_secret" "sdu_private_key" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-sshkey", local.prefix) + name = format("%s-sshkey", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_private_key key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id tags = var.tags @@ -148,7 +146,7 @@ resource "azurerm_key_vault_secret" "sdu_private_key" { resource "azurerm_key_vault_secret" "sdu_public_key" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-sshkey-pub", local.prefix) + name = format("%s-sshkey-pub", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_public_key key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id tags = var.tags diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf index 054e337f8e..71f7b0914b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf @@ -238,7 +238,7 @@ output "sapmnt_path" { format("%s:/%s/%s", length(var.sapmnt_private_endpoint_id) == 0 ? ( try(azurerm_private_endpoint.sapmnt[0].private_dns_zone_configs[0].record_sets[0].fqdn, - azurerm_private_endpoint.sapmnt[0].private_service_connection[0].private_ip_address + try(azurerm_private_endpoint.sapmnt[0].private_service_connection[0].private_ip_address,"") )) : ( data.azurerm_private_endpoint_connection.sapmnt[0].private_service_connection[0].private_ip_address ), diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index bd18cf9712..c171193f51 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -37,6 +37,7 @@ resource "azurerm_storage_account" "sapmnt" { enable_https_traffic_only = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false + cross_tenant_replication_enabled = false public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) shared_access_key_enabled = false @@ -78,7 +79,7 @@ data "azurerm_storage_account" "sapmnt" { resource "azurerm_private_endpoint" "sapmnt" { provider = azurerm.main - count = var.NFS_provider == "AFS" ? ( + count = var.NFS_provider == "AFS" && var.use_private_endpoint ? ( length(var.sapmnt_private_endpoint_id) > 0 ? ( 0) : ( 1 @@ -128,9 +129,9 @@ resource "azurerm_private_endpoint" "sapmnt" { dynamic "private_dns_zone_group" { - for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 && var.register_endpoints_with_dns ? 
1 : 0) + for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 && var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [var.landscape_tfstate.privatelink_file_id] } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 2388871d4d..922e45fa16 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -224,41 +224,9 @@ variable "use_private_endpoint" { ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool - } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } - +variable "dns_settings" { + description = "DNS Settings" + } variable "sapmnt_private_endpoint_id" { description = "Azure Resource Identifier for an private endpoint connection" type = string diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf index a21dd0aaea..35a5447222 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf @@ -468,7 +468,7 @@ locals { sid_private_key = local.use_local_credentials ? ( try( file(var.authentication.path_to_private_key), - tls_private_key.sdu[0].private_key_pem + try(tls_private_key.sdu[0].private_key_pem, "") )) : ( "" ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf index 35ed5409d7..9c2a826350 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf @@ -12,7 +12,7 @@ resource "azurerm_network_interface" "anchor" { ) resource_group_name = local.resource_group_exists ? 
data.azurerm_resource_group.resource_group[0].name : azurerm_resource_group.resource_group[0].name location = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].location : azurerm_resource_group.resource_group[0].location - enable_accelerated_networking = var.infrastructure.anchor_vms.accelerated_networking + accelerated_networking_enabled = var.infrastructure.anchor_vms.accelerated_networking ip_configuration { name = "IPConfig1" @@ -40,6 +40,13 @@ resource "azurerm_linux_virtual_machine" "anchor" { resource_group_name = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].name : azurerm_resource_group.resource_group[0].name location = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].location : azurerm_resource_group.resource_group[0].location proximity_placement_group_id = local.ppg_exists ? data.azurerm_proximity_placement_group.ppg[count.index].id : azurerm_proximity_placement_group.ppg[count.index].id + + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + zone = local.zones[count.index] network_interface_ids = [ @@ -134,6 +141,13 @@ resource "azurerm_windows_virtual_machine" "anchor" { proximity_placement_group_id = local.ppg_exists ? data.azurerm_proximity_placement_group.ppg[count.index].id : azurerm_proximity_placement_group.ppg[count.index].id zone = local.zones[count.index] + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + network_interface_ids = [ azurerm_network_interface.anchor[count.index].id ] @@ -180,6 +194,5 @@ resource "azurerm_windows_virtual_machine" "anchor" { ] } - patch_mode = "Manual" license_type = length(var.license_type) > 0 ? var.license_type : null } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf index bc630e1e8c..bc2641fac9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf @@ -145,9 +145,9 @@ resource "azurerm_lb_rule" "hdb" { resource "azurerm_private_dns_a_record" "db" { provider = azurerm.dnsmanagement - count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 
1 : 0 name = lower(format("%s%sdb%scl", var.sap_sid, local.database_sid, local.database_instance)) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [try(azurerm_lb.hdb[0].frontend_ip_configuration[0].private_ip_address, "")] diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf index 27d5d42017..3a86e0e091 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf @@ -73,36 +73,10 @@ variable "use_secondary_ips" { ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool +variable "dns_settings" { + description = "DNS Settings" } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - - -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool - } - - ######################################################################################### # # # ANF settings # diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 7839b80b0b..e81afe012d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -35,7 +35,7 @@ resource "azurerm_network_interface" "nics_dbnodes_admin" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags ip_configuration { @@ -74,7 +74,7 @@ resource "azurerm_network_interface" "nics_dbnodes_db" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags dynamic "ip_configuration" { iterator = pub @@ -131,7 +131,7 @@ resource "azurerm_network_interface" "nics_dbnodes_storage" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags ip_configuration { @@ -187,6 +187,11 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { disable_password_authentication = !local.enable_auth_password tags = merge(var.tags, local.tags) + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = 
var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + zone = local.use_avset ? null : try(local.zones[count.index % max(local.db_zone_count, 1)], null) size = local.hdb_vm_sku @@ -567,6 +572,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -583,6 +589,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index ecadfbcd64..2780be48ec 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -250,6 +250,9 @@ resource "local_file" "sap-parameters_yml" { ams_resource_id = var.ams_resource_id enable_os_monitoring = var.enable_os_monitoring enable_ha_monitoring = var.enable_ha_monitoring + enable_sap_cal = var.enable_sap_cal + calapi_kv = var.calapi_kv + sap_cal_product_name = var.sap_cal_product_name } ) @@ -346,3 +349,14 @@ resource "local_file" "sap_inventory_for_wiki_md" { file_permission = "0660" directory_permission = "0770" } + + +resource "local_file" "sap_vms_resource_id" { + content = templatefile(format("%s/sap-vm-resources.tmpl", path.module), { + scs_server_vms = length(var.scs_server_vm_resource_ids) > 0 ? element(var.scs_server_vm_resource_ids, 0) : "" + } + ) + filename = format("%s/%s_virtual_machines.json", path.cwd, var.sap_sid) + file_permission = "0660" + directory_permission = "0770" +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 1b503f016b..cba151b693 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -214,4 +214,18 @@ ams_resource_id: ${ams_resource_id} enable_os_monitoring: ${enable_os_monitoring} enable_ha_monitoring: ${enable_ha_monitoring} +%{~ if enable_sap_cal } + +############################################################################# +# # +# SAP CAL Integration # +# # +############################################################################# + +# Defines if the installation is to be deployed using SAP CAL +enable_sap_cal: ${enable_sap_cal} +calapi_kv: ${calapi_kv} +sap_cal_product_name: ${sap_cal_product_name} + +%{~ endif } ... 
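Editor's note: the %{~ if enable_sap_cal } block added to sap-parameters.tmpl above relies on Terraform's string-template directives. The SAP CAL parameters are rendered into the output only when the flag is set, and the ~ markers strip the newlines the directive lines would otherwise leave behind. The following is a minimal, self-contained sketch of that mechanism; the inline heredoc and the values are illustrative stand-ins for the real template file, not code from this repository.

locals {
  enable_sap_cal       = true                # illustrative flag
  calapi_kv            = "examplecalkv"      # hypothetical key vault name
  sap_cal_product_name = "EXAMPLE_PRODUCT"   # hypothetical product name

  # With enable_sap_cal = false the whole block renders to an empty
  # string; the ~ in %{~ ... } strips the surrounding newlines so a
  # disabled block leaves no blank lines in the generated YAML.
  sap_cal_block = <<-EOT
    %{~ if local.enable_sap_cal }
    enable_sap_cal: ${local.enable_sap_cal}
    calapi_kv: ${local.calapi_kv}
    sap_cal_product_name: ${local.sap_cal_product_name}
    %{~ endif }
  EOT
}

output "sap_cal_block" {
  value = local.sap_cal_block
}

In the repository the same directives live in the .tmpl file and are evaluated by templatefile() from inventory.tf; the heredoc above only makes the sketch runnable on its own.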
diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl new file mode 100644 index 0000000000..0f4d8ac37b --- /dev/null +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl @@ -0,0 +1,4 @@ +{ + "configurationType": "Discovery", + "centralServerVmId": "${scs_server_vms}" +} \ No newline at end of file diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 8ad31ffff5..b8bb496685 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -172,6 +172,7 @@ variable "scs_server_count" { } variable "scs_server_ips" { description = "List of IP addresses for the SCS Servers" } variable "scs_server_secondary_ips" { description = "List of secondary IP addresses for the SCS Servers" } +variable "scs_server_vm_resource_ids" { description = "List of Virtual Machine resource IDs for the SCS servers" } variable "scs_vm_names" { description = "List of VM names for the SCS Servers" } variable "shared_home" { description = "If defined provides shared-home support" } variable "sid_keyvault_user_id" { description = "Defines the names for the resources" } @@ -211,3 +212,16 @@ variable "ams_resource_id" { description = "Resource ID for variable "enable_os_monitoring" { description = "Enable OS monitoring" } variable "enable_ha_monitoring" { description = "Enable HA monitoring" } +variable "enable_sap_cal" { + description = "Enable SAP CAL" + default = false + type = bool + } +variable "calapi_kv" { + description = "Keyvault for CAL API" + default = "" + } +variable "sap_cal_product_name" { + description = "Product name of SAP CAL" + default = "" + } From 341dcbe763f8c9b1b9bced5c3d2bf86a4e70609d Mon Sep 17 00:00:00 2001 From: "Shekhar Sorot ( MSFT )" Date: Fri, 13 Sep 2024 15:47:06 +0530 Subject: [PATCH 600/607] Merging experimental into scaleout-hsr prior to unit testing (#630) Bring in commits from experimental --------- Co-authored-by: Steffen Bo Thomsen Co-authored-by: devanshjain Co-authored-by: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Co-authored-by: hdamecharla Co-authored-by: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Co-authored-by: Csaba Daradics Co-authored-by: Jesper Severinsen <30658160+jesperseverinsen@users.noreply.github.com> Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Co-authored-by: Jaskirat Singh <108129510+jaskisin@users.noreply.github.com> Co-authored-by: jasksingh Co-authored-by: Kimmo Forss Co-authored-by: Harm Jan Stam Co-authored-by: Kimmo Forss --- deploy/ansible/playbook_04_00_00_db_install.yaml | 3 --- .../ansible/roles-os/1.3-repository/vars/repos.yaml | 3 ++- .../roles-sap-os/2.6-sap-mounts/tasks/main.yaml | 12 ++++++------ .../tasks/5.5.4.1-cluster-RedHat.yml | 2 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- deploy/terraform/bootstrap/sap_deployer/transform.tf | 12 ++++++++++++ .../terraform-units/modules/sap_landscape/outputs.tf | 11 ----------- 7 files changed, 22 insertions(+), 23 deletions(-) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 5cc6b98052..cc2eec14ee 100644 --- 
a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -361,9 +361,6 @@ - name: "Database Installation Playbook: - Clear the failed state of hosts" ansible.builtin.meta: clear_host_errors - - name: "Database Installation Playbook: - Clear the failed state of hosts" - ansible.builtin.meta: clear_host_errors - # - name: "Database installation Playbook: - run HANA Scale-Out mounts" # ansible.builtin.include_role: # name: roles-sap-os/2.6-sap-mounts diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index 054efdd0ea..f831e412a9 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -30,8 +30,9 @@ repos: redhat8.8: - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.9: -# - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.10: + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat9.0: - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'present' } redhat9.2: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 4dacddbb4a..3c64b643e3 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -342,13 +342,13 @@ # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 # This will be removed in the next release, left here for tracing and documentation -# Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used +# Import this task only if database_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used # - name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" # ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml # when: # - NFS_provider == 'ANF' -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined # Import this task only if the tier is ora. @@ -389,13 +389,13 @@ # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 # This will be removed in the next release, left here for tracing and documentation -# Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used +# Import this task only if database_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used # - name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" # ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml # when: # - NFS_provider == 'ANF' -# - db_scale_out is defined -# - db_scale_out +# - database_scale_out is defined +# - database_scale_out # - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined # Import this task only if the tier is ora. 
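Editor's note: a recurring change in this series replaces the separate DNS input variables (use_custom_dns_a_registration, management_dns_subscription_id, management_dns_resourcegroup_name, register_virtual_network_to_dns, register_endpoints_with_dns) with a single dns_settings value, as seen in the variables_global.tf hunks above and in the transform.tf hunk below. The sketch that follows shows the consolidation pattern under assumed types and defaults; the repository declares dns_settings without a type constraint, so the object schema here is an assumption, and the fallback variable is hypothetical.

variable "landscape_dns_resourcegroup_name" {
  description = "Fallback resource group, standing in for the landscape state value"
  type        = string
  default     = "example-dns-rg"   # hypothetical value
}

variable "dns_settings" {
  description = "DNS Settings"
  # Assumed schema: all attributes optional so callers can pass {}.
  type = object({
    use_custom_dns_a_registration     = optional(bool, false)
    management_dns_subscription_id    = optional(string)
    management_dns_resourcegroup_name = optional(string)
    register_virtual_network_to_dns   = optional(bool, true)
    register_endpoints_with_dns       = optional(bool, true)
  })
  default = {}
}

locals {
  # coalesce() returns the first argument that is neither null nor an
  # empty string, so an explicitly supplied management resource group
  # wins over the fallback, mirroring the pattern used in the diffs.
  dns_resource_group_name = coalesce(
    var.dns_settings.management_dns_resourcegroup_name,
    var.landscape_dns_resourcegroup_name
  )
}

Passing one object instead of five scalars keeps the module call sites short and lets new DNS options ride along without touching every variables_global.tf again, which appears to be the motivation for the refactor.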
diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index adff46a97c..2f5fdf6e66 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -136,7 +136,7 @@ - name: "5.5.4.1 HANA Cluster configuration - Set the cluster properties for two node clusters" when: - database_high_availability - - not db_scale_out + - not database_scale_out - is_pcmk_ver_gt_204 block: - name: "5.5.4.1 HANA Cluster configuration - set resource defaults 'priority'" diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index b9687df43e..e6bf7d4a31 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -25,7 +25,7 @@ $ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } #endregion -$versionLabel = "v3.11.0.3" +$versionLabel = "v3.12.0.0" # az logout diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 8f12166bad..74411d498b 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -220,4 +220,16 @@ locals { app_id = var.app_registration_app_id client_secret = var.webapp_client_secret } + + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = trimspace(var.management_dns_subscription_id) + + privatelink_dns_subscription_id = trimspace(coalesce(var.privatelink_dns_subscription_id,var.management_dns_subscription_id, " ")) + privatelink_dns_resourcegroup_name = trimspace(coalesce(var.management_dns_resourcegroup_name, var.privatelink_dns_resourcegroup_name, " ")) + } + } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index c4b0b1c4c1..dc565bb8c1 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -563,14 +563,3 @@ output "ng_resource_id" { description = "Azure resource identifier for the NAT Gateway" value = local.create_nat_gateway ? azurerm_nat_gateway.ng[0].id : "" } - -############################################################################### -# # -# NAT Gateway resource properties # -# # -############################################################################### - -output "ng_resource_id" { - description = "Azure resource identifier for the NAT Gateway" - value = local.create_nat_gateway ? 
azurerm_nat_gateway.ng[0].id : "" - } From de4211294a7cc27a146741f1c56774fc289f7eab Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 13 Sep 2024 16:16:13 +0300 Subject: [PATCH 601/607] Terraform updates (#631) * Refactor ANF volume group creation in HDB node module * Update provider configurations to use version 4.0 or higher * Refactor ANF volume group creation in HDB node module and update provider configurations * chore: replace db_scale_out with database_scale_out * Bump up the version * Add User creation * Set permissions on /sapmnt * Fix capitalization of source in providers.tf * Refactor NAT Gateway resource properties in outputs.tf * Refactor azapi provider source in providers.tf * Refactor private endpoint network policies in subnet resources * For scaleout use the admin subnet ID * TF 4.0 support * For scale out make the admin nic the primary * Refactor Variable class to remove nullable value property Update Azure.ResourceManager.Network package to version 1.9.0 * Refactor inventory.tf to support scale-out for SAP system * Add site information * Refactor inventory.tf to include scale_out variable * Refactor vm-hdb.tf to include site information for scale-out databases * Refactor HANA computer and secondary DNS names for scale-out databases --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla --- Webapp/SDAF/Models/EnvironmentModel.cs | 2 +- Webapp/SDAF/SDAFWebApp.csproj | 2 +- .../ansible/playbook_04_00_00_db_install.yaml | 4 +- deploy/ansible/playbook_05_01_sap_dbload.yaml | 38 ++-- .../tasks/2.6.1-anf-mounts.yaml | 8 + deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 4 +- .../bootstrap/sap_deployer/module.tf | 2 +- .../bootstrap/sap_deployer/providers.tf | 9 +- .../bootstrap/sap_library/providers.tf | 12 +- .../terraform/run/sap_deployer/providers.tf | 5 +- .../terraform/run/sap_landscape/providers.tf | 22 +- deploy/terraform/run/sap_library/providers.tf | 8 +- deploy/terraform/run/sap_system/module.tf | 2 + deploy/terraform/run/sap_system/providers.tf | 5 +- .../modules/sap_deployer/app_service.tf | 2 +- .../modules/sap_deployer/bastion.tf | 2 +- .../modules/sap_deployer/firewall.tf | 2 +- .../modules/sap_deployer/infrastructure.tf | 4 +- .../modules/sap_deployer/providers.tf | 2 +- .../modules/sap_landscape/iscsi.tf | 4 +- .../modules/sap_landscape/nsg.tf | 20 +- .../modules/sap_landscape/providers.tf | 2 +- .../modules/sap_landscape/storage_accounts.tf | 8 +- .../modules/sap_landscape/subnets.tf | 10 +- .../modules/sap_library/providers.tf | 2 +- .../modules/sap_namegenerator/output.tf | 12 +- .../modules/sap_namegenerator/vm.tf | 34 ++-- .../sap_system/anydb_node/providers.tf | 2 +- .../modules/sap_system/app_tier/providers.tf | 2 +- .../common_infrastructure/providers.tf | 2 +- .../common_infrastructure/storage_accounts.tf | 2 +- .../modules/sap_system/hdb_node/anf.tf | 14 +- .../modules/sap_system/hdb_node/avg.tf | 191 +++++++++++++++++- .../sap_system/hdb_node/infrastructure.tf | 4 +- .../modules/sap_system/hdb_node/outputs.tf | 10 +- .../modules/sap_system/hdb_node/providers.tf | 2 +- .../sap_system/hdb_node/variables_local.tf | 9 + .../modules/sap_system/hdb_node/vm-hdb.tf | 13 +- .../sap_system/hdb_node/vm-observer.tf | 2 +- .../output_files/ansible_inventory.tmpl | 4 + .../sap_system/output_files/inventory.tf | 5 +- .../sap_system/output_files/providers.tf | 2 +- .../output_files/variables_global.tf | 2 + 45 files changed, 359 insertions(+), 138 deletions(-) diff --git 
a/Webapp/SDAF/Models/EnvironmentModel.cs b/Webapp/SDAF/Models/EnvironmentModel.cs index b6588125ec..2fd83a75ed 100644 --- a/Webapp/SDAF/Models/EnvironmentModel.cs +++ b/Webapp/SDAF/Models/EnvironmentModel.cs @@ -42,7 +42,7 @@ public class Variables public class Variable { - public string? value { get; set; } + public string value { get; set; } [JsonIgnore] public bool? isSecret { get; set; } [JsonIgnore] diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 8e64b487b3..75e4b08868 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -20,7 +20,7 @@ - + diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index cc2eec14ee..ea498a1fd4 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -356,7 +356,7 @@ ansible.builtin.include_role: name: roles-db/4.0.0-hdb-install when: - - not db_scale_out + - not database_scale_out - name: "Database Installation Playbook: - Clear the failed state of hosts" ansible.builtin.meta: clear_host_errors @@ -365,7 +365,7 @@ # ansible.builtin.include_role: # name: roles-sap-os/2.6-sap-mounts # when: - # - db_scale_out | default(false) == true + # - database_scale_out | default(false) == true - name: "Database Installation Playbook: - run HANA Scale-Out installation" ansible.builtin.include_role: diff --git a/deploy/ansible/playbook_05_01_sap_dbload.yaml b/deploy/ansible/playbook_05_01_sap_dbload.yaml index 0f6c844a75..bfbf23d2f7 100644 --- a/deploy/ansible/playbook_05_01_sap_dbload.yaml +++ b/deploy/ansible/playbook_05_01_sap_dbload.yaml @@ -78,6 +78,10 @@ - name: "DBLoad Playbook: - Perform DB Load on HANA" become: true become_user: root + when: + - platform == 'HANA' + - "'pas' in supported_tiers" + - ansible_os_family != "Windows" block: - name: "DBLoad Playbook: - Setting the dbload facts" ansible.builtin.set_fact: @@ -97,13 +101,6 @@ tags: - always - - - name: "DBLoad Playbook: - Mounting" - ansible.builtin.include_role: - name: roles-sap-os/2.6-sap-mounts - tags: - - 2.6-sap-mounts - - name: "DBLoad Playbook: Define this SID" ansible.builtin.set_fact: this_sid: @@ -120,6 +117,28 @@ ansible.builtin.set_fact: all_sids: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sids | default([]) + [this_sid] }}{% endif %}" + - name: Generic Users and Groups for SAP Installation + ansible.builtin.include_role: + name: roles-sap-os/2.5-sap-users + tasks_from: user_nw.yaml + vars: + scs_instance_number: "{{ sid_to_be_deployed.ascs_inst_no }}" + tier: generic + main_password: "{{ hostvars.localhost.sap_password }}" + sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + loop: "{{ all_sids }}" + loop_control: + loop_var: sid_to_be_deployed + tags: + - 2.5-sap-users + + - name: "DBLoad Playbook: - Mounting" + ansible.builtin.include_role: + name: roles-sap-os/2.6-sap-mounts + tags: + - 2.6-sap-mounts + - name: Run the DBLoad Playbook block: - name: "DBLoad Playbook: - Run DBLoad" @@ -170,11 +189,6 @@ tags: - 5.1-dbload - when: - - platform == 'HANA' - - "'pas' in supported_tiers" - - ansible_os_family != "Windows" - # /*----------------------------------------------------------------------------8 # | | # | Playbook for Oracle DB Load | diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml 
index 8cd6cdac43..088d0734a6 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -386,6 +386,14 @@ - node_tier != 'hana' - sap_mnt is defined +- name: "ANF Mount: Set Permissions on /sapmnt directory" + ansible.builtin.file: + owner: '{{ sidadm_uid }}' + group: sapsys + path: "/sapmnt/{{ sap_sid | upper }}" + state: directory + recurse: true + - name: "ANF Mount: usr/sap/{{ sap_sid | upper }}/SYS" ansible.posix.mount: src: "{{ item.src }}" diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 85df0c5291..aad2619ee0 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -5,7 +5,7 @@ become_user_name: root oracle_user_name: oracle orchestration_ansible_user: azureadm # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.12.0.0" +SDAF_Version: "3.13.0.0" # ------------------- End - SDAF Ansible Version ---------------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index a57eb4c686..c21c6f6867 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.12.0.0 +3.13.0.0 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index e6bf7d4a31..e23ad4f650 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -25,7 +25,7 @@ $ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } #endregion -$versionLabel = "v3.12.0.0" +$versionLabel = "v3.13.0.0" # az logout @@ -1131,4 +1131,4 @@ else { } -Write-Host "The script has completed" -ForegroundColor Green \ No newline at end of file +Write-Host "The script has completed" -ForegroundColor Green diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index 9beee66193..e464508dd2 100644 --- 
var.management_dns_subscription_id), null) alias = "privatelinkdnsmanagement" - skip_provider_registration = true storage_use_azuread = true } @@ -87,7 +86,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = ">=4.0" } } } diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 688be3bf6c..0c8737b0b0 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -24,7 +24,7 @@ provider "azurerm" { } } - skip_provider_registration = true + storage_use_azuread = true } @@ -42,7 +42,7 @@ provider "azurerm" { tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "main" - skip_provider_registration = true + storage_use_azuread = true } @@ -51,7 +51,7 @@ provider "azurerm" { features { } alias = "deployer" - skip_provider_registration = true + storage_use_azuread = true } @@ -62,7 +62,7 @@ provider "azurerm" { client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "dnsmanagement" - skip_provider_registration = true + storage_use_azuread = true } @@ -73,7 +73,7 @@ provider "azurerm" { client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "privatelinkdnsmanagement" - skip_provider_registration = true + storage_use_azuread = true } @@ -104,7 +104,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = ">=4.0" } } } diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 4588670d4b..3c0f7e4e89 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -26,7 +26,6 @@ provider "azurerm" { } } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" - skip_provider_registration = true storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } @@ -44,7 +43,6 @@ provider "azurerm" { } } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" - skip_provider_registration = true subscription_id = local.spn.subscription_id client_id = var.use_spn ? local.spn.client_id : null @@ -62,7 +60,6 @@ provider "azurerm" { client_id = var.use_spn ? local.spn.client_id : null client_secret = var.use_spn ? local.spn.client_secret: null tenant_id = var.use_spn ? local.spn.tenant_id: null - skip_provider_registration = true storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } @@ -88,7 +85,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "~> 3.3" + version = "~> 4.0" } } } diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 5838805ab4..cbb2b418a4 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -16,7 +16,6 @@ provider "azurerm" { features {} subscription_id = length(local.deployer_subscription_id) > 0 ? local.deployer_subscription_id : null use_msi = var.use_spn ? false : true - skip_provider_registration = true storage_use_azuread = true } @@ -41,7 +40,7 @@ provider "azurerm" { partner_id = "25c87b5f-716a-4067-bcd8-116956916dd6" alias = "workload" - skip_provider_registration = true + } provider "azurerm" { @@ -52,7 +51,7 @@ provider "azurerm" { client_secret = var.use_spn ? local.cp_spn.client_secret : null tenant_id = var.use_spn ? 
local.cp_spn.tenant_id : null use_msi = var.use_spn ? false : true - skip_provider_registration = true + } @@ -68,7 +67,7 @@ provider "azurerm" { client_secret = var.use_spn ? local.cp_spn.client_secret : null tenant_id = var.use_spn ? local.cp_spn.tenant_id : null use_msi = var.use_spn ? false : true - skip_provider_registration = true + } provider "azurerm" { @@ -79,7 +78,7 @@ provider "azurerm" { client_secret = var.use_spn ? local.cp_spn.client_secret : null tenant_id = var.use_spn ? local.cp_spn.tenant_id : null alias = "peering" - skip_provider_registration = true + } provider "azuread" { @@ -90,11 +89,12 @@ provider "azuread" { } provider "azapi" { - alias = "api" - subscription_id = local.spn.subscription_id - client_id = local.spn.client_id - client_secret = local.spn.client_secret - tenant_id = local.spn.tenant_id + alias = "api" + subscription_id = local.spn.subscription_id + client_id = var.use_spn ? local.spn.client_id : null + client_secret = var.use_spn ? local.spn.client_secret : null + tenant_id = local.spn.tenant_id + use_msi = var.use_spn ? false : true } terraform { @@ -118,7 +118,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "~> 3.3" + version = ">= 4.0" } azapi = { source = "Azure/azapi" diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index 7e6a6a8edd..6760605ed5 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -20,7 +20,7 @@ data "azurerm_client_config" "current" { provider "azurerm" { features { } - skip_provider_registration = true + use_msi = var.use_spn ? false : true storage_use_azuread = !var.shared_access_key_enabled @@ -42,13 +42,11 @@ provider "azurerm" { use_msi = var.use_spn ? false : true alias = "main" - skip_provider_registration = true } provider "azurerm" { features { } - skip_provider_registration = true alias = "deployer" storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true @@ -63,7 +61,6 @@ provider "azurerm" { client_id = local.use_spn ? local.spn.client_id : null client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? local.spn.tenant_id : null - skip_provider_registration = true storage_use_azuread = !var.shared_access_key_enabled use_msi = var.use_spn ? false : true } @@ -75,7 +72,6 @@ provider "azurerm" { client_secret = local.use_spn ? local.spn.client_secret : null tenant_id = local.use_spn ? 
local.spn.tenant_id : null alias = "privatelinkdnsmanagement" - skip_provider_registration = true storage_use_azuread = true } @@ -108,7 +104,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "~> 3.3" + version = "~> 4.0" } } } diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index a30e70a745..2dc0df308c 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -460,4 +460,6 @@ module "output_files" { enable_sap_cal = var.enable_sap_cal calapi_kv = var.calapi_kv sap_cal_product_name = var.sap_cal_product_name + + site_information = module.hdb_node.site_information } diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 656fbaf18b..726bc0ded8 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -37,7 +37,7 @@ provider "azurerm" { partner_id = "3179cd51-f54b-4c73-ac10-8e99417efce7" alias = "system" - skip_provider_registration = true + } provider "azurerm" { @@ -48,7 +48,6 @@ provider "azurerm" { client_secret = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.client_secret : null tenant_id = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? local.cp_spn.tenant_id : null use_msi = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? false : true - skip_provider_registration = true } @@ -80,7 +79,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = ">=4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 4913fc0c71..22a6969178 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -22,7 +22,7 @@ resource "azurerm_subnet" "webapp" { address_prefixes = [local.webapp_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( var.use_webapp ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/bastion.tf b/deploy/terraform/terraform-units/modules/sap_deployer/bastion.tf index 8d01b6b089..68a228860d 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/bastion.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/bastion.tf @@ -23,7 +23,7 @@ resource "azurerm_subnet" "bastion" { ) address_prefixes = [var.infrastructure.vnets.management.subnet_bastion.prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? 
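# Sketch (not part of this patch): cheat-sheet for the azurerm 3.x -> 4.x
# argument renames applied throughout the provider and resource files below.
# The names on the left are removed in 4.x:
#   skip_provider_registration                -> provider-level resource_provider_registrations
#   enable_https_traffic_only                 -> https_traffic_only_enabled
#   private_endpoint_network_policies_enabled -> private_endpoint_network_policies ("Enabled"/"Disabled")
#   disable_bgp_route_propagation             -> bgp_route_propagation_enabled (note the inverted sense)
#   enable_accelerated_networking             -> accelerated_networking_enabled
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 4.0"
    }
  }
}

provider "azurerm" {
  features {}
  # 4.x replacement for skip_provider_registration = true
  resource_provider_registrations = "none"
}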
( ["Microsoft.Storage", "Microsoft.KeyVault"]) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf index f507c1fc04..fcd25cafa1 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/firewall.tf @@ -85,7 +85,7 @@ resource "azurerm_route_table" "rt" { var.naming.separator, var.naming.resource_suffixes.routetable ) - disable_bgp_route_propagation = false + bgp_route_propagation_enabled = false resource_group_name = local.resource_group_exists ? ( data.azurerm_resource_group.deployer[0].name) : ( azurerm_resource_group.deployer[0].name diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 25b7fa72c7..63c72150b9 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -58,7 +58,7 @@ resource "azurerm_subnet" "subnet_mgmt" { virtual_network_name = local.vnet_mgmt_exists ? data.azurerm_virtual_network.vnet_mgmt[0].name : azurerm_virtual_network.vnet_mgmt[0].name address_prefixes = [local.management_subnet_prefix] - private_endpoint_network_policies_enabled = !var.use_private_endpoint + private_endpoint_network_policies = !var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( var.use_webapp ? ( @@ -84,7 +84,7 @@ resource "azurerm_storage_account" "deployer" { location = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].location : azurerm_resource_group.deployer[0].location account_replication_type = "LRS" account_tier = "Standard" - enable_https_traffic_only = local.enable_secure_transfer + https_traffic_only_enabled = local.enable_secure_transfer min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false shared_access_key_enabled = var.deployer.shared_access_key_enabled diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf b/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf index 78b8d1baba..468dfe55bb 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.dnsmanagement, azurerm.main] - version = "~> 3.0" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 6b58678af4..4d1786032b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -139,8 +139,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_iscsi" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.sub_iscsi_exists ? 
data.azurerm_subnet.iscsi[0].address_prefixes : azurerm_subnet.iscsi[0].address_prefixes } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index 6c593b21ae..aae8fd6bc3 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -175,8 +175,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_app" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.application_subnet_existing ? data.azurerm_subnet.app[0].address_prefixes : azurerm_subnet.app[0].address_prefixes } @@ -205,8 +205,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_web" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.web_subnet_existing ? data.azurerm_subnet.web[0].address_prefixes : azurerm_subnet.web[0].address_prefixes } @@ -236,8 +236,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_storage" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.storage_subnet_existing ? data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes } @@ -266,8 +266,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_db" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.database_subnet_existing ? data.azurerm_subnet.db[0].address_prefixes : azurerm_subnet.db[0].address_prefixes } @@ -296,8 +296,8 @@ resource "azurerm_network_security_rule" "nsr_controlplane_admin" { var.deployer_tfstate.subnet_mgmt_address_prefixes, var.deployer_tfstate.subnet_bastion_address_prefixes, local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space + flatten(data.azurerm_virtual_network.vnet_sap[0].address_space)) : ( + flatten(azurerm_virtual_network.vnet_sap[0].address_space) ))) destination_address_prefixes = local.admin_subnet_existing ? 
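# Sketch (assumed values, not part of this patch): why the flatten() wrappers
# are needed in the NSG rules above. Under provider 4.x the virtual network
# address_space can arrive as a nested value, and source_address_prefixes only
# accepts a flat list of CIDR strings.
locals {
  mgmt_prefixes      = ["10.10.0.0/24"]
  bastion_prefixes   = ["10.10.1.0/26"]
  vnet_address_space = [["10.20.0.0/16", "10.21.0.0/16"]] # nested, as seen in the failing case

  source_prefixes = flatten(concat(
    local.mgmt_prefixes,
    local.bastion_prefixes,
    local.vnet_address_space
  ))
  # => ["10.10.0.0/24", "10.10.1.0/26", "10.20.0.0/16", "10.21.0.0/16"]
}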
data.azurerm_subnet.admin[0].address_prefixes : azurerm_subnet.admin[0].address_prefixes } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index 35722baebe..5b6fda3f0d 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering] - version = "~> 3.23" + version = "~> 4.0" } azapi = { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 331d4d139e..15c181384f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -25,7 +25,7 @@ resource "azurerm_storage_account" "storage_bootdiag" { account_replication_type = "LRS" account_tier = "Standard" - enable_https_traffic_only = true + https_traffic_only_enabled = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false @@ -142,7 +142,7 @@ resource "azurerm_storage_account" "witness_storage" { account_replication_type = "LRS" account_tier = "Standard" - enable_https_traffic_only = true + https_traffic_only_enabled = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false @@ -289,7 +289,7 @@ resource "azurerm_storage_account" "transport" { account_tier = "Premium" account_replication_type = "ZRS" account_kind = "FileStorage" - enable_https_traffic_only = false + https_traffic_only_enabled = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false # shared_access_key_enabled = false @@ -511,7 +511,7 @@ resource "azurerm_storage_account" "install" { account_replication_type = var.storage_account_replication_type account_tier = "Premium" allow_nested_items_to_be_public = false - enable_https_traffic_only = false + https_traffic_only_enabled = false min_tls_version = "TLS1_2" cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index 956b0f1b19..91fd40c415 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -7,7 +7,7 @@ resource "azurerm_subnet" "admin" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.admin_subnet_prefix] - enforce_private_link_endpoint_network_policies = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -34,7 +34,7 @@ resource "azurerm_subnet" "db" { virtual_network_name = local.SAP_virtualnetwork_exists ? 
data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.database_subnet_prefix] - enforce_private_link_endpoint_network_policies = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] ) : ( @@ -59,7 +59,7 @@ resource "azurerm_subnet" "app" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.application_subnet_prefix] - enforce_private_link_endpoint_network_policies = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -86,7 +86,7 @@ resource "azurerm_subnet" "web" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.web_subnet_prefix] - enforce_private_link_endpoint_network_policies = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -114,7 +114,7 @@ resource "azurerm_subnet" "storage" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.subnet_cidr_storage] - enforce_private_link_endpoint_network_policies = var.use_private_endpoint + private_endpoint_network_policies = var.use_private_endpoint ? "Enabled" : "Disabled" service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] diff --git a/deploy/terraform/terraform-units/modules/sap_library/providers.tf b/deploy/terraform/terraform-units/modules/sap_library/providers.tf index e08192b874..9c16a761ca 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] - version = "~> 3.0" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf index 5e18b73318..1f7c95456d 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf @@ -61,13 +61,13 @@ output "naming" { ANCHOR_COMPUTERNAME = local.anchor_computer_names ANCHOR_SECONDARY_DNSNAME = local.anchor_secondary_dnsnames ANCHOR_VMNAME = local.anchor_vm_names - ANYDB_COMPUTERNAME = var.database_high_availability ? concat(local.anydb_computer_names, local.anydb_computer_names_ha) : local.anydb_computer_names - ANYDB_SECONDARY_DNSNAME = concat(local.anydb_secondary_dnsnames, local.anydb_secondary_dnsnames_ha) - ANYDB_VMNAME = var.database_high_availability ? concat(local.anydb_vm_names, local.anydb_vm_names_ha) : local.anydb_vm_names + ANYDB_COMPUTERNAME = var.database_high_availability ? 
local.anydb_computer_names_ha : local.anydb_computer_names + ANYDB_SECONDARY_DNSNAME = var.database_high_availability ? local.anydb_secondary_dnsnames_ha : local.anydb_secondary_dnsnames + ANYDB_VMNAME = var.database_high_availability ? local.anydb_vm_names_ha : local.anydb_vm_names DEPLOYER = local.deployer_vm_names - HANA_COMPUTERNAME = var.database_high_availability ? concat(local.hana_computer_names, local.hana_computer_names_ha) : local.hana_computer_names - HANA_SECONDARY_DNSNAME = var.database_high_availability ? concat(local.hana_secondary_dnsnames, local.hana_secondary_dnsnames_ha) : local.hana_secondary_dnsnames - HANA_VMNAME = var.database_high_availability ? concat(local.hana_server_vm_names, local.hana_server_vm_names_ha) : local.hana_server_vm_names + HANA_COMPUTERNAME = var.database_high_availability ? local.hana_computer_names_ha : local.hana_computer_names + HANA_SECONDARY_DNSNAME = var.database_high_availability ? local.hana_secondary_dnsnames_ha : local.hana_secondary_dnsnames + HANA_VMNAME = var.database_high_availability ? local.hana_server_vm_names_ha : local.hana_server_vm_names ISCSI_COMPUTERNAME = local.iscsi_server_names OBSERVER_COMPUTERNAME = local.observer_computer_names OBSERVER_VMNAME = local.observer_vm_names diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf index 6bf26bb34b..d3b9fd170f 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf @@ -22,8 +22,8 @@ locals { format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 0, local.random_id_vm_verified) ] - anydb_computer_names_ha = [for idx in range(var.db_server_count) : - format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified) + anydb_computer_names_ha = [for idx in range(var.db_server_count * 2) : + format("%sdb%02d%s%01d%s", lower(var.sap_sid), floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified) ] anydb_vm_names = [for idx in range(var.db_server_count) : @@ -33,10 +33,10 @@ locals { ) ] - anydb_vm_names_ha = [for idx in range(var.db_server_count) : + anydb_vm_names_ha = [for idx in range(var.db_server_count * 2) : length(var.db_zones) > 0 && var.use_zonal_markers ? 
( - format("%sdb%sz%s%s%02d%s%d%s", lower(var.sap_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified)) : ( - format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified) + format("%sdb%sz%s%s%02d%s%01d%s", lower(var.sap_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified)) : ( + format("%sdb%02d%s%01d%s", lower(var.sap_sid), floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified) ) ] @@ -59,21 +59,21 @@ locals { format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, substr(local.random_id_vm_verified, 0, 2)) ] - hana_computer_names_ha = [for idx in range(var.db_server_count) : - format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, substr(local.random_id_vm_verified, 0, 2)) + hana_computer_names_ha = [for idx in range(var.db_server_count * 2) : + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2)) ] hana_server_vm_names = [for idx in range(var.db_server_count) : length(var.db_zones) > 0 && var.use_zonal_markers ? ( - format("%sd%s%sz%s%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : ( - format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, local.random_id_vm_verified) + format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : ( + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, local.random_id_vm_verified) ) ] - hana_server_vm_names_ha = [for idx in range(var.db_server_count) : + hana_server_vm_names_ha = [for idx in range(var.db_server_count * 2) : length(var.db_zones) > 0 && var.use_zonal_markers ? 
( - format("%sd%s%sz%s%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, idx + var.resource_offset, 1, local.random_id_vm_verified)) : ( - format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, local.random_id_vm_verified) + format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_vm_verified)) : ( + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_vm_verified) ) ] @@ -123,16 +123,16 @@ locals { format("v%sd%02dl%d%s", lower(var.sap_sid), idx + var.resource_offset, 0, substr(local.random_id_vm_verified, 0, 2)) ] - anydb_secondary_dnsnames_ha = [for idx in range(var.db_server_count) : - format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, substr(local.random_id_vm_verified, 0, 2)) + anydb_secondary_dnsnames_ha = [for idx in range(var.db_server_count * 2) : + format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2)) ] - hana_secondary_dnsnames = [for idx in range(var.db_server_count) : + hana_secondary_dnsnames = [for idx in range(var.db_server_count ) : format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, substr(local.random_id_vm_verified, 0, 2)) ] - hana_secondary_dnsnames_ha = [for idx in range(var.db_server_count) : - format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, local.random_id_virt_vm_verified) + hana_secondary_dnsnames_ha = [for idx in range(var.db_server_count * 2) : + format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_virt_vm_verified) ] scs_secondary_dnsnames = [for idx in range(var.scs_server_count) : diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf index 62f084307e..6305c9bcbb 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] // - version = "~> 3.2" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf index 62f084307e..6305c9bcbb 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] // - version = "~> 3.2" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf index 62f084307e..6305c9bcbb 100644 --- 
a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] // - version = "~> 3.2" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index c171193f51..6ac5e9fe6e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -34,7 +34,7 @@ resource "azurerm_storage_account" "sapmnt" { account_tier = "Premium" account_replication_type = "ZRS" account_kind = "FileStorage" - enable_https_traffic_only = false + https_traffic_only_enabled = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index d490c142ab..b1634f6cbe 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -48,7 +48,7 @@ resource "azurerm_netapp_volume" "hanadata" { data "azurerm_netapp_volume" "hanadata" { provider = azurerm.main - depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] + depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA_full] count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data ? ( var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( @@ -76,7 +76,7 @@ data "azurerm_netapp_volume" "hanadata" { resource "azurerm_netapp_volume" "hanalog" { provider = azurerm.main - depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] + depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA_full] count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log && !local.use_avg ? ( var.hana_ANF_volumes.use_existing_log_volume ? ( @@ -124,7 +124,7 @@ resource "azurerm_netapp_volume" "hanalog" { data "azurerm_netapp_volume" "hanalog" { provider = azurerm.main - depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] + depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA_full] count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log ? ( var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( @@ -151,13 +151,13 @@ data "azurerm_netapp_volume" "hanalog" { resource "azurerm_netapp_volume" "hanashared" { provider = azurerm.main - depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] + depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA_full] count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared && !local.use_avg ? ( var.hana_ANF_volumes.use_existing_shared_volume ? 
( 0 ) : ( - var.database_server_count + local.db_zone_count )) : ( 0 ) : 0 @@ -201,11 +201,11 @@ resource "azurerm_netapp_volume" "hanashared" { data "azurerm_netapp_volume" "hanashared" { provider = azurerm.main - depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] + depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA_full] count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared ? ( var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - var.database_server_count + local.db_zone_count ) : ( 0 )) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf index a1ecc74995..e6865cdf64 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/avg.tf @@ -4,9 +4,10 @@ # # #######################################4#######################################8 -resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" { +resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA_full" { provider = azurerm.main - count = local.use_avg ? length(var.database.zones) * (var.database_server_count - var.database.stand_by_node_count) : 0 + depends_on = [ azurerm_linux_virtual_machine.vm_dbnode ] + count = local.use_avg ? length(var.database.zones) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hana_avg, local.prefix, @@ -134,6 +135,192 @@ resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA" { } +resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA_data2" { + provider = azurerm.main + depends_on = [ azurerm_linux_virtual_machine.vm_dbnode ] + count = local.use_avg && var.database_server_count / length(var.database.zones) > 1 ? length(var.database.zones) : 0 + name = format("%s%s%s%sdata2_%d", + var.naming.resource_prefixes.hana_avg, + local.prefix, + var.naming.separator, + local.resource_suffixes.hana_avg, count.index + 1 + ) + resource_group_name = local.ANF_pool_settings.resource_group_name + location = local.ANF_pool_settings.location + + account_name = local.ANF_pool_settings.account_name + group_description = format("Application Volume %d group for %s", count.index + 1, var.sap_sid) + application_identifier = local.sid + + volume { + name = format("%s%s%s%sdata_2%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanadata, + count.index + 1 + ) + volume_path = format("%s-%sdata2-%02d", + var.sap_sid, + local.resource_suffixes.hanadata, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "data" + storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size + throughput_in_mibps = upper(try(local.ANF_pool_settings.qos_type, "MANUAL")) == "AUTO" ? 
null : var.hana_ANF_volumes.data_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + + volume { + name = format("%s%s%s%slog_2%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanalog, + count.index + 1 + ) + volume_path = format("%s-%s-log2%02d", + var.sap_sid, + local.resource_suffixes.hanalog, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "log" + storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size + throughput_in_mibps = upper(try(local.ANF_pool_settings.qos_type, "MANUAL")) == "AUTO" ? null : var.hana_ANF_volumes.log_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + + +} + + +resource "azurerm_netapp_volume_group_sap_hana" "avg_HANA_data3" { + provider = azurerm.main + depends_on = [ azurerm_linux_virtual_machine.vm_dbnode ] + count = local.use_avg && (var.database_server_count / length(var.database.zones) > 2) ? length(var.database.zones) : 0 + name = format("%s%s%s%sdata3_%d", + var.naming.resource_prefixes.hana_avg, + local.prefix, + var.naming.separator, + local.resource_suffixes.hana_avg, count.index + 1 + ) + resource_group_name = local.ANF_pool_settings.resource_group_name + location = local.ANF_pool_settings.location + + account_name = local.ANF_pool_settings.account_name + group_description = format("Application Volume %d group for %s", count.index + 1, var.sap_sid) + application_identifier = local.sid + + volume { + name = format("%s%s%s%sdata_3%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanadata, + count.index + 1 + ) + volume_path = format("%s-%sdata3-%02d", + var.sap_sid, + local.resource_suffixes.hanadata, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "data" + storage_quota_in_gb = var.hana_ANF_volumes.data_volume_size + throughput_in_mibps = upper(try(local.ANF_pool_settings.qos_type, "MANUAL")) == "AUTO" ? 
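# Sketch (assumed values, not part of this patch): how the count expressions
# of avg_HANA_full / avg_HANA_data2 / avg_HANA_data3 resolve, giving one
# application volume group per zone for each HANA node "slot" in that zone.
locals {
  example_zones        = ["1", "2"]
  example_server_count = 6

  servers_per_zone = local.example_server_count / length(local.example_zones) # 3

  avg_full_count  = length(local.example_zones)                                  # 2
  avg_data2_count = local.servers_per_zone > 1 ? length(local.example_zones) : 0 # 2
  avg_data3_count = local.servers_per_zone > 2 ? length(local.example_zones) : 0 # 2
}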
null : var.hana_ANF_volumes.data_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + + volume { + name = format("%s%s%s%slog_32%d", + var.naming.resource_prefixes.hanadata, + local.prefix, + var.naming.separator, + local.resource_suffixes.hanalog, + count.index + 1 + ) + volume_path = format("%s-%s-log2%02d", + var.sap_sid, + local.resource_suffixes.hanalog, + count.index + 1 + ) + service_level = local.ANF_pool_settings.service_level + capacity_pool_id = data.azurerm_netapp_pool.workload_netapp_pool[0].id + subnet_id = try(local.ANF_pool_settings.subnet_id, "") + proximity_placement_group_id = var.ppg[count.index % max(length(var.database.zones), 1)] + volume_spec_name = "log" + storage_quota_in_gb = var.hana_ANF_volumes.log_volume_size + throughput_in_mibps = upper(try(local.ANF_pool_settings.qos_type, "MANUAL")) == "AUTO" ? null : var.hana_ANF_volumes.log_volume_throughput + + protocols = ["NFSv4.1"] + security_style = "unix" + snapshot_directory_visible = false + + export_policy_rule { + rule_index = 1 + allowed_clients = "0.0.0.0/0" + nfsv3_enabled = false + nfsv41_enabled = true + unix_read_only = false + unix_read_write = true + root_access_enabled = true + } + } + + +} + + data "azurerm_netapp_pool" "workload_netapp_pool" { provider = azurerm.main count = length(local.ANF_pool_settings.pool_name) > 0 ? 1 : 0 diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf index bc2641fac9..e2b82bdb4f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf @@ -60,13 +60,13 @@ resource "azurerm_lb" "hdb" { var.naming.separator, local.resource_suffixes.db_alb_feip ) - subnet_id = var.db_subnet.id + subnet_id = var.database.scale_out ? var.admin_subnet.id : var.db_subnet.id private_ip_address = length(try(var.database.loadbalancer.frontend_ips[0], "")) > 0 ? ( var.database.loadbalancer.frontend_ips[0]) : ( var.database.use_DHCP ? ( null) : ( cidrhost( - var.db_subnet.address_prefixes[0], + var.database.scale_out ? var.admin_subnet.address_prefixes[0] : var.db_subnet.address_prefixes[0], tonumber(count.index) + local.hdb_ip_offsets.hdb_lb )) ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index c9f31af7ff..d28830b0bf 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -189,7 +189,7 @@ output "hana_shared" { output "application_volume_group" { description = "Application volume group" - value = azurerm_netapp_volume_group_sap_hana.avg_HANA + value = azurerm_netapp_volume_group_sap_hana.avg_HANA_full } @@ -245,3 +245,11 @@ output "observer_vms" { [""] ) } + +output "site_information" { + description = "Site information" + value = local.enable_deployment ? 
( + local.site_information) : ( + null + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf index 715c87cfa9..b0152df595 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] - version = ">= 3.54" + version = ">= 4.0" } # azapi = { diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf index 0238e44b38..de326b9c1f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_local.tf @@ -428,5 +428,14 @@ locals { observer_custom_image_id = local.enable_deployment ? local.hdb_os.source_image_id : "" observer_os = local.enable_deployment ? local.hdb_os : null + site_information = flatten( + [ + for idx, server_count in range(var.database_server_count) : + [ + idx %2 == 0 ? "SITE1" : "SITE2" + ] + ] + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index e81afe012d..444624d787 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -185,7 +185,7 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { admin_username = var.sid_username admin_password = local.enable_auth_key ? null : var.sid_password disable_password_authentication = !local.enable_auth_password - tags = merge(var.tags, local.tags) + tags = merge(var.tags, local.tags, var.database.scale_out ? { "SITE" = count.index %2 == 0 ? "SITE1" : "SITE2" } : null) patch_mode = var.infrastructure.patch_mode patch_assessment_mode = var.infrastructure.patch_assessment_mode @@ -211,19 +211,12 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { ) : null network_interface_ids = local.enable_storage_subnet ? ( - var.options.legacy_nic_order ? ( - compact([ + compact([ var.database_dual_nics ? azurerm_network_interface.nics_dbnodes_admin[count.index].id : null, azurerm_network_interface.nics_dbnodes_db[count.index].id, azurerm_network_interface.nics_dbnodes_storage[count.index].id ]) - ) : ( - compact([ - azurerm_network_interface.nics_dbnodes_db[count.index].id, - var.database_dual_nics ? azurerm_network_interface.nics_dbnodes_admin[count.index].id : null, - azurerm_network_interface.nics_dbnodes_storage[count.index].id - ]) - ) + ) : ( var.database_dual_nics ? ( var.options.legacy_nic_order ? 
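# Sketch (assumed database_server_count = 4): the SITE values derived in
# variables_local.tf above, which drive both the VM "SITE" tag and the
# Ansible inventory. Even VM indices land in HSR site 1, odd indices in site 2.
locals {
  example_site_information = [for idx in range(4) : idx % 2 == 0 ? "SITE1" : "SITE2"]
  # => ["SITE1", "SITE2", "SITE1", "SITE2"]
}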
( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf index 422bc3d28a..9441c82f22 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -17,7 +17,7 @@ resource "azurerm_network_interface" "observer" { ) resource_group_name = var.resource_group[0].name location = var.resource_group[0].location - enable_accelerated_networking = false + accelerated_networking_enabled = true tags = var.tags ip_configuration { diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl index d5232c1cbc..35fa441d58 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/ansible_inventory.tmpl @@ -10,6 +10,10 @@ ${sid}_DB: become_user : ${db_become_user} os_type : ${db_os_type} vm_name : ${db_vmnodes[idx]} +%{~ if scale_out } + site : ${site[idx]} +%{~ endif } + %{~ if db_connectiontype == "winrm" } ${winrm_cert_valid} ${winrm_timeout_sec} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index 2780be48ec..95c1a5fe22 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -6,7 +6,7 @@ resource "local_file" "ansible_inventory_new_yml" { content = templatefile(format("%s%s", path.module, "/ansible_inventory.tmpl"), { - ips_dbnodes = var.database_server_ips + ips_dbnodes = var.scale_out ? var.database_admin_ips : var.database_server_ips dbnodes = var.platform == "HANA" ? var.naming.virtualmachine_names.HANA_COMPUTERNAME : var.naming.virtualmachine_names.ANYDB_COMPUTERNAME db_vmnodes = var.database_server_vm_names virt_dbnodes = var.use_secondary_ips ? 
( @@ -158,6 +158,9 @@ resource "local_file" "ansible_inventory_new_yml" { iscsi_servers = var.iSCSI_server_names iscsi_server_list = var.iSCSI_servers + site = var.site_information + scale_out = var.scale_out + } ) filename = format("%s/%s_hosts.yaml", path.cwd, var.sap_sid) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf index a56a9e470e..614916ff7d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.dnsmanagement] - version = "~> 3.3" + version = "~> 4.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index b8bb496685..6f2e28e79f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -225,3 +225,5 @@ variable "sap_cal_product_name" { description = "Product name of SAP CAL" default = "" } + +variable "site_information" { description = "Site information" } From e7b66cdaa3bd6bd76683ca390ec1e3a98a57a86e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 14 Sep 2024 21:33:48 +0300 Subject: [PATCH 602/607] Squashed commit of the following: commit 0961f188952d6b5920b210359d1933ee331565ff Author: Kimmo Forss Date: Sat Sep 14 14:34:48 2024 +0300 Refactor VM name generation for scale-out databases with zonal markers Don 't remove hana shared from fstab Refactor ANF mount logic for HANA shared volume based on site configuration Refactor inventory.tf to include scale_out variable Update provider configurations to use version 4.0 or higher Refactor ANF volume group creation in HDB node module --- .../tasks/2.6.1-anf-mounts.yaml | 8 +-- .../tasks/2.6.1.1-anf-mount.yaml | 25 +------- deploy/terraform/run/sap_system/module.tf | 1 + .../modules/sap_namegenerator/output.tf | 6 +- .../sap_namegenerator/variables_global.tf | 5 ++ .../modules/sap_namegenerator/vm.tf | 21 +++++++ .../modules/sap_system/hdb_node/outputs.tf | 59 +++++++++++++++---- 7 files changed, 85 insertions(+), 40 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 088d0734a6..aac1b20aa3 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -654,7 +654,7 @@ when: - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 0 - - ansible_hostname == db_hosts[0] + - not database_scale_out - name: "ANF Mount: HANA data (secondary)" ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml @@ -722,8 +722,7 @@ when: - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 - - db_hosts | length == 2 - - ansible_hostname == db_hosts[1] + - not database_scale_out - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" ansible.builtin.file: @@ -802,7 +801,8 @@ # change folder to match the mount folder within the share 'folder': 'shared', # Logic : hana_shared_mountpoint[0] goes on odd numbered HANA hosts and hana_shared_mountpoint[1] goes on 
even numbered HANA hosts. - 'mount': "{% if ansible_hostname in query('inventory_hostnames', '{{ sap_sid | upper }}_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}", +# 'mount': "{% if ansible_hostname in query('inventory_hostnames', '{{ sap_sid | upper }}_DB')[0::2] %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}", + 'mount': "{% if site | default('SITE1') == 'SITE1' %}{{ hana_shared_mountpoint[0] }}{% else %}{{ hana_shared_mountpoint[1] }}{% endif %}", 'opts': '{{ mnt_options }}', 'path': '{{ hana_shared_basepath }}', 'permissions': '0775', diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index 4ee385f983..676049cff4 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -142,28 +142,7 @@ when: - node_tier in item.target_nodes or item.target_nodes == ['all'] -- name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - -- name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - -- name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ db_high_availability | default(false) }}" - when: - - db_high_availability is defined - - database_high_availability is not defined - -# absent_from_fstab +# Remove entries from fstab - name: "ANF Mount: RHEL DB high availability configuration" when: ansible_os_family | upper == "REDHAT" and database_high_availability block: @@ -178,6 +157,7 @@ - item.target_nodes == ['hana'] - item.type in ['data','log','shared'] - database_high_availability + - not database_scale_out - name: "ANF Mount: make mount for {{ item.path }} ephemeral when DB high availability" ansible.posix.mount: @@ -190,6 +170,7 @@ - item.target_nodes == ['hana'] - item.type in ['data','log','shared'] - database_high_availability + - not database_scale_out # https://www.suse.com/support/kb/doc/?id=000019904 # - name: "ANF Mount: SLES DB high availability configuration" diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 2dc0df308c..c12541a31a 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -39,6 +39,7 @@ module "sap_namegenerator" { scs_high_availability = local.application_tier.scs_high_availability scs_cluster_type = local.application_tier.scs_cluster_type use_zonal_markers = var.use_zonal_markers + scale_out = var.database_HANA_use_ANF_scaleout_scenario } ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf index 1f7c95456d..552b1a617d 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf @@ -65,9 +65,9 @@ 
output "naming" { ANYDB_SECONDARY_DNSNAME = var.database_high_availability ? local.anydb_secondary_dnsnames_ha : local.anydb_secondary_dnsnames ANYDB_VMNAME = var.database_high_availability ? local.anydb_vm_names_ha : local.anydb_vm_names DEPLOYER = local.deployer_vm_names - HANA_COMPUTERNAME = var.database_high_availability ? local.hana_computer_names_ha : local.hana_computer_names - HANA_SECONDARY_DNSNAME = var.database_high_availability ? local.hana_secondary_dnsnames_ha : local.hana_secondary_dnsnames - HANA_VMNAME = var.database_high_availability ? local.hana_server_vm_names_ha : local.hana_server_vm_names + HANA_COMPUTERNAME = var.database_high_availability ? var.scale_out ? local.hana_computer_names_scaleout : concat(local.hana_computer_names, local.hana_computer_names_ha) : local.hana_computer_names + HANA_SECONDARY_DNSNAME = var.database_high_availability ? var.scale_out ? local.hana_secondary_dnsnames_scaleout : concat(local.hana_secondary_dnsnames, local.hana_secondary_dnsnames_ha) : local.hana_secondary_dnsnames + HANA_VMNAME = var.database_high_availability ? var.scale_out ? local.hana_server_vm_names_scaleout : concat(local.hana_server_vm_names, local.hana_server_vm_names_ha) : local.hana_server_vm_names ISCSI_COMPUTERNAME = local.iscsi_server_names OBSERVER_COMPUTERNAME = local.observer_computer_names OBSERVER_VMNAME = local.observer_vm_names diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf index d113ad9d07..073ce697e8 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf @@ -559,3 +559,8 @@ variable "utility_vm_count" { type = number default = 0 } + +variable "scale_out" { + type = bool + default = false +} diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf index d3b9fd170f..c79b25ce89 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf @@ -40,6 +40,13 @@ locals { ) ] + anydb_vm_names_scaleout = [for idx in range(var.db_server_count * 2) : + length(var.db_zones) > 0 && var.use_zonal_markers ? ( + format("%sdb%sz%s%s%02d%s%d%s", lower(var.sap_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified)) : ( + format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified) + ) + ] + app_computer_names = [for idx in range(var.app_server_count) : format("%sapp%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_vm_verified) ] @@ -63,6 +70,10 @@ locals { format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2)) ] + hana_computer_names_scaleout = [for idx in range(var.db_server_count * 2) : + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx + 1 ) % 2)), substr(local.random_id_vm_verified, 0, 2)) + ] + hana_server_vm_names = [for idx in range(var.db_server_count) : length(var.db_zones) > 0 && var.use_zonal_markers ? 
( format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : ( @@ -77,6 +88,13 @@ locals { ) ] + hana_server_vm_names_scaleout = [for idx in range(var.db_server_count * 2) : + length(var.db_zones) > 0 && var.use_zonal_markers ? ( + format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, tonumber(( idx % 2)), local.random_id_vm_verified)) : ( + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber(( idx % 2)), local.random_id_vm_verified) + ) + ] + scs_computer_names = [for idx in range(var.scs_server_count) : format("%sscs%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_vm_verified) ] @@ -135,6 +153,9 @@ locals { format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_virt_vm_verified) ] + hana_secondary_dnsnames_scaleout = [for idx in range(var.db_server_count * 2) : + format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx + 1 ) % 2)), local.random_id_vm_verified) + ] scs_secondary_dnsnames = [for idx in range(var.scs_server_count) : format("v%ss%02d%s%s", lower(var.sap_sid), idx + var.resource_offset, local.app_oscode, local.random_id_virt_vm_verified) ] diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index d28830b0bf..5d8f9da3d3 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -168,24 +168,61 @@ output "hana_log_ANF_volumes" { ]) : [] } +# Order the list so that the zonal information is in the correct order + output "hana_shared" { - description = "HANA Shared primary volume" - value = local.shared_volume_count > 0 ? flatten([ - for idx in range(local.shared_volume_count) : [ + description = "HANA Shared volumes" + value = local.shared_volume_count == 0 ? ( + [] + ) : (( + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? data.azurerm_netapp_volume.hanashared[0].zone : azurerm_netapp_volume.hanashared[0].zone)) == var.database.zones[0] ? ( + [ format("%s:/%s", var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0]) : ( - azurerm_netapp_volume.hanashared[idx].mount_ip_addresses[0] + data.azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0] ), var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( - data.azurerm_netapp_volume.hanashared[idx].volume_path) : ( - azurerm_netapp_volume.hanashared[idx].volume_path + data.azurerm_netapp_volume.hanashared[0].volume_path) : ( + azurerm_netapp_volume.hanashared[0].volume_path ) + ), + format("%s:/%s", + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? 
( + data.azurerm_netapp_volume.hanashared[1].volume_path) : ( + azurerm_netapp_volume.hanashared[1].volume_path ) - - ] - ]) : [] - } + ) + ] + ) : ( + [ + format("%s:/%s", + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanashared[1].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[1].volume_path) : ( + azurerm_netapp_volume.hanashared[1].volume_path + ) + ), + format("%s:/%s", + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0]) : ( + azurerm_netapp_volume.hanashared[0].mount_ip_addresses[0] + ), + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + data.azurerm_netapp_volume.hanashared[0].volume_path) : ( + azurerm_netapp_volume.hanashared[0].volume_path + ) + ) + ] + ) + } output "application_volume_group" { description = "Application volume group" From ebe1621ed61282535bb4973c3242469c7c879c00 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sun, 15 Sep 2024 17:37:27 +0300 Subject: [PATCH 603/607] Scaleout hsr - naming fix (#632) * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module and update provider configurations * Update provider configurations * Update provider configurations to use version 4.0 or higher * chore: replace db_scale_out with database_scale_out * Bump up the version * Refactor NAT Gateway resource properties in outputs.tf * Refactor azapi provider source in providers.tf * Refactor address space concatenation in NSG rule * For scaleout use the admin subnet ID * TF 4.0 support * Refactor provider configuration for SAP library * For scale out make the admin nic the primary * Update Azure.ResourceManager.Network package to version 1.9.0 * Refactor inventory.tf to support scale-out for SAP system * Add site information * Refactor vm-hdb.tf to include site information for scale-out databases * Refactor HANA shared volume output for improved zonal ordering * Refactor VM name generation for scale-out databases with zonal markers * Refactor ANF mount logic for HANA shared volume based on site configuration * Don't remove hana shared from fstab * Refactor SAP user and group creation for generic installation --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla --- .../ansible/playbook_04_00_00_db_install.yaml | 3 -- deploy/ansible/playbook_05_01_sap_dbload.yaml | 30 +++++++++---------- .../tasks/main.yaml | 10 +++++-- .../1.18-scaleout-pacemaker/defaults/main.yml | 2 -- .../tasks/1.18.2.0-cluster-RedHat.yml | 2 +- .../tasks/1.18.2.0-cluster-Suse.yml | 2 +- .../tasks/bom_processor.yaml | 2 +- .../tasks/process_exe_archives.yaml | 1 + .../tasks/5.8.3-SAPHanaSRMultiTarget.yml | 4 +-- .../5.8.4.0-clusterPrep-ScaleOut-RedHat.yml | 8 ++--- .../tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml | 2 -- .../tasks/5.8.5-post_provision_report.yml | 2 -- .../modules/sap_namegenerator/vm.tf | 2 +- 13 files changed, 32 insertions(+), 38 deletions(-) diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index ea498a1fd4..062a8b9921 100644 --- a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -534,9
+534,6 @@ state: touch mode: 0755 - - - # /*----------------------------------------------------------------------------8 # | | # | Playbook for Oracle DB Install diff --git a/deploy/ansible/playbook_05_01_sap_dbload.yaml b/deploy/ansible/playbook_05_01_sap_dbload.yaml index bfbf23d2f7..e1a19f5984 100644 --- a/deploy/ansible/playbook_05_01_sap_dbload.yaml +++ b/deploy/ansible/playbook_05_01_sap_dbload.yaml @@ -117,21 +117,21 @@ ansible.builtin.set_fact: all_sids: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sids | default([]) + [this_sid] }}{% endif %}" - - name: Generic Users and Groups for SAP Installation - ansible.builtin.include_role: - name: roles-sap-os/2.5-sap-users - tasks_from: user_nw.yaml - vars: - scs_instance_number: "{{ sid_to_be_deployed.ascs_inst_no }}" - tier: generic - main_password: "{{ hostvars.localhost.sap_password }}" - sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" - sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" - loop: "{{ all_sids }}" - loop_control: - loop_var: sid_to_be_deployed - tags: - - 2.5-sap-users + # - name: Generic Users and Groups for SAP Installation + # ansible.builtin.include_role: + # name: roles-sap-os/2.5-sap-users + # tasks_from: user_nw.yaml + # vars: + # scs_instance_number: "{{ sid_to_be_deployed.ascs_inst_no }}" + # tier: generic + # main_password: "{{ hostvars.localhost.sap_password }}" + # sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" + # sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + # loop: "{{ all_sids }}" + # loop_control: + # loop_var: sid_to_be_deployed + # tags: + # - 2.5-sap-users - name: "DBLoad Playbook: - Mounting" ansible.builtin.include_role: diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 10452a2368..9939935f0f 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -599,7 +599,9 @@ state: present mode: 0644 option: basepath_shared - value: no + value: "{{ _rsp_basepath_shared }}" + tags: + - skip_ansible_lint - name: "Prepare global.ini for site hosts name resolution (Primary Site)" community.general.ini_file: @@ -872,7 +874,9 @@ state: present mode: 0644 option: basepath_shared - value: no + value: "{{ _rsp_basepath_shared }}" + tags: + - skip_ansible_lint - name: "Prepare global.ini for site hosts name resolution (Secondary Site)" community.general.ini_file: @@ -1035,7 +1039,7 @@ mode: '0755' owner: root group: root - force: yes + force: true - name: "Create {{ db_sid | lower }}adm account for Observer " when: diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml index a9baae5c17..9272a1081c 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/defaults/main.yml @@ -6,5 +6,3 @@ distro_name: "{{ ansible_distribution | upper }}-{{ ansible_distribution_major_version }}" distribution_id: "{{ ansible_distribution | lower ~ ansible_distribution_major_version }}" distribution_full_id: "{{ ansible_distribution | lower ~ ansible_distribution_version }}" - - diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml 
index 3291a3a474..0117680585 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-RedHat.yml @@ -214,7 +214,7 @@ - name: "1.18.2.0 Generic Pacemaker - Install fence-agents-kdump package" when: - kdump_enabled | default("disabled") == "enabled" - ansible.builtin.yum: + ansible.builtin.dnf: name: fence-agents-kdump state: present register: fence_agents_kdump_package diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml index a7c02f3bac..baed76f33f 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml @@ -80,7 +80,7 @@ crm configure property stonith-enabled=true crm configure property concurrent-fencing=true -# templatize the pcmk_host_map from all entries in ansible_play_hosts_all and mm_hosts +# templatize the pcmk_host_map from all entries in ansible_play_hosts_all and mm_hosts - name: "1.18.2.0 Generic Pacemaker - Create Azure Fencing Agent" ansible.builtin.command: > crm configure primitive rsc_st_azure stonith:fence_azure_arm params diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml index 8d5fa0cea2..2c686456f8 100644 --- a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/bom_processor.yaml @@ -194,7 +194,7 @@ # RHEL does not distribute unrar anymore https://access.redhat.com/solutions/28959 # RHEL recommends using unarchiver ( alias : unar ) in EPEL repository from Fedora as unrar from https://www.rarlab.com does not comply with OpenSource Licenses. # This also presents another problem as the archive files will get unpacked into /usr/sap/install/CD_EXPORT// and will cause setup to fail.
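The comment block above ends by pointing at process_exe_archives.yaml (next hunk), which handles the problem by extracting the RAR-packaged EXE archives into a scratch directory first and then copying the result into the target tree; the mode: 0755 line added in that hunk belongs to the copy step. A minimal sketch of that pattern, assuming hypothetical variable names (archive_file, extract_dir) rather than the repository's actual ones:

    # Sketch only: archive_file and extract_dir are illustrative assumptions.
    - name: Create a temporary extraction directory
      ansible.builtin.tempfile:
        state: directory
      register: tempdir

    - name: Extract the archive (unar on RedHat, unrar elsewhere)
      ansible.builtin.command: >-
        {% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %}
        {{ archive_file }}
      args:
        chdir: "{{ tempdir.path }}"

    - name: Copy the extracted content into the final location
      ansible.builtin.copy:
        src: "{{ tempdir.path }}/"
        dest: "{{ extract_dir }}"
        remote_src: true
        mode: '0755'

    - name: Remove the temporary directory
      ansible.builtin.file:
        path: "{{ tempdir.path }}"
        state: absent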
-# This issue is now being handled by process_exe_archives.yaml +# This issue is now being handled by process_exe_archives.yaml # - name: "3.3 BoM Processing: - Extract File, UNRAR" # ansible.builtin.command: "{% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml index 68f8fc850b..858422273a 100644 --- a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml @@ -33,6 +33,7 @@ src: '{{ tempdir.path }}{% if item.tempDir is defined %}/{{ item.tempDir }}{% endif %}/' dest: '{{ target_media_location }}/{{ item.extractDir }}' remote_src: true + mode: 0755 - name: Remove extract directory ansible.builtin.file: diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml index 2ed1a9fb2d..1b61373654 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml @@ -254,8 +254,6 @@ when: - ansible_os_family | upper == "REDHAT" - - - name: Start HANA System on both nodes become_user: "{{ db_sid | lower }}adm" become: true @@ -367,7 +365,7 @@ # - inventory_hostname == primary_instance_name - ansible_os_family | upper == "SUSE" - is_susTkOver_ready is defined - - is_susTkOver_ready == true + - is_susTkOver_ready block: - name: "Verify the hook Installation (SUSE)" become_user: "{{ db_sid | lower }}adm" diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml index 36c8ed2ae2..fe9a0e91f4 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml @@ -32,8 +32,8 @@ - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 block: - #Notes : - # 1. keep cluster out of maintenance mode + # Notes : + # 1. keep cluster out of maintenance mode # 2. Stopping HANA is fine, but do not kill any sap services locking /hana/shared especially if you have configured replication between sites. # 3. mask the /hana/shared in /etc/fstab and configure filesystem role in pacemaker. Let pacemaker handle share mount. # 4. Do not kill existing processes and attempt to unmount /hana/shared. Bad things will happen. 
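The notes above capture the intended /hana/shared handling for the HA scale-out case: the share must not be auto-mounted from /etc/fstab; instead it is mounted without an fstab entry and ownership of the mount is handed to a Pacemaker Filesystem resource. A minimal sketch of that idea, assuming an illustrative NFS path variable and resource name (state: ephemeral needs ansible.posix 1.5.0 or later, and the clone syntax shown is the pcs 0.10 / RHEL 8 form):

    # Sketch only: hana_shared_nfs_path and fs_hana_shared are illustrative names.
    - name: Mount /hana/shared without writing an /etc/fstab entry
      ansible.posix.mount:
        src: "{{ hana_shared_nfs_path }}"
        path: /hana/shared
        fstype: nfs4
        opts: rw,hard,vers=4.1
        state: ephemeral

    - name: Hand the mount to the cluster as a cloned Filesystem resource
      ansible.builtin.command: >-
        pcs resource create fs_hana_shared ocf:heartbeat:Filesystem
        device='{{ hana_shared_nfs_path }}' directory='/hana/shared'
        fstype='nfs4' options='rw,hard,vers=4.1'
        op monitor interval=20s clone interleave=true
      register: fs_resource
      changed_when: fs_resource.rc == 0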
@@ -48,7 +48,7 @@ failed_when: false changed_when: false register: hana_system_stopped - when: ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] + when: ansible_hostname in [ primary_instance_name , secondary_instance_name ] - name: Wait 2 minutes for SAP system to stop ansible.builtin.wait_for: @@ -210,7 +210,7 @@ changed_when: false register: hana_system_started when: - - ansible_hostname in ["{{ primary_instance_name }}","{{ secondary_instance_name }}"] + - ansible_hostname in [ primary_instance_name , secondary_instance_name ] - name: Wait 5 minutes for SAP system to stabilize ansible.builtin.wait_for: diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml index eb674c6f92..88715aa4be 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml @@ -107,8 +107,6 @@ register: nfs_constraint failed_when: nfs_constraint.rc > 0 - - - name: "5.5.4.1 HANA cluster resource configuration - RHEL 8/9" when: - ansible_distribution_major_version in ["8", "9"] diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml index d31bef033a..375a071753 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml @@ -154,11 +154,9 @@ delay: 30 - # SUSE only # Check on all nodes, status of susTkOver Hook - - name: "Log that the hook script is working as expected" block: - name: "Debug (saphanasr)" diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf index c79b25ce89..644bc86fd5 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf @@ -71,7 +71,7 @@ locals { ] hana_computer_names_scaleout = [for idx in range(var.db_server_count * 2) : - format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx + 1 ) % 2)), substr(local.random_id_vm_verified, 0, 2)) + format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx ) % 2)), substr(local.random_id_vm_verified, 0, 2)) ] hana_server_vm_names = [for idx in range(var.db_server_count) : From 25d33e10553d7a6ab1d7daee38d3e9ca39e9216b Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 16 Sep 2024 00:50:26 +0300 Subject: [PATCH 604/607] Scaleout hsr - debugging info (#633) * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module * Refactor ANF volume group creation in HDB node module and update provider configurations * Refactor ANF volume group creation in HDB node module and add dependencies * Refactor ANF volume group creation in HDB node module and update provider configurations *
Update provider configurations * Update provider configurations to use version 4.0 or higher * Refactor ANF volume group creation in HDB node module and update provider configurations * chore: replace db_scale_out with database_scale_out * Bump up the version * Add User creation * Set permissions on /sapmnt * Fix capitalization of source in providers.tf * Refactor NAT Gateway resource properties in outputs.tf * Refactor azapi provider source in providers.tf * Refactor private endpoint network policies in subnet resources * Refactor private endpoint network policies in subnet resources * Refactor private endpoint network policies in subnet resources * Refactor address space concatenation in NSG rule * Refactor address space concatenation in NSG rule * Refactor address space concatenation in NSG rule * Refactor address space concatenation in NSG rule * For scaleout use the admin subnet ID * TF 4.0 support * Refactor SAP user and group creation for generic installation * More verbose debugging --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla --- deploy/ansible/playbook_04_00_01_db_ha.yaml | 4 +-- .../defaults/main.yaml | 1 + .../tasks/main.yaml | 34 +++++++++---------- 3 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/defaults/main.yaml diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml index 3304c0cc9e..89169d252d 100644 --- a/deploy/ansible/playbook_04_00_01_db_ha.yaml +++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml @@ -185,7 +185,7 @@ ansible.builtin.include_role: name: roles-sap/5.5-hanadb-pacemaker when: - - db_high_availability or database_high_availability + - database_high_availability - not database_scale_out tags: - 5.5-hanadb-pacemaker @@ -194,7 +194,7 @@ ansible.builtin.include_role: name: roles-sap/5.8-hanadb-scaleout-pacemaker when: - - db_high_availability or database_high_availability + - database_high_availability - database_scale_out tags: - 5.8-hanadb-scaleout-pacemaker diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/defaults/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/defaults/main.yaml new file mode 100644 index 0000000000..fae27bd53a --- /dev/null +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/defaults/main.yaml @@ -0,0 +1 @@ +_rsp_basepath_shared: "no" diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 9939935f0f..363bb3a484 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -140,7 +140,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: - msg: "Start HANA Installation" + msg: "Start HANA Installation on primary node" - name: "4.0.3 - SAP HANA SCALE OUT: installation" block: @@ -164,7 +164,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: - msg: "Restarting the HANA Installation" + msg: "Restarting the HANA Installation on primary node" when: hana_installation.rc == 1 @@ -191,7 +191,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: msg: - - "HANA Installation failed" + - "HANA Installation failed on primary node" - "HDBLCM output: {{ hana_installation }}" when: - hana_installation.rc is defined @@ -209,7 +209,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT: Installation results" ansible.builtin.debug: - 
msg: "HANA Installation succeeded" + msg: "HANA Installation succeeded on primary node" - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install: flag" ansible.builtin.file: @@ -380,7 +380,7 @@ - not hana_installed.stat.exists - not (database_high_availability | default(false)) # Only allowed for the first node. No other node in the scale out - ANF setup is allowed to install hdblcm. - - ansible_hostname == db_hosts[0] + - ansible_hostname == primary_instance_name - database_scale_out is defined - database_scale_out @@ -447,7 +447,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" ansible.builtin.debug: - msg: "Start HANA Installation" + msg: "Start HANA Installation on primary node" - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" block: @@ -471,7 +471,7 @@ - name: "SAP HANA SCALE OUT-HSR: Progress" ansible.builtin.debug: - msg: "Restarting the HANA Installation" + msg: "Restarting the HANA Installation on primary node" when: hana_installation.rc == 1 - name: "SAP HANA SCALE OUT-HSR: Re-execute hdblcm on {{ virtual_host }} and rescue" @@ -492,12 +492,12 @@ rescue: - name: "Fail if HANA installation failed on second attempt." ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." + msg: "INSTALL:0022:Execute hdblcm failed on primary node." - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" ansible.builtin.debug: msg: - - "HANA Installation failed" + - "HANA Installation failed on primary node" - "HDBLCM output: {{ hana_installation }}" when: - hana_installation.rc is defined @@ -515,7 +515,7 @@ - name: "SAP HANA SCALE OUT-HSR: Installation results" ansible.builtin.debug: - msg: "HANA Installation succeeded" + msg: "HANA Installation succeeded on primary node" - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" ansible.builtin.file: @@ -673,7 +673,7 @@ # /*---------------------------------------------------------------------------8 # | Secondary site setup with Shared nothing scale out | # +------------------------------------4--------------------------------------*/ - - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" + - name: "4.0.3 - SAP HANA SCALE OUT: HANA Install - Scale Out - HSR ( Secondary Site )" block: - name: "4.0.3 - SAP HANA SCALE OUT-HSR: remove install response file if exists" ansible.builtin.file: @@ -722,7 +722,7 @@ - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Progress" ansible.builtin.debug: - msg: "Start HANA Installation" + msg: "Start HANA Installation on secondary node" - name: "4.0.3 - SAP HANA SCALE OUT-HSR: installation" block: @@ -746,7 +746,7 @@ - name: "SAP HANA: Progress" ansible.builtin.debug: - msg: "Restarting the HANA Installation" + msg: "Restarting the HANA Installation on secondary node" when: hana_installation.rc == 1 - name: "SAP HANA: Re-execute hdblcm on {{ virtual_host }} and rescue" @@ -767,12 +767,12 @@ rescue: - name: "Fail if HANA installation failed on second attempt." ansible.builtin.fail: - msg: "INSTALL:0022:Execute hdblcm failed." + msg: "INSTALL:0022:Execute hdblcm failed on secondary node." 
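All of these message tweaks sit inside the same install/retry shape this role uses around hdblcm: run the installer once, re-run it when the first pass returns rc 1, and let a rescue section convert any remaining failure into the stable INSTALL:0022 error string that the pipeline reports. A minimal sketch of that block/rescue pattern, with a placeholder command standing in for the real hdblcm invocation:

    - name: Run the installer, retrying once before failing
      block:
        - name: Execute installer (first attempt)
          ansible.builtin.command: ./install.sh --batch  # placeholder command
          register: install_result
          failed_when: install_result.rc > 1             # rc 1 is treated as retriable

        - name: Re-execute installer when the first attempt asked for a restart
          ansible.builtin.command: ./install.sh --batch  # placeholder command
          when: install_result.rc == 1
      rescue:
        - name: Surface a stable error code for the pipeline
          ansible.builtin.fail:
            msg: "INSTALL:0022:Execute installer failed."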
- name: "4.0.3 - SAP HANA SCALE OUT-HSR: Installation results" ansible.builtin.debug: msg: - - "HANA Installation failed" + - "HANA Installation failed on secondary node" - "HDBLCM output: {{ hana_installation }}" when: - hana_installation.rc is defined @@ -790,7 +790,7 @@ - name: "SAP HANA SCALE OUT-HSR: Installation results" ansible.builtin.debug: - msg: "HANA Installation succeeded" + msg: "HANA Installation succeeded on secondary node" - name: "SAP HANA SCALE OUT-HSR: HANA Install: flag" ansible.builtin.file: @@ -925,7 +925,7 @@ ansible.builtin.wait_for: timeout: 120 - - name: "Start HANA Database" + - name: "Start HANA Database on secondary node" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | From 26934fd8f6e46341ba50fb01c279c78c90b2a8bf Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Mon, 23 Sep 2024 21:11:31 +0300 Subject: [PATCH 605/607] Add support for HANA Scale Out with HSR (#635) * Refactor ANF volume group creation in HDB node module and update provider configurations * chore: replace db_scale_out with database_scale_out * Bump up the version * Add User creation * Set permissions on /sapmnt * Fix capitalization of source in providers.tf * Refactor NAT Gateway resource properties in outputs.tf * Refactor azapi provider source in providers.tf * Refactor private endpoint network policies in subnet resources * Refactor address space concatenation in NSG rule * For scaleout use the admin subnet ID * TF 4.0 support * Refactor provider configuration for SAP library * For scale out make the admin nic the primary * Refactor Variable class to remove nullable value property Update Azure.ResourceManager.Network package to version 1.9.0 * Refactor inventory.tf to support scale-out for SAP system * Add site information variable * Refactor HANA and secondary DNS names for scale-out databases * Refactor HANA shared volume output for improved zonal ordering * Refactor VM name generation for scale-out databases with zonal markers * Refactor ANF mount logic for HANA shared volume based on site configuration * Don 't remove hana shared from fstab * Refactor VM name generation for scale-out databases with zonal markers * Linting fixes * More verbose debugging * Move observer to client NIC * Misc fixes * Jinja updates * Update main.yaml - correct subnet entry for [system_replication_hostname_resolution] to point to HSR subnet for scale out - HSR based deployment * Update 5.5.4.0-clusterPrep-RedHat.yml * fix for HANA scale out - HSR not correctly populating FS resource params * Update 5.8.4.0-clusterPrep-ScaleOut-RedHat.yml removing bind in NFS options as its causing mount failure * Start HANA using sapcontrol commands * Fix HANA HSR start command in clusterPrep-ScaleOut-RedHat.yml * Task naming * Update HANA HSR start command in clusterPrep-ScaleOut-RedHat.yml * Refactor HANA Pacemaker: Remove unused code and update helper variables * chore: Update environment variables in provision-ScaleOut.yml * Refactor HANA Pacemaker: Remove unused code and update helper variables * Refactor HANA Pacemaker: Update HANA start task to use include_tasks instead of import_tasks * Refactor sap_namegenerator module: Update computer and VM names for HA configuration * Update github-actions-ansible-lint.yml Use commit hashes instead of versions Signed-off-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> * Create scorecard.yml create ossf scorecard workflow Signed-off-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> * Update scorecard.yml Only default branch 
is supported Signed-off-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> * Create ossf-scorecard.yml create ossf-scorecard.yml in main Signed-off-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> * chore: Update NFS filesystem resource configuration for HSR sites and general cluster properties * Refactor cluster preparation task to display cluster configuration status * Refactor cluster preparation task to remove unnecessary debug statements * Add rescue action * Refactor cluster preparation task to remove unnecessary debug statements and add rescue action * HDB start * Update main.yaml * Change the load balancer backend pool for scale out scenarios * Remove obsolete files --------- Signed-off-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla Co-authored-by: Shekhar Sorot ( MSFT ) Co-authored-by: Shekhar Sorot ( MSFT ) Co-authored-by: hdamecharla <71097261+hdamecharla@users.noreply.github.com> --- .../workflows/github-actions-ansible-lint.yml | 4 +- .github/workflows/ossf-scorecard.yml | 73 ++ .github/workflows/scorecard.yml | 73 ++ .../roles-db/4.0.1-hdb-hsr/defaults/main.yml | 7 +- .../tasks/4.0.1.6-post_checks.yml | 54 +- .../tasks/main.yaml | 80 +- .../tasks/main.yaml | 38 +- .../roles-misc/0.4-helpers/defaults/main.yaml | 3 + .../0.4-helpers/tasks/04.01-start_hana.yml | 68 +- .../0.4-helpers/tasks/04.02-stop_hana.yml | 48 +- .../tasks/1.17.1-pre_checks.yml | 2 +- .../tasks/1.18.0-set_runtime_facts.yml | 10 +- .../tasks/1.18.1-pre_checks.yml | 2 +- .../tasks/1.18.2.0-cluster-Suse.yml | 2 +- .../tasks/1.18.3-post_provision_report.yml | 12 +- .../2.4-hosts-file/templates/hosts.j2 | 8 +- .../tasks/2.6.1.2-anf-mounts-scaleout.yaml | 990 ------------------ .../tasks/2.6.3-oracle-asm-mounts.yaml | 36 +- .../tasks/2.6.3-oracle-asm-prereq.yaml | 55 +- .../2.6-sap-mounts/tasks/main.yaml | 4 +- .../roles-sap/5.2-pas-install/tasks/main.yaml | 2 +- .../tasks/5.5.3-SAPHanaSR.yml | 4 - .../tasks/5.5.4.0-clusterPrep-RedHat.yml | 14 +- .../tasks/5.5.4.1-cluster-Suse.yml | 4 +- .../tasks/5.6.6-validate.yml | 8 +- .../tasks/5.8.1-set_runtime_facts.yml | 31 +- .../tasks/5.8.2-pre_checks.yml | 32 +- .../5.8.3-SAPHanaSRMultiTarget-RedHat.yml | 197 ++++ ...ml => 5.8.3-SAPHanaSRMultiTarget-Suse.yml} | 242 ++--- .../tasks/5.8.4-provision-ScaleOut.yml | 18 +- .../5.8.4.0-clusterPrep-ScaleOut-RedHat.yml | 185 ++-- .../5.8.4.0-clusterPrep-ScaleOut-Suse.yml | 13 +- .../tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml | 64 +- .../tasks/5.8.4.1-cluster-ScaleOut-Suse.yml | 62 +- .../tasks/5.8.5-post_provision_report.yml | 110 +- .../tasks/main.yml | 20 +- deploy/ansible/vars/ansible-input-api.yaml | 9 +- deploy/terraform/run/sap_system/providers.tf | 1 + .../modules/sap_namegenerator/vm.tf | 41 +- .../sap_system/hdb_node/infrastructure.tf | 4 +- .../sap_system/hdb_node/variables_global.tf | 2 +- .../sap_system/hdb_node/vm-observer.tf | 9 +- 42 files changed, 1024 insertions(+), 1617 deletions(-) create mode 100644 .github/workflows/ossf-scorecard.yml create mode 100644 .github/workflows/scorecard.yml delete mode 100644 deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml create mode 100644 deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-RedHat.yml rename deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/{5.8.3-SAPHanaSRMultiTarget.yml => 5.8.3-SAPHanaSRMultiTarget-Suse.yml} (55%) diff --git a/.github/workflows/github-actions-ansible-lint.yml 
b/.github/workflows/github-actions-ansible-lint.yml index 472cc3e7d2..c83f78bd56 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -6,10 +6,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the code - uses: actions/checkout@v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7 - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 #v5.2.0 with: python-version: '3.x' diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml new file mode 100644 index 0000000000..df1d38fcd7 --- /dev/null +++ b/.github/workflows/ossf-scorecard.yml @@ -0,0 +1,73 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '32 4 * * 5' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional).
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000000..df1d38fcd7 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,73 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '32 4 * * 5' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional).
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif diff --git a/deploy/ansible/roles-db/4.0.1-hdb-hsr/defaults/main.yml b/deploy/ansible/roles-db/4.0.1-hdb-hsr/defaults/main.yml index 5cc94d0e2a..fa7a2fa714 100644 --- a/deploy/ansible/roles-db/4.0.1-hdb-hsr/defaults/main.yml +++ b/deploy/ansible/roles-db/4.0.1-hdb-hsr/defaults/main.yml @@ -27,5 +27,8 @@ hana_stop_start_delay_in_seconds: 10 # TODO: Maybe move these to a group_vars/all/distro file so that they # can be shared by all playbooks/tasks automatically, and extend with # standardised versions of all similar patterns used in the playbooks. -distro_name: "{{ ansible_os_family | upper }}-{{ ansible_distribution_major_version }}" -distro_id: "{{ ansible_os_family | lower ~ ansible_distribution_major_version }}" +distro_name: "{{ ansible_os_family | upper }}-{{ ansible_distribution_major_version }}" +distro_id: "{{ ansible_os_family | lower ~ ansible_distribution_major_version }}" + +DB: "{{ db_sid | upper }}/HDB{{ db_instance_number }}" +db_sid_admin_user: "{{ db_sid | lower }}adm" diff --git a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.6-post_checks.yml b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.6-post_checks.yml index 7bbedefafd..35b4fd40cc 100644 --- a/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.6-post_checks.yml +++ b/deploy/ansible/roles-db/4.0.1-hdb-hsr/tasks/4.0.1.6-post_checks.yml @@ -3,22 +3,40 @@ - name: "HANA HSR: - Ensure replication status is active" become_user: "{{ db_sid_admin_user }}" become: true - # Note: ideally we should be using set -o pipefail here (see for example https://xanmanning.co.uk/2019/03/21/ansible-lint-rule-306.html). - # However, the python script returns a status of 15 (!), which breaks the pipeline. Consequently, - # no pipefail option and elect to skip Ansible linting of this task. - ansible.builtin.shell: | - set -o pipefail - cdpy - (python systemReplicationStatus.py; echo) | grep -q 'overall system replication status: ACTIVE' - register: grep_result - until: grep_result.rc == 0 or grep_result.rc == 1 - failed_when: grep_result.rc != 0 and grep_result.rc != 1 - changed_when: false - retries: 10 - delay: 5 - when: ansible_hostname == primary_instance_name + block: -- name: "HANA HSR: - Debug replication" - ansible.builtin.debug: - var: grep_result - verbosity: 2 + - name: "HANA HSR: - Ensure replication status is active" + # Note: ideally we should be using set -o pipefail here (see for example https://xanmanning.co.uk/2019/03/21/ansible-lint-rule-306.html). + # However, the python script returns a status of 15 (!), which breaks the pipeline. Consequently, + # no pipefail option and elect to skip Ansible linting of this task. 
+ ansible.builtin.shell: | + set -o pipefail + (python systemReplicationStatus.py; echo) | grep -q 'overall system replication status: ACTIVE' + register: grep_result + until: grep_result.rc == 0 or grep_result.rc == 1 + failed_when: grep_result.rc != 0 and grep_result.rc != 1 + changed_when: false + retries: 10 + delay: 5 + when: ansible_hostname == primary_instance_name + + - name: "HANA HSR: - Debug replication" + ansible.builtin.debug: + var: grep_result + verbosity: 2 + + vars: + ansible_python_interpreter: python3 + environment: + HOME: "/usr/sap/{{ db_sid | upper }}/home" + PYTHONHOME: "/usr/sap/{{ DB }}/exe/Python3" + DIR_EXECUTABLE: "/usr/sap/{{ DB }}/exe" + SAP_RETRIEVAL_PATH: "/usr/sap/{{ DB }}/{{ virtual_host }}" + DIR_SYSEXE: "/usr/sap/{{ db_sid | upper }}/SYS/exe/hdb" + SAPSYSTEMNAME: "{{ db_sid | upper }}" + SECUDIR: "/usr/sap/{{ DB }}/{{ virtual_host }}/sec" + DAT_BIN_DIR: "/usr/sap/{{ DB }}/exe/dat_bin_dir" + DIR_INSTANCE: "/usr/sap/{{ DB }}" + PYTHONPATH: "/usr/sap/{{ DB }}/exe/Py3:/usr/sap/HDB/SYS/global/hdb/custom/python_support:/usr/sap/{{ DB }}/exe/python_support:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/testscripts:/usr/sap/{{ DB }}/exe/Python3/lib/python3.7" + PATH: "/usr/sap/{{ DB }}/exe/krb5/bin:/usr/sap/{{ DB }}/exe/krb5/sbin:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/mdc:/usr/sap/{{ DB }}/exe/Python3/bin:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/HDB/home:/usr/sap/HDB/home/bin:/usr/local/bin:/usr/bin:/bin:/usr/games:/usr/lib/mit/bin" + LD_LIBRARY_PATH: "/usr/sap/{{ DB }}/exe/krb5/lib/krb5/plugins/preauth:/usr/sap/{{ DB }}/exe/krb5/lib:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/Python3/lib:/usr/sap/{{ DB }}/exe/Py3:/usr/sap/{{ DB }}/exe/filter:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/{{ DB }}/exe/plugins/afl:/usr/sap/{{ DB }}/exe/plugins/lcapps:/usr/sap/{{ DB }}/exe/plugins/repository:/usr/sap/{{ DB }}/exe/plugins/epmmds:/usr/sap/HDB/SYS/global/hdb/federation:/usr/sap/HDB/SYS/global/hdb/plugins/3rd_party_libs:/usr/sap/HDB/SYS/global/hdb/plugins/1st_party_libs" diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 363bb3a484..0c082f8062 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -119,25 +119,25 @@ _rsp_number: "{{ db_instance_number }}" _rsp_system_usage: "custom" use_master_password: "{{ hana_use_master_password }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + password_copy: "{% if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" _rsp_internal_network: "{{ subnet_cidr_db | default((subnet_address + '/' + subnet_prefix), true) }}" # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. _rsp_root_password: "{{ root_password }}" # Note : Default configuration involves placing the last node in DB List as standby. # Note : This behavior can be overridden via property 'database_no_standby' to force all remaining nodes as workers # Note : This configuration is not recommended as it leaves your distributed system without a standby + # NOTE: DO NOT ALTER FORMATTING OR SPACING. THIS BREAKS THE JINJA2 CODE. 
_rsp_additional_hosts: "{% for item in db_hosts[1:] %} - {% if loop.index == db_hosts | length -1 %} - {% if database_no_standby %} - {{ item }}:role=worker:group=default:workergroup=default - {% else %} - {{ item }}:role=standby:group=default:workergroup=default - {% endif %} - {% else %} - {{ item }}:role=worker:group=default:workergroup=default, - {% endif %} - {% endfor %}" - + {% if loop.index == db_hosts | length - 1 %} + {% if database_no_standby %} + {{ item | trim }}:role=worker:group=default:workergroup=default + {% else %} + {{ item | trim }}:role=standby:group=default:workergroup=default + {% endif %} + {% else %} + {{ item | trim }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" - name: "4.0.3 - SAP HANA SCALE OUT: Progress" ansible.builtin.debug: msg: "Start HANA Installation on primary node" @@ -148,7 +148,7 @@ ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + ./hdblcm --batch --action=install --hostname {{ virtual_host | trim }} --configfile='{{ dir_params }}/{{ sap_inifile }}' args: chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" @@ -423,16 +423,17 @@ use_master_password: "{{ hana_use_master_password }}" _rsp_hana_data_basepath: "{{ hana_data_basepath }}" _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + password_copy: "{% if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" _rsp_root_password: "{{ root_password }}" - _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[2::2] %} - {% if loop.index == ansible_play_hosts_all | length -1 %} - {{ item }}:role=worker:group=default:workergroup=default - {% else %} - {{ item }}:role=worker:group=default:workergroup=default, - {% endif %} - {% endfor %}" + # NOTE: DO NOT ALTER FORMATTING OR SPACING. THIS BREAKS THE JINJA2 CODE. 
+ _rsp_additional_hosts: "{% for item in db_hosts[2::2] %} + {% if loop.index == db_hosts | length - 1 %} + {{ item | trim }}:role=worker:group=default:workergroup=default + {% else %} + {{ item | trim }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" ansible.builtin.template: @@ -455,7 +456,7 @@ ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + ./hdblcm --batch --action=install --hostname {{ virtual_host | trim }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' args: chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" @@ -480,7 +481,7 @@ ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + ./hdblcm --batch --action=install --hostname {{ virtual_host | trim }} --configfile='{{ dir_params }}/{{ sap_inifile }}' args: chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" @@ -612,7 +613,7 @@ option: "{{ hostvars[item].ansible_host }}" value: "{{ hostvars[item].virtual_host }}" with_items: - - "{{ ansible_play_hosts_all[0::2] }}" + - "{{ db_hosts[0::2] }}" - name: "Prepare global.ini for HANA hosts name resolution on replication network" community.general.ini_file: @@ -620,12 +621,12 @@ section: "system_replication_hostname_resolution" mode: 0644 state: present - option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_storage) | first | default(hostvars[item].ansible_host ) }}" + option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_db) | first | default(hostvars[item].ansible_host ) }}" value: "{{ hostvars[item].virtual_host }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" when: - - subnet_cidr_storage is defined + - subnet_cidr_db is defined - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" block: @@ -698,16 +699,17 @@ use_master_password: "{{ hana_use_master_password }}" _rsp_hana_data_basepath: "{{ hana_data_basepath }}" _rsp_hana_log_basepath: "{{ hana_log_basepath }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + password_copy: "{% if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" _rsp_internal_network: "{{ (subnet_address + '/' + subnet_prefix) }}" _rsp_root_password: "{{ root_password }}" - _rsp_additional_hosts: "{% for item in ansible_play_hosts_all[3::2] %} - {% if loop.index == ansible_play_hosts_all | length -1 %} - {{ item }}:role=worker:group=default:workergroup=default - {% else %} - {{ item }}:role=worker:group=default:workergroup=default, - {% endif %} - {% endfor %}" + # NOTE: DO NOT ALTER FORMATTING OR SPACING. THIS BREAKS THE JINJA2 CODE. 
+ _rsp_additional_hosts: "{% for item in db_hosts[3::2] %} + {% if loop.index == db_hosts | length - 1 %} + {{ item | trim }}:role=worker:group=default:workergroup=default + {% else %} + {{ item | trim }}:role=worker:group=default:workergroup=default, + {% endif %} + {% endfor %}" - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Template processing: Create custom ini file {{ sap_custom_config }} from {{ HANA_2_00_customconfig.rsp }}" ansible.builtin.template: @@ -730,7 +732,7 @@ ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' + ./hdblcm --batch --action=install --hostname {{ virtual_host | trim }} --configfile='{{ dir_params }}/{{ sap_inifile }}' --custom_cfg='{{ dir_params }}' args: chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" @@ -755,7 +757,7 @@ ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; chmod 755 /usr/sap; - ./hdblcm --batch --action=install --hostname {{ virtual_host }} --configfile='{{ dir_params }}/{{ sap_inifile }}' + ./hdblcm --batch --action=install --hostname {{ virtual_host | trim }} --configfile='{{ dir_params }}/{{ sap_inifile }}' args: chdir: "{{ target_media_location }}/CD_HDBSERVER/SAP_HANA_DATABASE" creates: "/etc/sap_deployment_automation/{{ db_sid | upper }}/sap_deployment_hdb.txt" @@ -887,7 +889,7 @@ option: "{{ hostvars[item].ansible_host }}" value: "{{ hostvars[item].virtual_host }}" with_items: - - "{{ ansible_play_hosts_all[1::2] }}" + - "{{ db_hosts[1::2] }}" - name: "Prepare global.ini for HANA hosts name resolution on replication network" community.general.ini_file: @@ -895,12 +897,12 @@ section: "system_replication_hostname_resolution" mode: 0644 state: present - option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_storage) | first | default(hostvars[item].ansible_host) }}" + option: "{{ hostvars[item].ansible_all_ipv4_addresses | ansible.utils.ipaddr(subnet_cidr_db) | first | default(hostvars[item].ansible_host) }}" value: "{{ hostvars[item].virtual_host }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" when: - - subnet_cidr_storage is defined + - subnet_cidr_db is defined - name: "4.0.3 - SAP HANA SCALE OUT-HSR: Restart HANA" block: diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 9bcaf5ef8d..9b762f0898 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -215,9 +215,43 @@ ansible.builtin.set_fact: sapbits_sas_token: "{{ az_sapbits_sas_token.stdout }}" -- name: "0.0 Validations - Check required variables are present and not empty" +- name: "0.4 Installation Media: - Create SAP Binaries Storage Account SAS token" when: - - allowSharedKeyAccess + - sapbits_sas_token is not defined or (sapbits_sas_token | string | length == 0) + block: + + - name: "0.4 Installation Media: - SAS token" + ansible.builtin.debug: + msg: "Creating the storage account SAS token" + + - name: "0.4 Installation Media: - Set Expiry" + ansible.builtin.command: "date +'%Y-%m-%d' -d '+3 days'" + register: expiry + + - name: "0.4 Installation Media: - Create SAP 
Binaries Storage Account SAS in Control Plane subscription" + ansible.builtin.command: >- + az storage account generate-sas \ + --account-name {{ account_name }} \ + --expiry {{ expiry.stdout }} \ + --permissions lr \ + --auth-mode login \ + --as-user \ + {{ subscription_parameter }} \ + --out tsv + changed_when: false + register: az_sapbits_sas_token + + - name: "0.4 Installation Media: - Debug storage account details (sas)" + ansible.builtin.debug: + var: az_sapbits_sas_token + verbosity: 4 + + - name: "0.4 Installation Media: - Extract SAP Binaries Storage Account SAS (temp)" + ansible.builtin.set_fact: + sapbits_sas_token: "{{ az_sapbits_sas_token.stdout }}" + + +- name: "0.0 Validations - Check required variables are present and not empty" ansible.builtin.assert: that: - sapbits_sas_token is defined # Has the variable been defined diff --git a/deploy/ansible/roles-misc/0.4-helpers/defaults/main.yaml b/deploy/ansible/roles-misc/0.4-helpers/defaults/main.yaml index d57247d17e..2839aa78fd 100644 --- a/deploy/ansible/roles-misc/0.4-helpers/defaults/main.yaml +++ b/deploy/ansible/roles-misc/0.4-helpers/defaults/main.yaml @@ -6,3 +6,6 @@ sapcontrol_command: "sapcontrol -nr {{ db_instance_number }}" hana_stop_start_timeout_in_seconds: 600 hana_stop_start_delay_in_seconds: 10 + +DB: "{{ db_sid | upper }}/HDB{{ db_instance_number }}" +db_sid_admin_user: "{{ db_sid | lower }}adm" diff --git a/deploy/ansible/roles-misc/0.4-helpers/tasks/04.01-start_hana.yml b/deploy/ansible/roles-misc/0.4-helpers/tasks/04.01-start_hana.yml index 32cd529821..2d566358b7 100644 --- a/deploy/ansible/roles-misc/0.4-helpers/tasks/04.01-start_hana.yml +++ b/deploy/ansible/roles-misc/0.4-helpers/tasks/04.01-start_hana.yml @@ -14,32 +14,56 @@ # | 4 GetProcessList succeeded, all processes stopped # | | # +------------------------------------4--------------------------------------*/ - -- name: "Determine if HANA is running on {{ ansible_hostname }}" +- name: "Start HANA on {{ ansible_hostname }}" become_user: "{{ db_sid | lower }}adm" become: true - ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList" - changed_when: false - failed_when: false - register: is_hana_running + block: + - name: "Determine if HANA is running on {{ ansible_hostname }}" + ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList" + changed_when: false + failed_when: false + register: is_hana_running -- name: "Determine if HANA is running on {{ ansible_hostname }}" - ansible.builtin.debug: - var: is_hana_running - verbosity: 2 + - name: "Determine if HANA is running on {{ ansible_hostname }}" + ansible.builtin.debug: + var: is_hana_running + verbosity: 2 -- name: "Ensure HANA is running on {{ ansible_hostname }}" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.command: "{{ sapcontrol_command }} -function StartWait {{ hana_stop_start_timeout_in_seconds }} {{ hana_stop_start_delay_in_seconds }}" - changed_when: false + - name: "Ensure HANA is running on {{ ansible_hostname }}" + ansible.builtin.command: "{{ sapcontrol_command }} -function StartWait {{ hana_stop_start_timeout_in_seconds }} {{ hana_stop_start_delay_in_seconds }}" + changed_when: false + + - name: "Validate HANA is running on {{ ansible_hostname }}" + ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList" + changed_when: false + register: hana_running + failed_when: hana_running.rc != 3 + rescue: + - name: "Rescue: Ensure HANA is running on {{ ansible_hostname }}" + ansible.builtin.command: "HDB 
start" + changed_when: false + + - name: "Rescue: Validate HANA is running on {{ ansible_hostname }}" + ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList" + changed_when: false + register: hana_running + failed_when: hana_running.rc != 3 + + vars: + ansible_python_interpreter: python3 + environment: + HOME: "/usr/sap/{{ db_sid | upper }}/home" + PYTHONHOME: "/usr/sap/{{ DB }}/exe/Python3" + DIR_EXECUTABLE: "/usr/sap/{{ DB }}/exe" + SAP_RETRIEVAL_PATH: "/usr/sap/{{ DB }}/{{ virtual_host }}" + DIR_SYSEXE: "/usr/sap/{{ db_sid | upper }}/SYS/exe/hdb" + SAPSYSTEMNAME: "{{ db_sid | upper }}" + SECUDIR: "/usr/sap/{{ DB }}/{{ virtual_host }}/sec" + DAT_BIN_DIR: "/usr/sap/{{ DB }}/exe/dat_bin_dir" + DIR_INSTANCE: "/usr/sap/{{ DB }}" + PYTHONPATH: "/usr/sap/{{ DB }}/exe/Py3:/usr/sap/HDB/SYS/global/hdb/custom/python_support:/usr/sap/{{ DB }}/exe/python_support:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/testscripts:/usr/sap/{{ DB }}/exe/Python3/lib/python3.7" + PATH: "/usr/sap/{{ DB }}/exe/krb5/bin:/usr/sap/{{ DB }}/exe/krb5/sbin:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/mdc:/usr/sap/{{ DB }}/exe/Python3/bin:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/HDB/home:/usr/sap/HDB/home/bin:/usr/local/bin:/usr/bin:/bin:/usr/games:/usr/lib/mit/bin" + LD_LIBRARY_PATH: "/usr/sap/{{ DB }}/exe/krb5/lib/krb5/plugins/preauth:/usr/sap/{{ DB }}/exe/krb5/lib:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/Python3/lib:/usr/sap/{{ DB }}/exe/Py3:/usr/sap/{{ DB }}/exe/filter:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/{{ DB }}/exe/plugins/afl:/usr/sap/{{ DB }}/exe/plugins/lcapps:/usr/sap/{{ DB }}/exe/plugins/repository:/usr/sap/{{ DB }}/exe/plugins/epmmds:/usr/sap/HDB/SYS/global/hdb/federation:/usr/sap/HDB/SYS/global/hdb/plugins/3rd_party_libs:/usr/sap/HDB/SYS/global/hdb/plugins/1st_party_libs" -- name: "Validate HANA is running on {{ ansible_hostname }}" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList" - changed_when: false - register: hana_running - failed_when: hana_running.rc != 3 ... 
diff --git a/deploy/ansible/roles-misc/0.4-helpers/tasks/04.02-stop_hana.yml b/deploy/ansible/roles-misc/0.4-helpers/tasks/04.02-stop_hana.yml
index 581ec9cc8a..a4c0258d62 100644
--- a/deploy/ansible/roles-misc/0.4-helpers/tasks/04.02-stop_hana.yml
+++ b/deploy/ansible/roles-misc/0.4-helpers/tasks/04.02-stop_hana.yml
@@ -15,24 +15,38 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/
 
-- name: "Determine if HANA is stopped on {{ ansible_hostname }}"
+- name: "Stop HANA on {{ ansible_hostname }}"
   become_user: "{{ db_sid | lower }}adm"
   become: true
-  ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList"
-  failed_when: false
-  changed_when: false
-  register: hana_stopped
+  block:
 
-- name: "Ensure HANA is stopped {{ ansible_hostname }}"
-  when: hana_stopped.rc != 4
-  become_user: "{{ db_sid | lower }}adm"
-  become: true
-  ansible.builtin.command: "{{ sapcontrol_command }} -function StopWait {{ hana_stop_start_timeout_in_seconds }} {{ hana_stop_start_delay_in_seconds }}"
+    - name: "Determine if HANA is stopped on {{ ansible_hostname }}"
+      ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList"
+      failed_when: false
+      changed_when: false
+      register: hana_stopped
 
-- name: "Verify HANA is stopped on {{ ansible_hostname }}"
-  become_user: "{{ db_sid | lower }}adm"
-  become: true
-  ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList"
-  changed_when: false
-  register: hana_stopped
-  failed_when: hana_stopped.rc != (4 or 0)
+    - name: "Ensure HANA is stopped on {{ ansible_hostname }}"
+      when: hana_stopped.rc != 4
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StopWait {{ hana_stop_start_timeout_in_seconds }} {{ hana_stop_start_delay_in_seconds }}"
+
+    - name: "Verify HANA is stopped on {{ ansible_hostname }}"
+      ansible.builtin.command: "{{ sapcontrol_command }} -function GetProcessList"
+      changed_when: false
+      register: hana_stopped
+      failed_when: hana_stopped.rc not in [0, 4]
+  vars:
+    ansible_python_interpreter: python3
+  environment:
+    HOME: "/usr/sap/{{ db_sid | upper }}/home"
+    PYTHONHOME: "/usr/sap/{{ DB }}/exe/Python3"
+    DIR_EXECUTABLE: "/usr/sap/{{ DB }}/exe"
+    SAP_RETRIEVAL_PATH: "/usr/sap/{{ DB }}/{{ virtual_host }}"
+    DIR_SYSEXE: "/usr/sap/{{ db_sid | upper }}/SYS/exe/hdb"
+    SAPSYSTEMNAME: "{{ db_sid | upper }}"
+    SECUDIR: "/usr/sap/{{ DB }}/{{ virtual_host }}/sec"
+    DAT_BIN_DIR: "/usr/sap/{{ DB }}/exe/dat_bin_dir"
+    DIR_INSTANCE: "/usr/sap/{{ DB }}"
+    PYTHONPATH: "/usr/sap/{{ DB }}/exe/Py3:/usr/sap/HDB/SYS/global/hdb/custom/python_support:/usr/sap/{{ DB }}/exe/python_support:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/testscripts:/usr/sap/{{ DB }}/exe/Python3/lib/python3.7"
+    PATH: "/usr/sap/{{ DB }}/exe/krb5/bin:/usr/sap/{{ DB }}/exe/krb5/sbin:/usr/sap/{{ DB }}/{{ virtual_host }}:/usr/sap/{{ DB }}:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/mdc:/usr/sap/{{ DB }}/exe/Python3/bin:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/HDB/home:/usr/sap/HDB/home/bin:/usr/local/bin:/usr/bin:/bin:/usr/games:/usr/lib/mit/bin"
+    LD_LIBRARY_PATH: "/usr/sap/{{ DB }}/exe/krb5/lib/krb5/plugins/preauth:/usr/sap/{{ DB }}/exe/krb5/lib:/usr/sap/{{ DB }}/exe:/usr/sap/{{ DB }}/exe/Python3/lib:/usr/sap/{{ DB }}/exe/Py3:/usr/sap/{{ DB }}/exe/filter:/usr/sap/{{ DB }}/exe/dat_bin_dir:/usr/sap/{{ DB }}/exe/plugins/afl:/usr/sap/{{ DB }}/exe/plugins/lcapps:/usr/sap/{{ DB }}/exe/plugins/repository:/usr/sap/{{ DB
}}/exe/plugins/epmmds:/usr/sap/HDB/SYS/global/hdb/federation:/usr/sap/HDB/SYS/global/hdb/plugins/3rd_party_libs:/usr/sap/HDB/SYS/global/hdb/plugins/1st_party_libs" diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml index 1c004a1a0f..693c66ce8e 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml @@ -131,7 +131,7 @@ - name: "1.17 Generic Pacemaker - Show if a cluster has already been prepared" ansible.builtin.debug: msg: - - "CLUSTER VALIDATION : {{ cluster_existence_check }}" + - "Is the cluster configured: {{ cluster_existence_check }}" # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml index 5a71780e12..5f07e66ae0 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.0-set_runtime_facts.yml @@ -112,8 +112,8 @@ - name: "1.18.0 Generic Pacemaker - Extract NIC IPs" ansible.builtin.set_fact: - "{{ host_var }}_instance_ip": "{{ hostvars[ansible_hostname]['primary_ip'] | string }}" - + # "{{ host_var }}_instance_ip": "{{ hostvars[ansible_hostname]['primary_ip'] | string }}" + "instance_ip": "{{ hostvars[ansible_hostname]['primary_ip'] | string }}" - name: "1.18.0 Generic Pacemaker - Show Details" ansible.builtin.debug: msg: @@ -159,10 +159,12 @@ block: - name: "Wait for cluster_public_ssh_key check on nodes to finish" ansible.builtin.set_fact: - "is_ssh_defined_on_{{ host_var }}": "{{ hostvars[ansible_hostname].cluster_public_ssh_key is defined }}" + # "is_ssh_defined_on_{{ host_var }}": "{{ hostvars[ansible_hostname].cluster_public_ssh_key is defined }}" + "is_ssh_defined": "{{ hostvars[ansible_hostname].cluster_public_ssh_key is defined }}" retries: 30 delay: 60 - until: is_ssh_defined_on_{{ host_var }} + # until: is_ssh_defined_on_{{ host_var }} + until: is_ssh_defined # /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml index 5d54a1a542..3fb7359694 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.1-pre_checks.yml @@ -119,7 +119,7 @@ - name: "1.18.1 Generic Pacemaker - Show if a cluster has already been prepared" ansible.builtin.debug: msg: - - "CLUSTER VALIDATION : {{ cluster_existence_check }}" + - "Is the cluster configured : {{ cluster_existence_check }}" ... 
# /*---------------------------------------------------------------------------8 diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml index baed76f33f..e7b021bcb7 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.2.0-cluster-Suse.yml @@ -45,7 +45,7 @@ block: - name: "1.18.2.0 Generic Pacemaker - Ensure Secondary nodes joins the Cluster" # ha-cluster-join is not supported in SLES 15 SP4 anymore, crm syntax required - ansible.builtin.command: "sudo crm cluster join -y -c {{ ansible_hostname }} --interface eth0" + ansible.builtin.command: "sudo crm cluster join -y -c {{ primary_instance_name }} --interface eth0" when: - ansible_hostname != ansible_play_hosts_all[0] diff --git a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml index 3500e44977..7027afcbd1 100644 --- a/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml +++ b/deploy/ansible/roles-os/1.18-scaleout-pacemaker/tasks/1.18.3-post_provision_report.yml @@ -4,11 +4,21 @@ ansible.builtin.pause: seconds: "{{ cluster_status_report_wait_in_s }}" -- name: "1.18.3 Generic Pacemaker - Cleanup resource status" +- name: "1.18.3 Generic Pacemaker - Cleanup resource status ( REDHAT )" ansible.builtin.shell: > pcs resource cleanup register: cluster_cleanup failed_when: cluster_cleanup.rc > 0 + when: + ansible_os_family | upper == "REDHAT" + +- name: "1.18.3 Generic Pacemaker - Cleanup resource status ( SUSE )" + ansible.builtin.shell: > + crm resource cleanup + register: cluster_cleanup + failed_when: cluster_cleanup.rc > 0 + when: + ansible_os_family | upper == "SUSE" - name: "1.18.3 Generic Pacemaker - Check the post-provisioning cluster status" ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}" diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index 142df8be1f..90aeecddc7 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -131,15 +131,15 @@ ansible_facts. {% for ip in host_ips[1:] %} {% if (database_scale_out) %} {% if (database_high_availability) %} -{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} +{% if (subnet_cidr_db | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-hsr.' + sap_fqdn) }}{{ '%-21s' | format(host + '-hsr') }} -{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} +{% elif (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-inter.' + sap_fqdn) }}{{ '%-21s' | format(host + '-inter') }} {% endif %} {% else %} -{% if (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} +{% if (subnet_cidr_db | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-storage.' + sap_fqdn) }}{{ '%-21s' | format(host + '-storage') }} -{% elif (subnet_cidr_client | ansible.utils.network_in_usable(ip)) %} +{% elif (subnet_cidr_storage | ansible.utils.network_in_usable(ip)) %} {{ '%-19s' | format(ip) }}{{ '%-80s ' | format(host + '-hana.' 
+ sap_fqdn) }}{{ '%-21s' | format(host + '-hana') }} {% endif %} {% endif %} diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml deleted file mode 100644 index 3b1e3d2d31..0000000000 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.2-anf-mounts-scaleout.yaml +++ /dev/null @@ -1,990 +0,0 @@ -# This task is now deprecated as the functionality is merged into 2.6.1 and 2.6.8 -# This file will be removed in the later releases. Its left here for tracing and debugging - - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Perform the ANF system mounts for Scale out systems only | -# # | | -# # +------------------------------------4--------------------------------------*/ -# --- - -# - name: "ANF Mount: Set the NFS Service name" -# ansible.builtin.set_fact: -# nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}" - -# - name: "ANF Mount: Set the NFSmount options" -# ansible.builtin.set_fact: -# mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' -# when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] - -# - name: "ANF Mount: Set the NFSmount options" -# ansible.builtin.set_fact: -# mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' -# when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] - -# - name: "ANF Mount: Define this SID" -# ansible.builtin.set_fact: -# this_sid: -# { -# 'sid': '{{ sap_sid | upper }}', -# 'dbsid_uid': '{{ hdbadm_uid }}', -# 'sidadm_uid': '{{ sidadm_uid }}', -# 'ascs_inst_no': '{{ scs_instance_number }}', -# 'pas_inst_no': '{{ pas_instance_number }}', -# 'app_inst_no': '{{ app_instance_number }}' -# } - -# - name: "ANF Mount: Create list of all_sap_mounts to support " -# ansible.builtin.set_fact: -# all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" -# db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -# - name: "ANF Mount: Ensure the NFS service is stopped" -# ansible.builtin.systemd: -# name: "{{ nfs_service }}" -# state: stopped -# when: -# - "'scs' in supported_tiers" -# - sap_mnt is not defined -# - sap_trans is not defined - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Mount the ANF Volumes | -# # | Make sure to set the NFS domain in /etc/idmapd.conf on the VM to match the | -# # | default domain configuration on Azure NetApp Files: defaultv4iddomain.com. 
| -# # | and the mapping is set to nobody | -# # | We use tier in tasks as well, to treat any special scenarios that may arise| -# # +------------------------------------4--------------------------------------*/ -# # For additional information refer to the below URLs -# # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-suse#mount-the-azure-netapp-files-volume -# # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-red-hat#mount-the-azure-netapp-files-volume -# - name: "ANF Mount: NFS Domain Setting (ANF)" -# block: -# - name: "ANF Mount: Domain is configured as -# the default Azure NetApp Files domain" -# ansible.builtin.lineinfile: -# path: /etc/idmapd.conf -# regexp: '^[ #]*Domain = ' -# line: 'Domain = defaultv4iddomain.com' -# insertafter: '[General]' -# when: -# - tier == 'sapos' -# register: id_mapping_changed - -# - name: "ANF Mount: Make sure that user -# mapping is set to 'nobody'" -# ansible.builtin.lineinfile: -# path: /etc/idmapd.conf -# regexp: '^[ #]*Nobody-User = ' -# line: 'Nobody-User = nobody' -# insertafter: '^[ #]*Nobody-User = ' -# when: -# - tier == 'sapos' -# register: id_mapping_changed - -# - name: "ANF Mount: Make sure that group -# mapping is set to 'nobody'" -# ansible.builtin.lineinfile: -# path: /etc/idmapd.conf -# regexp: '^[ #]*Nobody-Group = ' -# line: 'Nobody-Group = nobody' -# insertafter: '^[ #]*Nobody-Group = ' -# when: -# - tier == 'sapos' -# register: id_mapping_changed -# when: -# - tier == 'sapos' - -# - name: "ANF Mount: Set nfs4_disable_idmapping to Y" -# ansible.builtin.lineinfile: -# path: /etc/modprobe.d/nfs.conf -# line: 'options nfs nfs4_disable_idmapping=Y' -# create: true -# mode: 0644 -# when: -# - tier == 'sapos' - -# - name: "ANF Mount: Ensure the services are restarted" -# block: -# - name: "AF Mount: Ensure the rpcbind service is restarted" -# ansible.builtin.systemd: -# name: rpcbind -# state: restarted -# - name: "ANF Mount: Ensure the NFS ID Map service is restarted" -# ansible.builtin.systemd: -# name: "nfs-idmapd" -# daemon-reload: true -# state: restarted -# - name: "ANF Mount: Pause for 5 seconds" -# ansible.builtin.pause: -# seconds: 5 -# - name: "ANF Mount: Ensure the NFS service is restarted" -# ansible.builtin.systemd: -# name: "{{ nfs_service }}" -# state: restarted -# when: -# - id_mapping_changed is changed - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Prepare for the /usr/sap mounts | -# # | Create temporary directory structure | -# # | Mount the share, create the directory structure on share | -# # | Unmount and clean up temporary directory structure | -# # | | -# # +------------------------------------4--------------------------------------*/ - -# - name: "ANF Mount: install:Get the Server name list" -# ansible.builtin.set_fact: -# first_app_server_temp: "{{ first_app_server_temp | default([]) + [item] }}" -# with_items: -# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_PAS') }}" -# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -# - name: "ANF Mount: usr/sap" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'usrsap', -# 'temppath': 'tmpusersap', -# 'mount': '{{ usr_sap_mountpoint }}', -# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', -# 'path': '/usr/sap', -# 'set_chattr_on_dir': false, -# 'target_nodes': ['app','pas'], -# 'create_temp_folders': false -# } -# 
vars: -# primary_host: "{{ first_app_server_temp | first }}" -# when: -# - tier == 'sapos' -# - usr_sap_mountpoint is defined - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Prepare for the sap_mnt mounts | -# # | Create temporary directory structure | -# # | Mount the share, create the directory structure on share | -# # | Unmount and clean up temporary directory structure | -# # | | -# # +------------------------------------4--------------------------------------*/ -# - name: "ANF Mount: (sapmnt)" -# block: -# - name: "ANF Mount: Create /saptmp" -# ansible.builtin.file: -# path: "/saptmp" -# state: directory -# mode: 0755 -# group: sapsys - -# - name: "ANF Mount: (sapmnt)" -# block: -# - name: "ANF Mount: Filesystems on ANF (sapmnt)" -# ansible.posix.mount: -# src: "{{ sap_mnt }}" -# path: "/saptmp" -# fstype: "nfs4" -# opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" -# state: mounted -# rescue: -# - name: "ANF Mount: Clear the cache of the nfsidmap daemon (ANF)" -# ansible.builtin.shell: | -# nfsidmap -c -# - name: "ANF Mount: Ensure the rpcbind service is restarted" -# ansible.builtin.systemd: -# name: rpcbind -# daemon-reload: true -# state: restarted - -# - name: "ANF Mount: Create SAP Directories (spmnt & usrsap)" -# ansible.builtin.file: -# path: "{{ item.path }}" -# state: directory -# mode: 0755 -# loop: -# - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } - -# - name: "ANF Mount: Create SAP Directories (ANF)" -# ansible.builtin.file: -# path: "/saptmp/sapmnt{{ item.sid | upper }}" -# state: directory -# mode: 0755 -# loop: "{{ MULTI_SIDS }}" -# when: MULTI_SIDS is defined - -# - name: "ANF Mount: Unmount file systems (sap_mnt)" -# ansible.posix.mount: -# src: "{{ sap_mnt }}" -# path: "/saptmp" -# state: unmounted - -# - name: "ANF Mount: Delete locally created SAP Directories" -# ansible.builtin.file: -# path: "{{ item.path }}" -# state: absent -# loop: -# - { path: '/saptmp/sapmnt{{ sap_sid | upper }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ascs{{ scs_instance_number }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}ers{{ ers_instance_number }}' } -# - { path: '/saptmp/usrsap{{ sap_sid | upper }}sys' } - -# - name: "ANF Mount: Remove SAP Directories (ANF)" -# ansible.builtin.file: -# path: "/saptmp/sapmnt{{ item.sid | upper }}" -# state: absent -# loop: "{{ MULTI_SIDS }}" -# when: MULTI_SIDS is defined - -# - name: "ANF Mount: Cleanup fstab and directory (sap_mnt)" -# ansible.posix.mount: -# src: "{{ sap_mnt }}" -# path: "/saptmp" -# fstype: "nfs4" -# opts: "rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp" -# state: absent - -# when: -# - tier == 'sapos' -# - "'scs' in supported_tiers" -# - sap_mnt is defined - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Perform the sap_mnt mounts | -# # | Create directories and make them immutable | -# # | | -# # +------------------------------------4--------------------------------------*/ - -# - name: "ANF Mount: Create SAP Directories (sapmnt)" -# ansible.builtin.file: -# owner: "{{ item.sidadm_uid }}" -# group: sapsys -# mode: 0755 -# path: "/sapmnt/{{ item.sid }}" -# state: 
directory -# register: is_created_now -# loop: "{{ all_sap_mounts }}" -# when: -# - tier == 'sapos' -# - node_tier in ['app','scs','ers', 'pas'] or 'scs' in supported_tiers -# - sap_mnt is defined - -# - name: "ANF Mount: Change attribute only when we create SAP Directories (sap_mnt)" -# ansible.builtin.file: -# path: "{{ item.item.path }}" -# state: directory -# mode: 0755 -# attr: i+ -# loop: "{{ is_created_now.results }}" -# when: -# - tier == 'sapos' -# - item.item is changed -# register: set_immutable_attribute - -# - name: "ANF Mount: Create SAP Directories (scs & ers)" -# ansible.builtin.file: -# path: "{{ item.path }}" -# state: directory -# owner: '{{ sidadm_uid }}' -# group: sapsys -# mode: 0755 -# loop: -# - { path: '/usr/sap/{{ sap_sid | upper }}' } -# - { path: '/usr/sap/{{ sap_sid | upper }}/SYS' } -# - { path: '/usr/sap/{{ sap_sid | upper }}/{{ instance_type | upper }}{{ scs_instance_number }}' } -# - { path: '/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' } -# when: -# - tier == 'sapos' -# - node_tier in ['scs','ers'] or 'scs' in supported_tiers -# - sap_mnt is defined -# - MULTI_SIDS is undefined -# register: is_created_now3 - -# - name: "ANF Mount: Change attribute only when we create SAP Directories (scs & ers)" -# ansible.builtin.file: -# path: "{{ item.item.path }}" -# state: directory -# mode: 0755 -# attr: i+ -# loop: "{{ is_created_now3.results }}" -# when: -# - tier == 'sapos' -# - item.item is changed -# register: set_immutable_attribute - -# - name: "ANF Mount: Debug" -# ansible.builtin.debug: -# msg: 'isHA:{{ scs_high_availability }} | node_tier:{{ node_tier }} | tier:{{ tier }} | sapmnt:{{ sap_mnt }}' - -# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Distributed Non-HA" -# ansible.posix.mount: -# src: "{{ item.src }}" -# path: "{{ item.path }}" -# fstype: "{{ item.type }}" -# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' -# state: mounted -# loop: -# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } -# when: -# - tier == 'sapos' -# - sap_mnt is defined -# - not scs_high_availability -# - ansible_play_hosts_all | length > 1 -# - node_tier != 'hana' - -# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Single instance" -# ansible.posix.mount: -# src: "{{ item.src }}" -# path: "{{ item.path }}" -# fstype: "{{ item.type }}" -# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' -# state: mounted -# loop: -# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } -# when: -# - tier == 'sapos' -# - sap_mnt is defined -# - not scs_high_availability -# - ansible_play_hosts_all | length == 1 - - -# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - Standalone MULTI_SIDS" -# become: true -# become_user: root -# ansible.posix.mount: -# src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" -# path: "/sapmnt/{{ item.sid }}" -# fstype: 'nfs4' -# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' -# state: mounted -# loop: "{{ MULTI_SIDS }}" -# when: -# - not scs_high_availability -# - sap_mnt is defined -# - MULTI_SIDS is defined - -# - name: "ANF Mount: sapmnt/{{ sap_sid | upper }} - High Availability" -# ansible.posix.mount: -# src: "{{ item.src }}" -# path: "{{ item.path }}" -# fstype: "{{ item.type }}" -# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' -# state: mounted -# loop: -# - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } -# when: -# - 
scs_high_availability -# - tier in ['sapos'] -# - node_tier != 'hana' -# - sap_mnt is defined - -# - name: "ANF Mount: usr/sap/{{ sap_sid | upper }}/SYS" -# ansible.posix.mount: -# src: "{{ item.src }}" -# path: "{{ item.path }}" -# fstype: "{{ item.type }}" -# opts: 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp' -# state: mounted -# loop: -# - { type: 'nfs4', src: '{{ sap_mnt }}/usrsap{{ sap_sid | upper }}sys', path: '/usr/sap/{{ sap_sid | upper }}/SYS' } -# when: -# - scs_high_availability -# - tier in ['sapos'] -# - node_tier in ['scs','ers'] -# - sap_mnt is defined - - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Prepare for the sap_trans, install mounts | -# # | Create temporary directory structure | -# # | Mount the share, create the directory structure on share | -# # | Unmount and clean up temporary directory structure | -# # | | -# # +------------------------------------4--------------------------------------*/ - -# - name: "ANF Mount: install:Get the Server name list" -# ansible.builtin.set_fact: -# first_server_temp: "{{ first_server_temp | default([]) + [item] }}" -# with_items: -# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" -# - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -# - name: "ANF Mount: sap_trans" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'trans', -# 'temppath': 'saptrans', -# 'mount': '{{ sap_trans }}', -# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', -# 'path': '/usr/sap/trans', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes': ['app','pas', 'ers', 'scs'], -# 'create_temp_folders': false -# } -# vars: -# primary_host: "{{ first_server_temp | first }}" -# when: -# - tier == 'sapos' -# - sap_trans is defined - -# - name: "ANF Mount: install" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'install', -# 'temppath': 'sapinstall', -# 'folder': '{{ bom_base_name }}', -# 'mount': '{{ usr_sap_install_mountpoint }}', -# 'opts': 'rw,hard,rsize=65536,wsize=65536,sec=sys,vers=4.1,tcp', -# 'path': '/usr/sap/install', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes': ['all'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ first_server_temp | first }}" -# when: -# - tier == 'sapos' -# - usr_sap_install_mountpoint is defined - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Prepare the OS for running SAP HANA on | -# # | Azure NetApp Files with NFS | -# # | Except Scale out + ANF | -# # +------------------------------------4--------------------------------------*/ -# - name: "ANF Mount: Prepare the OS for running -# SAP HANA on Azure NetApp with NFS" -# block: -# - name: "ANF Mount: Create configuration file for the NetApp configuration settings" -# ansible.builtin.blockinfile: -# path: /etc/sysctl.d/91-NetApp-HANA.conf -# backup: true -# create: true -# mode: 0644 -# marker: "# {mark} HANA NetApp configuration high availability" -# block: | -# net.core.rmem_max = 16777216 -# net.core.wmem_max = 16777216 -# net.core.rmem_default = 16777216 -# net.core.wmem_default = 16777216 -# net.core.optmem_max = 16777216 -# net.ipv4.tcp_rmem = 4096 131072 16777216 -# net.ipv4.tcp_wmem = 4096 16384 16777216 -# net.core.netdev_max_backlog = 300000 -# net.ipv4.tcp_slow_start_after_idle=0 -# net.ipv4.tcp_no_metrics_save = 1 -# net.ipv4.tcp_moderate_rcvbuf = 1 -# 
net.ipv4.tcp_window_scaling = 1 -# net.ipv4.tcp_timestamps = 0 -# net.ipv4.tcp_sack = 1 -# when: -# - node_tier == 'hana' - -# - name: "Backward Compatibility - Check required Database HA variables" -# ansible.builtin.set_fact: -# database_high_availability: "{{ db_high_availability | default(false) }}" -# when: -# - db_high_availability is defined -# - database_high_availability is not defined - -# - name: "ANF Mount: Create configuration file for the NetApp configuration settings" -# ansible.builtin.blockinfile: -# path: /etc/sysctl.d/91-NetApp-HANA.conf -# backup: true -# create: true -# mode: 0644 -# marker: "# {mark} HANA NetApp configuration standalone" -# block: | -# net.core.rmem_max = 16777216 -# net.core.wmem_max = 16777216 -# net.core.rmem_default = 16777216 -# net.core.wmem_default = 16777216 -# net.core.optmem_max = 16777216 -# net.ipv4.tcp_rmem = 4096 131072 16777216 -# net.ipv4.tcp_wmem = 4096 16384 16777216 -# net.core.netdev_max_backlog = 300000 -# net.ipv4.tcp_slow_start_after_idle=0 -# net.ipv4.tcp_no_metrics_save = 1 -# net.ipv4.tcp_moderate_rcvbuf = 1 -# net.ipv4.tcp_window_scaling = 1 -# net.ipv4.tcp_timestamps = 1 -# net.ipv4.tcp_sack = 1 -# when: -# - node_tier == 'hana' -# - not database_high_availability - -# - name: "ANF Mount: Create configuration file -# with additional optimization settings" -# ansible.builtin.blockinfile: -# path: /etc/sysctl.d/ms-az.conf -# backup: true -# create: true -# mode: 0644 -# marker: "# {mark} HANA NetApp optimizations" -# block: | -# net.ipv6.conf.all.disable_ipv6 = 1 -# net.ipv4.tcp_max_syn_backlog = 16348 -# net.ipv4.conf.all.rp_filter = 0 -# sunrpc.tcp_slot_table_entries = 128 -# vm.swappiness=10 -# when: -# - node_tier == 'hana' - -# # /*-----------------------------------------------------------------------8 -# # | Configure the maximum number of (TCP) RPC requests that can be in | -# # | flight at a time (to the NFS server) to be 128 | -# # |--------------------------------4--------------------------------------*/ -# - name: "ANF Mount: configure the maximum number -# of RPC requests for the NFS session" -# ansible.builtin.blockinfile: -# path: /etc/modprobe.d/sunrpc.conf -# backup: true -# create: true -# mode: 0644 -# marker: "# {mark} NFS RPC Connections" -# block: "options sunrpc tcp_max_slot_table_entries=128" -# when: -# - node_tier == 'hana' - -# when: -# - tier == 'sapos' -# - node_tier == 'hana' - -# - name: "ANF Mount: Create /hana folder" -# ansible.builtin.file: -# path: /hana -# mode: 0755 -# state: directory -# group: sapsys -# when: -# - tier == 'sapos' -# - node_tier == 'hana' - -# # Note: This block ( and one for second DB note) must run only for HSR - pacemaker HANA scale out -# # Currently we only support two node cluster + observer. 
-# # TODO: Add support for >2(even count) node cluster + observer -# - name: "ANF Mount: HANA data" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'data', -# 'temppath': 'hanadata', -# 'folder': 'hanadata', -# 'mount': '{{ hana_data_mountpoint[0] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/hana/data', -# 'permissions': '0755', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[0] }}" -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - hana_data_mountpoint is defined -# - hana_data_mountpoint | length > 0 -# - ansible_hostname == db_hosts[0] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# - name: "ANF Mount: HANA log" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'log', -# 'temppath': 'hanalog', -# 'folder': 'hanalog', -# 'mount' : '{{ hana_log_mountpoint[0] }}', -# 'opts': '{{ mnt_options }}', -# 'path' : '/hana/log', -# 'permissions': '0755', -# 'set_chattr_on_dir': false, -# 'target_nodes': ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[0] }}" -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - hana_log_mountpoint is defined -# - hana_log_mountpoint | length > 0 -# - ansible_hostname == db_hosts[0] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# - name: "ANF Mount: HANA shared" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'shared', -# 'temppath': 'hanashared', -# 'folder': 'hanashared', -# 'mount': '{{ hana_shared_mountpoint[0] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/hana/shared', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[0] }}" -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - hana_shared_mountpoint is defined -# - hana_shared_mountpoint | length > 0 -# - ansible_hostname == db_hosts[0] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# - name: "ANF Mount: HANA data (secondary)" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'data', -# 'temppath': 'hanadata', -# 'folder': 'hanadata', -# 'mount': '{{ hana_data_mountpoint[1] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/hana/data', -# 'permissions': '0755', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[1] }}" -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - hana_data_mountpoint is defined -# - hana_data_mountpoint | length > 1 -# - db_hosts | length == 2 -# - ansible_hostname == db_hosts[1] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# - name: "ANF Mount: HANA log (secondary)" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'log', -# 'temppath': 'hanalog', -# 'folder': 'hanalog', -# 'mount' : '{{ hana_log_mountpoint[1] }}', -# 'opts': '{{ mnt_options }}', -# 'path' : '/hana/log', -# 'permissions': '0755', -# 'set_chattr_on_dir': false, -# 'target_nodes': ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[1] }}" -# when: -# - tier == 
'sapos' -# - node_tier == 'hana' -# - hana_log_mountpoint is defined -# - hana_log_mountpoint | length > 1 -# - db_hosts | length ==2 -# - ansible_hostname == db_hosts[1] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# - name: "ANF Mount: HANA shared (secondary)" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'shared', -# 'temppath': 'hanashared', -# 'folder': 'hanashared', -# 'mount': '{{ hana_shared_mountpoint[1] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/hana/shared', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ db_hosts[1] }}" -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - hana_shared_mountpoint is defined -# - hana_shared_mountpoint | length > 1 -# - db_hosts | length == 2 -# - ansible_hostname == db_hosts[1] -# # For HSR based scale out, needs DB high availability -# - db_high_availability is defined -# - db_high_availability - -# # /*---------------------------------------------------------------------------8 -# # | | -# # | Prepare the OS for running SAP HANA on | -# # | Azure NetApp Files with NFS | -# # | Scale out + ANF | -# # +------------------------------------4--------------------------------------*/ - -# # FOR ANF mount on SLES and RHEl, the below tasks replicate the steps in the link https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-scale-out-standby-netapp-files-suse#mount-the-azure-netapp-files-volumes -# # Mount the HANA shared on to the temp path - -# - name: "ANF Mount: Scale Out - Create SAP Directories (usrsap)" -# ansible.builtin.file: -# owner: "{{ db_sid | lower }}adm" -# group: sapsys -# mode: 0755 -# path: "/usr/sap/{{ db_sid | upper }}" -# state: directory -# when: -# - tier == 'hana' -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# - name: "ANF Mount: Scale Out - Create SAP Directories (hana data)" -# ansible.builtin.file: -# owner: "{{ db_sid | lower }}adm" -# group: sapsys -# mode: 0755 -# path: "/hana/data/{{ db_sid | upper }}" -# state: directory -# when: -# - tier == 'hana' -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - - -# - name: "ANF Mount: Scale Out - Create SAP Directories (hana log)" -# ansible.builtin.file: -# owner: "{{ db_sid | lower }}adm" -# group: sapsys -# mode: 0755 -# path: "/hana/log/{{ db_sid | upper }}" -# state: directory -# when: -# - tier == 'hana' -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# - name: "ANF Mount: HANA shared - Scale out" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'shared', -# 'temppath': 'shared', -# # change folder to match the mount folder within the share -# 'folder': 'shared', -# 'mount': '{{ hana_shared_mountpoint[0] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/hana/shared', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# # Run this on all the nodes, not just primary. 
-# primary_host: "{{ ansible_hostname }}" -# when: -# - node_tier == 'hana' -# - hana_shared_mountpoint is defined -# - hana_shared_mountpoint | length > 0 -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# # This runs for unique share per node -# - name: "ANF Mount: usrsap - Scale out" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# loop: -# - { -# 'type': 'usrsap', -# 'temppath': 'usrsap', -# 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", -# 'mount': '{{ hana_shared_mountpoint[0] }}', -# 'opts': '{{ mnt_options }}', -# 'path': '/usr/sap/{{ db_sid | upper }}', -# 'permissions': '0775', -# 'set_chattr_on_dir': false, -# 'target_nodes' : ['hana'], -# 'create_temp_folders': true -# } -# vars: -# primary_host: "{{ ansible_hostname }}" -# when: -# - node_tier == 'hana' -# - hana_shared_mountpoint is defined -# - hana_shared_mountpoint | length == 1 -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# - name: "ANF Mount: HANA Data - Scale out - Create mount list" -# block: -# - name: "Initialize HANA Data mountpoints" -# ansible.builtin.set_fact: -# hana_data_scaleout_mountpoint: [] -# - name: "Build HANA Data mountpoints" -# ansible.builtin.set_fact: -# # hana_data_mountpoint: "{{ hana_data_mountpoint | default([]) + [item] }}" -# hana_data_scaleout_mountpoint: "{{ hana_data_scaleout_mountpoint + dataupdate }}" -# loop: "{{ hana_data_mountpoint }}" -# loop_control: -# index_var: my_index -# # Note the object structure and specific key:pair value. Do not modify those hard coded. -# vars: -# dataupdate: -# - { type: 'data', -# temppath: 'hanadata', -# folder: 'hanadata', -# mount: "{{ item }}", -# opts: "{{ mnt_options }}", -# path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", -# permissions: '0775', -# set_chattr_on_dir: false, -# target_nodes: ['hana'], -# create_temp_folders: 'true' -# } -# when: -# - node_tier == 'hana' -# - hana_data_mountpoint is defined -# # - hana_data_mountpoint | length == db_hosts | length -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# - name: "DEBUG:ANF Mount: HANA Data - Scale out - Create mount list" -# ansible.builtin.debug: -# var: hana_data_scaleout_mountpoint - -# - name: "ANF Mount: HANA Data - Scale out" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. -# with_items: -# - "{{ hana_data_scaleout_mountpoint | list }}" -# vars: -# primary_host: "{{ ansible_hostname }}" -# when: -# - node_tier == 'hana' -# - hana_data_mountpoint is defined -# # - hana_data_mountpoint | length == db_hosts | length -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. 
-# - database_scale_out is defined -# - database_scale_out - - -# - name: "ANF Mount: HANA Log - Scale out - Create mount list" -# block: -# - name: "Initialize HANA Log mountpoints" -# ansible.builtin.set_fact: -# hana_log_scaleout_mountpoint: [] - -# - name: "Build HANA log mountpoints" -# ansible.builtin.set_fact: -# hana_log_scaleout_mountpoint: "{{ hana_log_scaleout_mountpoint + logupdate }}" -# loop: "{{ hana_log_mountpoint }}" -# loop_control: -# index_var: my_index -# # Note the object structure and specific key:pair value. Do not modify those hard coded. -# vars: -# logupdate: -# - { type: 'log', -# temppath: 'hanalog', -# folder: 'hanalog', -# mount: "{{ item }}", -# opts: "{{ mnt_options }}", -# path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", -# permissions: '0775', -# set_chattr_on_dir: false, -# target_nodes: ['hana'], -# create_temp_folders: 'true' -# } -# when: -# - node_tier == 'hana' -# - hana_log_mountpoint is defined -# # - hana_log_mountpoint | length == db_hosts | length -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - -# - name: "DEBUG:ANF Mount: HANA Log - Scale out - Create mount list" -# ansible.builtin.debug: -# var: hana_log_scaleout_mountpoint - -# - name: "ANF Mount: HANA Log - Scale out" -# ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml -# # Do not change this to loop:. It Breaks. i don't know why but this modification only seems to work with with_items: despite multiple formatting attempts. -# with_items: -# - "{{ hana_log_scaleout_mountpoint | list }}" -# vars: -# primary_host: "{{ ansible_hostname }}" -# when: -# - node_tier == 'hana' -# - hana_log_mountpoint is defined -# # - hana_log_mountpoint | length == db_hosts | length -# # For Scale out without HSR/pacemaker. Relies on ANF + hot spare to provide HA. -# - database_scale_out is defined -# - database_scale_out - - -# - name: "ANF Mount: Set Permissons on HANA (HSR) Directories ({{ item.path }})" -# ansible.builtin.file: -# owner: '{{ hdbadm_uid }}' -# group: sapsys -# path: "{{ item.path }}" -# state: directory -# recurse: true -# loop: -# - { 'path': '/hana/data' } -# - { 'path': '/hana/log' } -# - { 'path': '/hana/shared' } -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - db_high_availability is defined -# - db_high_availability - - -# - name: "ANF Mount: Set Permissons on HANA Directories ({{ item.path }})" -# ansible.builtin.file: -# owner: '{{ hdbadm_uid }}' -# group: sapsys -# path: "{{ item.path }}" -# state: directory -# recurse: true -# with_items: -# - "{{ hana_log_scaleout_mountpoint }}" -# - "{{ hana_data_scaleout_mountpoint }}" -# - { 'path': '/hana/shared' } -# - { 'path': '/usr/sap/{{ db_sid | upper }}' } -# when: -# - tier == 'sapos' -# - node_tier == 'hana' -# - not (db_high_availability | default(false)) -# - database_scale_out - -# ... 
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml
index 53dc91b2f8..82831e7665 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml
@@ -1,26 +1,32 @@
 ---
-- name: ORACLE ASM - Gather Logical volumes created in 1.5.1
+# /*---------------------------------------------------------------------------8
+# |                                                                            |
+# |                              Oracle ASM mounts                             |
+# |                                                                            |
+# +------------------------------------4--------------------------------------*/
+
+- name: "2.6.3 ORACLE ASM - Mounts: Gather Logical volumes created in 1.5.1"
   ansible.builtin.include_vars: disks_config_asm.yml
 
-- name: ORACLE ASM - Set the NFS Server name list
+- name: "2.6.3 ORACLE ASM - Mounts: Set the NFS Server name list"
   ansible.builtin.set_fact:
     nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}"
   with_items:
     - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}"
     - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
 
-- name: ORACLE ASM - Set the NFS Server name
+- name: "2.6.3 ORACLE ASM - Mounts: Set the NFS Server name"
   ansible.builtin.set_fact:
     nfs_server: "{{ nfs_server_temp | first }}"
   when: NFS_provider == "NONE"
 
-- name: ORACLE ASM - Check if LVs exists.
+- name: "2.6.3 ORACLE ASM - Mounts: Check if LVs exist."
   ansible.builtin.stat:
     path: "{{ dev_path_from_lv_item }}"
   loop: "{{ logical_volumes }}"
   register: oravgstat
 
-- name: "ORACLE ASM - Mount SAP Filesystems on Database for installation when using Shared File systems"
+- name: "2.6.3 ORACLE ASM - Mounts: Mount SAP Filesystems on Database for installation when using Shared File systems"
   ansible.posix.mount:
     src: "{{ item.src }}"
     path: "{{ item.path }}"
@@ -33,16 +39,16 @@
   when:
     - NFS_provider != "NONE"
 
-- name: ORACLE ASM - Print oravgstat
+- name: "2.6.3 ORACLE ASM - Mounts: Print oravgstat"
   ansible.builtin.debug:
     var: oravgstat
     verbosity: 2
 
-- name: ORACLE ASM - Gather existing LVs
+- name: "2.6.3 ORACLE ASM - Mounts: Gather existing LVs"
   ansible.builtin.set_fact:
     lvexists: "{{ oravgstat.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item.lv') | list }}"
 
-- name: ORACLE ASM - Print lvexists
+- name: "2.6.3 ORACLE ASM - Mounts: Print lvexists"
   ansible.builtin.debug:
     var: lvexists
     verbosity: 2
@@ -51,7 +57,7 @@
 
 # Same as 2.6.2-oracle-mounts.yaml from here on.
-- name: "ORACLE ASM : Create sap_deployment_automation folder" +- name: "2.6.3 ORACLE ASM - Mounts: Create sap_deployment_automation folder" become: true become_user: root ansible.builtin.file: @@ -61,12 +67,12 @@ owner: oracle group: oinstall -- name: "ORACLE ASM: Make orasid:dba as owners for Oracle directories - check flag file" +- name: "2.6.3 ORACLE ASM - Mounts: Make orasid:dba as owners for Oracle directories - check flag file" ansible.builtin.stat: path: /etc/sap_deployment_automation/filepermission.txt register: oracle_permissions_set -- name: "ORACLE ASM : Make oracle:oinstall as owners" +- name: "2.6.3 ORACLE ASM - Mounts: Make oracle:oinstall as owners" become: true become_user: root ansible.builtin.file: @@ -77,7 +83,7 @@ when: - not oracle_permissions_set.stat.exists -- name: "ORACLE ASM : Create filepermission.txt" +- name: "2.6.3 ORACLE ASM - Mounts: Create filepermission.txt" become: true become_user: root ansible.builtin.file: @@ -86,7 +92,7 @@ state: touch # Mount Filesystems -- name: "ORACLE ASM : Mount SAP File systems on Database for installation" +- name: "2.6.3 ORACLE ASM - Mounts: Mount SAP File systems on Database for installation" ansible.posix.mount: src: "{{ item.src }}" path: "{{ item.path }}" @@ -104,7 +110,7 @@ # Mount install file system on Observer node. -- name: "ORACLE ASM : Mount SAP : Mount Install folder when using AFS" +- name: "2.6.3 ORACLE ASM - Mounts: Mount Install folder when using AFS" ansible.posix.mount: src: "{{ item.src }}" path: "{{ item.path }}" @@ -121,7 +127,7 @@ - use_AFS # Debug for testing -- name: "ORACLE ASM : Print oracle filesystems" +- name: "2.6.3 ORACLE ASM - Mounts: Print oracle filesystems" ansible.builtin.debug: var: oracle_filesystem_mounts verbosity: 2 diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml index da56c24afc..1bf8c74c82 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml @@ -1,41 +1,46 @@ --- +# /*---------------------------------------------------------------------------8 +# | | +# | Prerequisites for Oracle ASM | +# | | +# +------------------------------------4--------------------------------------*/ -- name: Gather Logical volumes created in roles-os/1.5.1.1 - ansible.builtin.include_vars: disks_config_asm.yml +- name: "2.6.3 ORACLE ASM - Prerequisites: Gather Logical volumes created in roles-os/1.5.1.1" + ansible.builtin.include_vars: disks_config_asm.yml -- name: Set the NFS Server name list +- name: "2.6.3 ORACLE ASM - Prerequisites: Set the NFS Server name list" ansible.builtin.set_fact: nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" -- name: Set the NFS Server name +- name: "2.6.3 ORACLE ASM - Prerequisites: Set the NFS Server name" ansible.builtin.set_fact: nfs_server: "{{ nfs_server_temp | first }}" when: NFS_provider == "NONE" -- name: Check if LVs exists. +- name: "2.6.3 ORACLE ASM - Prerequisites: Check if LVs exists." 
   ansible.builtin.stat:
-  path: "{{ dev_path_from_lv_item }}"
-  loop: "{{ logical_volumes }}"
-  register: oravgstat
+    path: "{{ dev_path_from_lv_item }}"
+  loop: "{{ logical_volumes }}"
+  register: oravgstat
 
-- name: Print oravgstat
+- name: "2.6.3 ORACLE ASM - Prerequisites: Print oravgstat"
   ansible.builtin.debug:
     var: oravgstat
     verbosity: 2
 
-- name: Gather existing LVs
+- name: "2.6.3 ORACLE ASM - Prerequisites: Gather existing LVs"
   ansible.builtin.set_fact:
     lvexists: "{{ oravgstat.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item.lv') | list }}"
 
-- name: Print lvexists
+- name: "2.6.3 ORACLE ASM - Prerequisites: Print lvexists"
   ansible.builtin.debug:
     var: lvexists
     verbosity: 2
 
-- name: "SAP Mounts: - Create SAP Directories (sapmnt)"
+- name: "2.6.3 ORACLE ASM - Prerequisites: Create SAP Directories (sapmnt)"
   ansible.builtin.file:
     owner: root
     group: sapsys
@@ -45,13 +50,13 @@
   when:
     - node_tier == "oracle-asm"
 
-- name: "Mount SAP Filesystems on Database for usrsap and oracle"
+- name: "2.6.3 ORACLE ASM - Prerequisites: Mount SAP Filesystems on Database for usrsap and oracle"
   ansible.posix.mount:
-  src: "{{ item.src }}"
-  path: "{{ item.path }}"
-  fstype: "{{ item.type }}"
-  opts: defaults
-  state: mounted
+    src: "{{ item.src }}"
+    path: "{{ item.path }}"
+    fstype: "{{ item.type }}"
+    opts: defaults
+    state: mounted
   loop:
     - { type: 'xfs', src: '/dev/vg_sap/lv_usrsap', path: '/usr/sap' }
     - { type: 'xfs', src: '/dev/vg_oracle/lv_oracle', path: '/oracle' }
@@ -60,20 +65,20 @@
 
 # Mount Filesystems when AFS is not used.
-- name: "Mount SAP Filesystems on Database for installation when using NFS Cluster"
+- name: "2.6.3 ORACLE ASM - Prerequisites: Mount SAP Filesystems on Database for installation when using NFS Cluster"
   ansible.posix.mount:
-  src: "{{ item.src }}"
-  path: "{{ item.path }}"
-  fstype: "{{ item.type }}"
-  opts: defaults
-  state: mounted
+    src: "{{ item.src }}"
+    path: "{{ item.path }}"
+    fstype: "{{ item.type }}"
+    opts: defaults
+    state: mounted
   loop:
     - { type: 'nfs4', src: '{{ nfs_server }}:{{ target_media_location }}', path: '{{ target_media_location }}' }
     - { type: 'nfs4', src: '{{ nfs_server }}:/sapmnt/{{ db_sid | upper }}', path: '/sapmnt/{{ db_sid | upper }}' }
   when:
     - NFS_provider == "NONE"
 
-- name: "ORACLE: Create sap_deployment_automation folder"
+- name: "2.6.3 ORACLE ASM - Prerequisites: Create sap_deployment_automation folder"
   become: true
   become_user: root
   ansible.builtin.file:
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
index 3c64b643e3..a267fd1a96 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml
@@ -184,7 +184,7 @@
   when:
     - "'scs' in supported_tiers"
     - usr_sap_install_mountpoint is undefined
-    - not is_executed_by_acss or (is_executed_by_acss and single_server)
+    - not is_executed_by_acss or ( is_executed_by_acss and ( ansible_play_hosts_all | length == 1 ) )
 
 - name: "1.5 Disk setup - Check if installation root directory exists"
   ansible.builtin.stat:
@@ -268,7 +268,7 @@
     - node_tier != 'scs'
     - "'scs' not in supported_tiers"
     - usr_sap_install_mountpoint is not defined
-    - not is_executed_by_acss or (is_executed_by_acss and single_server)
+    - not is_executed_by_acss or ( is_executed_by_acss and ( ansible_play_hosts_all | length == 1 ) )
 
 # Mount File systems for SCS server in Multi-SID installations
 - name: "2.6 SAP Mounts: - Mount local sapmnt (scs)
for oracle shared home installation" diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 996e0a877e..4e78a47045 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -102,7 +102,7 @@ - name: "PAS Install: Check if the DB load balancer port is available and listening" ansible.builtin.wait_for: host: "{{ db_lb_virtual_host }}" - port: "3{{ db_instance_number }}13" + port: "625{{ db_instance_number }}" state: started timeout: 30 msg: 'INSTALL:0026:PAS Install failed, database is unreachable.' diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml index 1dbce279a1..cb0188ab8f 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.3-SAPHanaSR.yml @@ -175,10 +175,6 @@ changed_when: false register: hana_system_started - - name: Wait 5 minutes for SAP system to start - ansible.builtin.pause: - seconds: 300 - - name: Start HANA Database ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index e6f9471e0c..39bb58e634 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -9,6 +9,7 @@ # SAP HANA Cluster resources prep for ANF # https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-netapp-files-red-hat +# Additonal steps inherited from https://access.redhat.com/articles/6093611 # +------------------------------------4--------------------------------------*/ - name: "Backward Compatibility - Check required Database HA variables" @@ -48,9 +49,8 @@ changed_when: false register: hana_system_stopped - - name: Wait 2 minutes for SAP system to stop - ansible.builtin.pause: - seconds: 120 + - name: Stop HANA Database + ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-stop_hana.yml - name: "Configure ANF hana mounts on {{ primary_instance_name }}" when: ansible_hostname == primary_instance_name @@ -314,7 +314,7 @@ msg: "Failed to create ANF hana mounts on {{ secondary_instance_name }}" when: chk_nfs_mounts_enable_node2 | length > 0 - - name: Start HANA System on both nodes + - name: Start HANA System on node become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" @@ -322,7 +322,7 @@ changed_when: false register: hana_system_started - - name: Wait 5 minutes for SAP system to start - ansible.builtin.pause: - seconds: 300 + - name: Start HANA Database + ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml + # End of HANA clustering resources diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index 6dc775c753..d5e12eea6d 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -213,9 +213,9 @@ changed_when: false register: 
hana_system_started - - name: "5.5.4.1 HANA Pacemaker configuration - Wait 5 minutes for SAP system to start" + - name: "5.5.4.1 HANA Pacemaker configuration - Wait {{ hana_wait_for_start_in_sec }} seconds for SAP system to start" ansible.builtin.pause: - seconds: 300 + seconds: "{{ hana_wait_for_start_in_sec }}" - name: "5.5.4.1 HANA Pacemaker configuration - Ensure maintenance mode is disabled" ansible.builtin.command: crm configure property maintenance-mode=false diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml index 6c98063a6c..166428f264 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml @@ -121,17 +121,17 @@ # or cluster_group_moved.stderr is not search('is already active on') # ) - - name: "5.6 SCS/ERS Validation: Wait 300 seconds for the StartService {{ sap_sid | upper }} to finish" + - name: "5.6 SCS/ERS Validation: Wait {{ scs_wait_for_start_in_sec }} seconds for the StartService {{ sap_sid | upper }} to finish" ansible.builtin.debug: - msg: "Wait for 300 seconds for the StartService {{ sap_sid | upper }} to finish" + msg: "Wait for {{ scs_wait_for_start_in_sec }} seconds for the StartService {{ sap_sid | upper }} to finish" when: - ansible_hostname == primary_instance_name - cluster_group_location.stdout_lines != primary_instance_name - scs_running_on is not defined - - name: "5.6 SCS/ERS Validation: Wait 300 seconds for the StartService {{ sap_sid | upper }} to finish" + - name: "5.6 SCS/ERS Validation: Wait {{ scs_wait_for_start_in_sec }} seconds for the StartService {{ sap_sid | upper }} to finish" ansible.builtin.wait_for: - timeout: 300 + timeout: "{{ scs_wait_for_start_in_sec }}" when: - ansible_hostname == primary_instance_name - cluster_group_location.stdout_lines != primary_instance_name diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml index 9778d293ed..db714d06ce 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.1-set_runtime_facts.yml @@ -11,7 +11,7 @@ # ---------------------------------------- -- name: "5.5 HANA Pacemaker - Retrieve Subscription ID and Resource Group Name" +- name: "5.8 HANA Pacemaker Scaleout - Retrieve Subscription ID and Resource Group Name" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 use_proxy: false @@ -19,27 +19,27 @@ Metadata: true register: hanavmmetadata -- name: "5.5 HANA Pacemaker - Show IMDS results" +- name: "5.8 HANA Pacemaker Scaleout - Show IMDS results" ansible.builtin.debug: var: hanavmmetadata.json verbosity: 2 -- name: "5.5 HANA Pacemaker - Extract Subscription ID" +- name: "5.8 HANA Pacemaker Scaleout - Extract Subscription ID" ansible.builtin.set_fact: fencing_spn_subscription_id: "{{ hanavmmetadata.json.compute.subscriptionId }}" no_log: true -- name: "5.5 HANA Pacemaker - Extract ResourceGroup Name" +- name: "5.8 HANA Pacemaker Scaleout - Extract ResourceGroup Name" ansible.builtin.set_fact: resource_group_name: "{{ hanavmmetadata.json.compute.resourceGroupName }}" no_log: true -- name: "5.5 HANA Pacemaker - Set the primary intance nic and secondary instance nic IP" +- name: "5.8 HANA Pacemaker Scaleout - 
Set the primary instance nic and secondary instance nic IP"
   ansible.builtin.set_fact:
     primary_ip: "{{ hanavmmetadata.json.network.interface[0].ipv4.ipAddress[0].privateIpAddress }}"
     subnet_prefix: "{{ hanavmmetadata.json.network.interface[0].ipv4.subnet[0].prefix }}"

-- name: "5.5 HANA Pacemaker - Extract NIC IPs"
+- name: "5.8 HANA Pacemaker Scaleout - Extract NIC IPs"
   ansible.builtin.set_fact:
     primary_instance_ip_db: "{{ hostvars[primary_instance_name]['primary_ip'] | string }}"
     secondary_instance_ip_db: "{{ hostvars[secondary_instance_name]['primary_ip'] | string }}"
@@ -52,17 +52,17 @@
 #   ansible.builtin.set_fact:
 #     secondary_instance_ip_db: "{{ hostvars[secondary_instance_name]['ansible_eth0']['ipv4'][0]['address'] }}"

-- name: "5.5 HANA Pacemaker - Show Subscription ID"
+- name: "5.8 HANA Pacemaker Scaleout - Show Subscription ID"
   ansible.builtin.debug:
     var: fencing_spn_subscription_id
     verbosity: 2

-- name: "5.5 HANA Pacemaker - Show Resource Group Name"
+- name: "5.8 HANA Pacemaker Scaleout - Show Resource Group Name"
   ansible.builtin.debug:
     var: resource_group_name
     verbosity: 2

-- name: "5.5 HANA Pacemaker - Ensure HANA DB version is checked and captured"
+- name: "5.8 HANA Pacemaker Scaleout - Ensure HANA DB version is checked and captured"
   block:
     - name: Check HANA DB Version and register
       become_user: "{{ db_sid | lower }}adm"
@@ -71,18 +71,25 @@
       register: hdbversion
       changed_when: false

-    - name: "5.5 HANA Pacemaker - Capture the Hana DB version"
+    - name: "5.8 HANA Pacemaker Scaleout - Capture the Hana DB version"
       ansible.builtin.set_fact:
         hdb_version: "{{ hdbversion.stdout_lines.1.split().1 }}"

-    - name: "5.5 HANA Pacemaker - Show the HDB version"
+    - name: "5.8 HANA Pacemaker Scaleout - Show the HDB version"
       ansible.builtin.debug:
         var: hdb_version

-    - name: "5.5 HANA Pacemaker - Show the HDB version prefix"
+    - name: "5.8 HANA Pacemaker Scaleout - Show the HDB version prefix"
       ansible.builtin.debug:
         var: hdb_version[0:2]

+    - name: "5.8 HANA Pacemaker Scaleout - Check required Database HA variables"
+      ansible.builtin.set_fact:
+        database_high_availability: "{{ database_high_availability | default(false) }}"
+      when:
+        - database_high_availability is not defined
+
 # /*---------------------------------------------------------------------------8
 # |                                    END                                     |
 # +------------------------------------4--------------------------------------*/
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml
index 5188fe8e9d..a09c6baf72 100644
--- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml
+++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.2-pre_checks.yml
@@ -8,7 +8,7 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/

-- name: Check the fencing agent configuration variables are set
+- name: "5.8 HANA Pacemaker Scaleout - Check the fencing agent configuration variables are set"
   ansible.builtin.assert:
     that:
       - "fencing_spn_subscription_id is defined"
@@ -28,34 +28,52 @@
       - "password_ha_db_cluster | trim | length > 0"
     fail_msg: The cluster password is not defined

-- name: Check the required Clustering scripts are available
+- name: "5.8 HANA Pacemaker Scaleout - Check the required Clustering scripts are available"
   ansible.builtin.stat:
     path: "/usr/sbin/crm"
   register: cluster_scripts_status_results
   failed_when: not 
cluster_scripts_status_results.stat.exists when: ansible_os_family == 'SUSE' -- name: "HANA PCM Install: Create run flag directory" +- name: "5.8 HANA Pacemaker Scaleout - Create run flag directory" ansible.builtin.file: path: /etc/sap_deployment_automation state: directory mode: 0755 -- name: "HANA PCM Install: reset" +- name: "5.8 HANA Pacemaker Scaleout - Install: reset" ansible.builtin.file: path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt state: absent when: reinstall -- name: "HANA PCM Install: check if deployed" +- name: "5.8 HANA Pacemaker Scaleout - Install: check if deployed" ansible.builtin.stat: path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt register: hana_pacemaker -- name: Check if a cluster has already been prepared (save) +- name: "5.8 HANA Pacemaker Scaleout - Check if a cluster has already been prepared (save)" ansible.builtin.set_fact: hana_cluster_existence_check: "{{ hana_pacemaker.stat.exists }}" -- name: Check if a cluster has already been prepared (show) +- name: "5.8 HANA Pacemaker Scaleout - Check if a cluster has already been prepared (show)" ansible.builtin.debug: msg: "Cluster check return value: {{ hana_cluster_existence_check }}" + +- name: "5.8 HANA Pacemaker Scaleout - Wait for /hana/shared to be mounted" + ansible.builtin.wait_for: + path: /hana/shared + state: present + timeout: 60 + +- name: "5.8 HANA Pacemaker Scaleout - Check if /hana/shared is mounted" + ansible.builtin.shell: > + mountpoint -q /hana/shared + register: hana_shared_mounted + changed_when: false + failed_when: false + +- name: "5.8 HANA Pacemaker Scaleout - Fail if /hana/shared is not mounted" + ansible.builtin.fail: + msg: "Critical failure : /hana/shared is not mounted" + when: hana_shared_mounted.rc > 0 diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-RedHat.yml new file mode 100644 index 0000000000..1081641f79 --- /dev/null +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-RedHat.yml @@ -0,0 +1,197 @@ +--- + +# /*---------------------------------------------------------------------------8 +# | | +# | Implement the Python system replication hook SAPHanaSR-ScaleOut | +# | Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse#implement-hana-ha-hooks-saphanasrmultitarget-and-suschksrv +# | Begin: configuration for SAPHanaSR-ScaleOut python hook | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "5.8 HANA Pacemaker Scaleout - HANA 2.0 only - Implement the Python system replication hook SAPHanaSR-ScaleOut MultiTarget" + when: + - hdb_version[0:2] == "2." 
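+    # hdb_version is captured earlier in 5.8.1-set_runtime_facts.yml from the 'HDB version' output; the "2." prefix limits the hook setup below to HANA 2.0 systems.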
+    - database_high_availability
+  block:
+    - name: "5.8 HANA Pacemaker Scaleout - Generate list of deployed packages on current host"
+      ansible.builtin.package_facts:
+
+    # for RHEL, ensure resource-agents-sap-hana-scaleout is installed
+
+    - name: "5.8 HANA Pacemaker Scaleout - Ensure resource-agents-sap-hana is absent (REDHAT)"
+      ansible.builtin.package:
+        name: resource-agents-sap-hana
+        state: absent
+      when:
+        - ansible_facts.packages['resource-agents-sap-hana'] is defined
+
+    - name: "5.8 HANA Pacemaker Scaleout - Ensure resource-agents-sap-hana-scaleout is installed (REDHAT)"
+      ansible.builtin.package:
+        name: resource-agents-sap-hana-scaleout
+        state: present
+      when:
+        - ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined
+
+    # add package verification for RHEL based on link https://access.redhat.com/articles/3397471
+
+    - name: "5.8 HANA Pacemaker Scaleout - Check HANA DB Version and register"
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: /hana/shared/{{ db_sid | upper }}/HDB{{ db_instance_number }}/HDB version
+      register: hdb_version_output
+      changed_when: false
+
+    - name: "5.8 HANA Pacemaker Scaleout - Extract SAP HANA version number"
+      ansible.builtin.set_fact:
+        hana_version_str: "{{ hdb_version_output.stdout | regex_search('version:\\s+([\\d\\.]+)', '\\1') | first }}"
+
+    - name: "5.8 HANA Pacemaker Scaleout - Assert SAP HANA version is greater than SAP HANA 2.0 SP5"
+      ansible.builtin.assert:
+        that:
+          - hana_version_str is version('2.00.050', '>=')
+        fail_msg: "Installed HANA version is not greater than SAP HANA 2.0 SP5"
+        success_msg: "Installed HANA version is greater than SAP HANA 2.0 SP5"
+      register: hana_sp_version
+      when:
+        - hdb_version_output.stdout is search("version")
+
+    - name: "5.8 HANA Pacemaker Scaleout - Check if 'myHooks' file exists in /hana/shared directory"
+      ansible.builtin.stat:
+        path: /hana/shared/myHooks
+      register: my_hooks_stat
+
+    - name: "5.8 HANA Pacemaker Scaleout - Stop HANA System on both sites"
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem"
+      failed_when: false
+      changed_when: false
+      register: hana_system_stopped
+      when:
+        - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
+
+    - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stop"
+      ansible.builtin.debug:
+        msg: "5.8 HANA Pacemaker Scaleout - Wait {{ hana_wait_for_stop_in_sec }} seconds for SAP system to stop"
+
+    - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stop"
+      ansible.builtin.wait_for:
+        timeout: "{{ hana_wait_for_stop_in_sec }}"
+
+    - name: "5.8 HANA Pacemaker Scaleout - Copy /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py to /hana/shared/myHooks/ (RHEL)"
+      ansible.builtin.copy:
+        src: /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py
+        dest: /hana/shared/myHooks/
+        remote_src: true
+        owner: root
+        group: root # TODO - check the correct group once the python hook package is installed
+        mode: '0644'
+      when:
+        - ansible_os_family | upper == "REDHAT"
+        - not my_hooks_stat.stat.exists
+
+    - name: "5.8 HANA Pacemaker Scaleout - Change ownership of the directory"
+      ansible.builtin.file:
+        path: /hana/shared/myHooks
+        state: directory
+        recurse: true
+        owner: "{{ db_sid | lower }}adm"
+        group: sapsys
+
+    - name: "5.8 HANA Pacemaker Scaleout - Prepare global.ini for host name resolution"
+      community.general.ini_file:
+        path: "/hana/shared/{{ db_sid | 
upper }}/global/hdb/custom/config/global.ini"
+        section: "system_replication_hostname_resolution"
+        mode: 0644
+        state: present
+        option: "{{ hostvars[item].ansible_host }}"
+        value: "{{ hostvars[item].virtual_host }}"
+      with_items:
+        - "{{ groups[(sap_sid | upper)~'_DB' ] }}"
+
+    - name: "5.8 HANA Pacemaker Scaleout - Adjust global.ini on each cluster node ( RHEL without susChkSrv/susTkOver )"
+      ansible.builtin.blockinfile:
+        path: /hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini
+        block: |
+          [ha_dr_provider_SAPHanaSR]
+          provider = SAPHanaSR
+          path = /hana/shared/myHooks
+          execution_order = 1
+
+          [trace]
+          ha_dr_saphanasr = info
+      when:
+        - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name
+
+    # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-rhel?tabs=lb-portal#create-sap-hana-cluster-resources
+    - name: "5.8 HANA Pacemaker Scaleout - Create sudoers file for /etc/sudoers.d/20-saphana for RHEL"
+      ansible.builtin.template:
+        src: "20-saphana-rhel.j2"
+        dest: "/etc/sudoers.d/20-saphana"
+        mode: "0440"
+        owner: root
+        group: root
+      # validate: /usr/sbin/visudo -cf %s
+
+    - name: "5.8 HANA Pacemaker Scaleout - Start HANA System on both nodes"
+      become_user: "{{ db_sid | lower }}adm"
+      become: true
+      ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem"
+      failed_when: false
+      changed_when: false
+      register: hana_system_started
+
+    - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to start"
+      ansible.builtin.debug:
+        msg: "5.8 HANA Pacemaker Scaleout - Wait {{ hana_scaleout_wait_for_start_in_sec }} seconds for SAP system to start"
+
+    - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to start"
+      ansible.builtin.wait_for:
+        timeout: "{{ hana_scaleout_wait_for_start_in_sec }}"
+
+# REDHAT only
+# This needs to be run on all the nodes where HANA is deployed.
+  - name: "5.8 HANA Pacemaker Scaleout - Verify that the hook script is working as expected (REDHAT)"
+    when:
+      - ansible_hostname == primary_instance_name
+    block:
+      - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (REDHAT)"
+        become_user: "{{ db_sid | lower }}adm"
+        become: true
+        ansible.builtin.shell: |
+          set -o pipefail
+          awk '/ha_dr_SAPHanaSR.*crm_attribute/ \
+          { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_*
+        args:
+          chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace
+        register: saphanasr
+        until: saphanasr.stdout is search("SOK")
+        retries: 10
+        delay: 30
+    rescue:
+      - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize"
+        ansible.builtin.wait_for:
+          timeout: "{{ rescue_hsr_status_report_wait_in_s }}"
+
+      - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (REDHAT)"
+        become_user: "{{ db_sid | lower }}adm"
+        become: true
+        ansible.builtin.shell: |
+          set -o pipefail
+          awk '/ha_dr_SAPHanaSR.*crm_attribute/ \
+          { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_*
+        args:
+          chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace
+        register: saphanasr
+        until: saphanasr.stdout is search("SOK")
+        retries: 10
+        delay: 30
+
+    - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation"
+      ansible.builtin.debug:
+        var: saphanasr
+        verbosity: 2
+
+# Note: We do not configure Hook on Majority maker, only installation is needed. Unfortunately since this task runs on HANA VM's only, Majority maker is skipped. 
+# Hook packages are deployed on Majority maker in task 1.18-scaleout-pacemaker diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-Suse.yml similarity index 55% rename from deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml rename to deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-Suse.yml index 1b61373654..65370dacb3 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.3-SAPHanaSRMultiTarget-Suse.yml @@ -8,77 +8,52 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: HANA 2.0 only - Implement the Python system replication hook SAPHanaSR-ScaleOut MultiTarget +- name: "5.8 HANA Pacemaker Scaleout - HANA 2.0 only - Implement the Python system replication hook SAPHanaSR-ScaleOut MultiTarget" when: - hdb_version[0:2] == "2." - database_high_availability block: - - name: Generate list of deployed packages on current host + - name: "5.8 HANA Pacemaker Scaleout - Generate list of deployed packages on current host" ansible.builtin.package_facts: # SAPHanaSR-ScaleOut conflicts with SAPHanaSR and dependencies - - name: "Ensure SAPHanaSR package is absent" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAPHanaSR package is absent" ansible.builtin.package: name: SAPHanaSR state: absent when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['SAPHanaSR'] is defined - - name: "Ensure SAPHanaSR-doc package is absent" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAPHanaSR-doc package is absent" ansible.builtin.package: name: SAPHanaSR-doc state: absent when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['SAPHanaSR-doc'] is defined - - name: "Ensure yast2-sap-ha package is absent" + - name: "5.8 HANA Pacemaker Scaleout - Ensure yast2-sap-ha package is absent" ansible.builtin.package: name: yast2-sap-ha state: absent when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['yast2-sap-ha'] is defined # Ensure SAPHANA SR Scaleout package is installed - - name: "Ensure SAPHanaSR-ScaleOut package is installed" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAPHanaSR-ScaleOut package is installed" ansible.builtin.package: name: SAPHanaSR-ScaleOut state: present when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['SAPHanaSR-ScaleOut'] is not defined - - name: "Ensure SAPHanaSR-ScaleOut-doc package is installed" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAPHanaSR-ScaleOut-doc package is installed" ansible.builtin.package: name: SAPHanaSR-ScaleOut-doc state: present when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['SAPHanaSR-ScaleOut-doc'] is not defined - # for RHEL, ensure resource-agents-sap-hana-scaleout is installed - - - name: "Ensure resource-agents-sap-hana is absent (REDHAT)" - ansible.builtin.package: - name: resource-agents-sap-hana - state: absent - when: - - ansible_os_family | upper == "REDHAT" - - ansible_facts.packages['resource-agents-sap-hana'] is defined - - - name: "Ensure resource-agents-sap-hana-scaleout is installed (REDHAT)" - ansible.builtin.package: - name: resource-agents-sap-hana-scaleout - state: present - when: - - ansible_os_family | upper == "REDHAT" - - 
ansible_facts.packages['resource-agents-sap-hana-scaleout'] is not defined - - # add package verification for RHEL based on link https://access.redhat.com/articles/3397471 - - - name: "Verify SAPHanaSR-ScaleOut package version is greater than 0.180" + - name: "5.8 HANA Pacemaker Scaleout - Verify SAPHanaSR-ScaleOut package version is greater than 0.180" ansible.builtin.assert: that: - ansible_facts.packages['SAPHanaSR-ScaleOut'][0].version is version('0.180', '>=') @@ -86,21 +61,20 @@ success_msg: "SAPHanaSR-ScaleOut version is greater than 0.180" register: saphanasr_scaleout_version when: - - ansible_os_family | upper == "SUSE" - ansible_facts.packages['SAPHanaSR-ScaleOut'] is defined - - name: "HANA HSR: - Check HANA DB Version and register" + - name: "5.8 HANA Pacemaker Scaleout - Check HANA DB Version and register" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.command: /hana/shared/{{ db_sid | upper }}/HDB{{ db_instance_number }}/HDB version register: hdb_version_output changed_when: false - - name: "Extract SAP HANA version number" + - name: "5.8 HANA Pacemaker Scaleout - Extract SAP HANA version number" ansible.builtin.set_fact: hana_version_str: "{{ hdb_version_output.stdout | regex_search('version:\\s+([\\d\\.]+)', '\\1') | first }}" - - name: "Assert SAP HANA version is greater than SAP HANA 2.0 SP5" + - name: "5.8 HANA Pacemaker Scaleout - Assert SAP HANA version is greater than SAP HANA 2.0 SP5" ansible.builtin.assert: that: - hana_version_str is version('2.00.050', '>=') @@ -108,30 +82,28 @@ success_msg: "Installed HANA version is greater than SAP HANA 2.0 SP5" register: hana_sp_version when: - - ansible_os_family | upper == "SUSE" - hdb_version_output.stdout is search("version") - - name: "Set fact (is_susTkOver_ready) to determine if susTkOver is ready to be configured" + - name: "5.8 HANA Pacemaker Scaleout - Set fact (is_susTkOver_ready) to determine if susTkOver is ready to be configured" ansible.builtin.set_fact: is_susTkOver_ready: true when: - - ansible_os_family | upper == "SUSE" - hdb_version_output.stdout is search("version") - saphanasr_scaleout_version is defined - saphanasr_scaleout_version is success - hana_sp_version is defined - hana_sp_version is success - - name: "Set fact that susTkOver is ready to be configured" + - name: "5.8 HANA Pacemaker Scaleout - Set fact that susTkOver is ready to be configured" ansible.builtin.set_fact: configure_susTkOver: "{{ (is_susTkOver_ready is defined and is_susTkOver_ready) | ternary(true, false) }}" - - name: Check if "myHooks" file exists in /hana/shared directory + - name: "5.8 HANA Pacemaker Scaleout - Check if 'myHooks' file exists in /hana/shared directory" ansible.builtin.stat: path: /hana/shared/myHooks register: my_hooks_stat - - name: Stop HANA System on both sites + - name: "5.8 HANA Pacemaker Scaleout - Stop HANA System on both sites" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem" @@ -141,9 +113,13 @@ when: - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name - - name: Wait 2 minutes for SAP system to stop + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stop" + ansible.builtin.debug: + msg: "5.8 HANA Pacemaker Scaleout - Wait {{ hana_wait_for_stop_in_sec }} seconds for SAP system to stop" + + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stop" ansible.builtin.wait_for: - timeout: 120 + timeout: "{{ hana_wait_for_stop_in_sec }}" # This is not 
needed any more as we are going to use the default path of the hook script installed by the package. # - name: copy SAPHanaSR-ScaleOut.py (SUSE) @@ -158,19 +134,7 @@ # - ansible_os_family | upper == "SUSE" # - not my_hooks_stat.stat.exists - - name: Copy /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py to /hana/shared/myHooks/ (RHEL) - ansible.builtin.copy: - src: /usr/share/SAPHanaSR-ScaleOut/SAPHanaSR.py - dest: /hana/shared/myHooks/ - remote_src: true - owner: root - group: root # TODO - check the correct group once the python hook package is installed - mode: '0644' - when: - - ansible_os_family | upper == "REDHAT" - - not my_hooks_stat.stat.exists - - - name: Change ownership of the directory + - name: "5.8 HANA Pacemaker Scaleout - Change ownership of the directory" ansible.builtin.file: path: /hana/shared/myHooks state: directory @@ -178,35 +142,18 @@ owner: "{{ db_sid | lower }}adm" group: sapsys - - name: "Prepare global.ini for host name resolution" + - name: "5.8 HANA Pacemaker Scaleout - Prepare global.ini for host name resolution" community.general.ini_file: - path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" - section: "system_replication_hostname_resolution" - mode: 0644 - state: present - option: "{{ hostvars[item].ansible_host }}" - value: "{{ hostvars[item].virtual_host }}" + path: "/hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini" + section: "system_replication_hostname_resolution" + mode: 0644 + state: present + option: "{{ hostvars[item].ansible_host }}" + value: "{{ hostvars[item].virtual_host }}" with_items: - "{{ groups[(sap_sid | upper)~'_DB' ] }}" -# susTkOver is not available on REDHAT - - name: Adjust global.ini on each cluster node ( RHEL without susChkSrv/susTkOver ) - ansible.builtin.blockinfile: - path: /hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini - block: | - [ha_dr_provider_SAPHanaSR] - provider = SAPHanaSR - path = /hana/shared/myHooks - execution_order = 1 - - [trace] - ha_dr_saphanasr = info - when: - - not configure_susTkOver - - ansible_os_family | upper == "REDHAT" - - ansible_hostname == primary_instance_name or ansible_hostname == secondary_instance_name - - - name: Adjust global.ini on each cluster node ( with susChkSrv/susTkOver ) + - name: "5.8 HANA Pacemaker Scaleout - Adjust global.ini on each cluster node ( with susChkSrv/susTkOver )" ansible.builtin.blockinfile: path: /hana/shared/{{ db_sid | upper }}/global/hdb/custom/config/global.ini block: | @@ -231,30 +178,16 @@ # Ref: https://documentation.suse.com/sbp/sap-15/html/SLES4SAP-hana-scaleOut-PerfOpt-15/index.html#id-integrating-sap-hana-with-the-cluster # Note: Azure documentation is outdated w.r.t to SAP HANA hook. 
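+  # Orientation only: the rendered /etc/sudoers.d/20-saphana is expected to grant the
+  # <sid>adm user passwordless crm_attribute calls so the hook can publish SR state, e.g.
+  #   <sid>adm ALL=(ALL) NOPASSWD: /usr/sbin/crm_attribute -n hana_<sid>_site_srHook_*
+  # The authoritative entries come from the 20-saphana-suse.j2 template used below.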
- - name: Create sudoers file for /etc/sudoers.d/20-saphana for SLES - ansible.builtin.template: - src: "20-saphana-suse.j2" - dest: "/etc/sudoers.d/20-saphana" - mode: "0440" - owner: root - group: root - # validate: /usr/sbin/visudo -cf %s - when: - - ansible_os_family | upper == "SUSE" - - # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-rhel?tabs=lb-portal#create-sap-hana-cluster-resources - - name: Create sudoers file for /etc/sudoers.d/20-saphana for RHEL + - name: "5.8 HANA Pacemaker Scaleout - Create sudoers file for /etc/sudoers.d/20-saphana for SLES" ansible.builtin.template: - src: "20-saphana-rhel.j2" - dest: "/etc/sudoers.d/20-saphana" - mode: "0440" - owner: root - group: root + src: "20-saphana-suse.j2" + dest: "/etc/sudoers.d/20-saphana" + mode: "0440" + owner: root + group: root # validate: /usr/sbin/visudo -cf %s - when: - - ansible_os_family | upper == "REDHAT" - - name: Start HANA System on both nodes + - name: "5.8 HANA Pacemaker Scaleout - Start HANA System on both nodes" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" @@ -262,9 +195,13 @@ changed_when: false register: hana_system_started - - name: Wait 5 minutes for SAP system to start + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to start" + ansible.builtin.debug: + msg: "5.8 HANA Pacemaker Scaleout - Wait {{ hana_scaleout_wait_for_start_in_sec }} seconds for SAP system to start" + + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to start" ansible.builtin.wait_for: - timeout: 300 + timeout: "{{ hana_scaleout_wait_for_start_in_sec }}" # - name: Start HANA Database # ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml @@ -274,63 +211,19 @@ # old command: # awk '/ha_dr_SAPHanaSR-ScaleOut.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* # Verify that the SAPHanaSR-ScaleOut hook script is working as expected. - - name: Pause to give HANA replication time to stabilize - ansible.builtin.wait_for: - timeout: "{{ hsr_status_report_wait_in_s }}" - -# REDHAT only -# This needs to be run on all the nodes where HANA is deployed. 
- - name: Verify that the hook script is working as expected (REDHAT)" - when: - - ansible_os_family | upper == "REDHAT" - - ansible_hostname == primary_instance_name - block: - - name: "Verify the hook Installation (REDHAT)" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - set -o pipefail - awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 - args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace - register: saphanasr - until: saphanasr.stdout is search("SOK") - retries: 10 - delay: 30 - rescue: - - name: "[Rescue] - Pause to give HANA replication time to stabilize" - ansible.builtin.wait_for: - timeout: "{{ rescue_hsr_status_report_wait_in_s }}" + # - name: Pause to give HANA replication time to stabilize + # ansible.builtin.wait_for: + # timeout: "{{ hsr_status_report_wait_in_s }}" - - name: "[Rescue] - Verify the hook Installation (REDHAT)" - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.shell: | - set -o pipefail - awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 - args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace - register: saphanasr - until: saphanasr.stdout is search("SOK") - retries: 10 - delay: 30 - - -# SUSE only # Check on all nodes, status of SAPHanaSrMultiTarget Hook - - name: Verify that the hook script is working as expected (SUSE)" - when: - # - inventory_hostname == primary_instance_name - - ansible_os_family | upper == "SUSE" + - name: "5.8 HANA Pacemaker Scaleout - Verify that the hook script is working as expected (SUSE)" block: - - name: "Verify the hook Installation (SUSE)" + - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | set -o pipefail - grep SAPHanaSr.*init nameserver_*.trc | tail -3 + grep SAPHanaSr.*init nameserver_*.trc args: chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace register: SAPHanaSR_ScaleOut @@ -339,16 +232,16 @@ delay: 30 # when: inventory_hostname == primary_instance_name rescue: - - name: "[Rescue] - Pause to give HANA replication time to stabilize" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize" ansible.builtin.wait_for: timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - - name: "[Rescue] - Verify the hook Installation (SUSE)" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | set -o pipefail - grep SAPHanaSr.*init nameserver_*.trc | tail -3 + grep SAPHanaSr.*init nameserver_*.trc args: chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace register: SAPHanaSR_ScaleOut @@ -360,20 +253,19 @@ # SUSE only # Check on all nodes, status of susTkOver Hook - - name: Verify that the susTkOver hook script is working as expected (SUSE)" + - name: "5.8 HANA Pacemaker Scaleout - Verify that the susTkOver hook script is working as expected (SUSE)" when: # - inventory_hostname == primary_instance_name - - ansible_os_family | upper == "SUSE" - is_susTkOver_ready is defined - is_susTkOver_ready block: - - name: "Verify the hook Installation (SUSE)" + - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" 
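+        # The shell check below scans the HANA nameserver trace files for the hook's init entries; the retry loop gives system replication time to report an in-sync (SOK) state.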
become_user: "{{ db_sid | lower }}adm"
        become: true
        ansible.builtin.shell: |
          set -o pipefail
-          grep SAPHanaSr.*init nameserver_*.trc | tail -3
+          grep SAPHanaSr.*init nameserver_*.trc
        args:
          chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
        register: SAPHanaSR_ScaleOut
@@ -339,16 +232,16 @@
        delay: 30
      # when: inventory_hostname == primary_instance_name
      rescue:
-        - name: "[Rescue] - Pause to give HANA replication time to stabilize"
+        - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize"
          ansible.builtin.wait_for:
            timeout: "{{ rescue_hsr_status_report_wait_in_s }}"

-        - name: "[Rescue] - Verify the hook Installation (SUSE)"
+        - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)"
          become_user: "{{ db_sid | lower }}adm"
          become: true
          ansible.builtin.shell: |
            set -o pipefail
-            grep SAPHanaSr.*init nameserver_*.trc | tail -3
+            grep SAPHanaSr.*init nameserver_*.trc
          args:
            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
          register: SAPHanaSR_ScaleOut
@@ -360,20 +253,19 @@

 # SUSE only
 # Check on all nodes, status of susTkOver Hook
-    - name: Verify that the susTkOver hook script is working as expected (SUSE)"
+    - name: "5.8 HANA Pacemaker Scaleout - Verify that the susTkOver hook script is working as expected (SUSE)"
      when:
        # - inventory_hostname == primary_instance_name
-        - ansible_os_family | upper == "SUSE"
        - is_susTkOver_ready is defined
        - is_susTkOver_ready
      block:
-        - name: "Verify the hook Installation (SUSE)"
+        - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)"
          become_user: "{{ db_sid | lower }}adm"
          become: true
          ansible.builtin.shell: |
            set -o pipefail
-            grep HADR.*load.*susTkOver nameserver_*.trc | tail -3
-            grep susTkOver.init nameserver_*.trc | tail -3
+            grep HADR.*load.*susTkOver nameserver_*.trc
+            grep susTkOver.init nameserver_*.trc
          args:
            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
          register: susTkOver
@@ -382,17 +274,17 @@
          delay: 30
        # when: inventory_hostname == primary_instance_name
      rescue:
-        - name: "[Rescue] - Pause to give HANA replication time to stabilize"
+        - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize"
          ansible.builtin.wait_for:
            timeout: "{{ rescue_hsr_status_report_wait_in_s }}"

-        - name: "[Rescue] - Verify the hook Installation"
+        - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation"
          become_user: "{{ db_sid | lower }}adm"
          become: true
          ansible.builtin.shell: |
            set -o pipefail
-            grep HADR.*load.*susTkOver nameserver_*.trc | tail -3
-            grep susTkOver.init nameserver_*.trc | tail -3
+            grep HADR.*load.*susTkOver nameserver_*.trc
+            grep susTkOver.init nameserver_*.trc
          args:
            chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace
          register: susTkOver
@@ -402,7 +294,7 @@
        # when: inventory_hostname == primary_instance_name

-    - name: "Log that the hook script is working as expected"
+    - name: "5.8 HANA Pacemaker Scaleout - Log that the hook script is working as expected"
      block:

      - name: "Debug (SAPHanaSR_ScaleOut)"
@@ -410,26 +302,26 @@
          var: SAPHanaSR_ScaleOut
          verbosity: 2

-      - name: "set_fact (SAPHanaSR_ScaleOut)"
+      - name: "5.8 HANA Pacemaker Scaleout - set_fact (SAPHanaSR_ScaleOut)"
        ansible.builtin.set_fact:
          hsr_result: SAPHanaSR_ScaleOut.stdout

-      - name: "Debug (hsr_result)"
+      - name: "5.8 HANA Pacemaker Scaleout - Debug (hsr_result)"
        ansible.builtin.debug:
          var: hsr_result
          verbosity: 2

-      - name: "Assert HSR Hook verification is successful"
+      - name: "5.8 HANA Pacemaker Scaleout - Assert HSR Hook verification is successful"
        ansible.builtin.assert:
          that:
            - "'SFAIL' != hsr_result"
          fail_msg: "Unable to determine if HSR Hook is working"
        # when: inventory_hostname == primary_instance_name

-      - name: Verify the hook Installation
+      - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation"
        ansible.builtin.debug:
-          var: SAPHanaSR_ScaleOut
-          verbosity: 2
+          var: SAPHanaSR_ScaleOut
+          verbosity: 2

# Note: We do not configure Hook on Majority maker, only installation is needed. Unfortunately since this task runs on HANA VM's only, Majority maker is skipped. 
# Hook packages are deployed on Majority maker in task 1.18-scaleout-pacemaker diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml index 97f89e3a3b..9bdffe2970 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4-provision-ScaleOut.yml @@ -3,15 +3,19 @@ ########################################################################################### # This file calls the OS specific tasks to configure HANA specific clustering resources #8 ########################################################################################### +- name: "5.8 HANA Pacemaker Scaleout - HSR Create helper variables" + ansible.builtin.set_fact: + DB: "{{ db_sid | upper }}/HDB{{ db_instance_number }}" + db_sid_admin_user: "{{ db_sid | lower }}adm" +- name: "5.8 HANA Pacemaker Scaleout - configure pre-requisites" + ansible.builtin.include_tasks: "5.8.4.0-clusterPrep-ScaleOut-{{ ansible_os_family }}.yml" -# Clustering commands are based on the Host OS -- name: "5.8 HANADB Pacemaker - configure pre-requisites" - ansible.builtin.include_tasks: "5.8.4.0-clusterPrep-ScaleOut-{{ ansible_os_family }}.yml" +- name: "5.8 HANA Pacemaker Scaleout - import - SAP HanaSRMultiTarget" + ansible.builtin.include_tasks: 5.8.3-SAPHanaSRMultiTarget-{{ ansible_os_family }}.yml + +- name: "5.8 HANA Pacemaker Scaleout - configure cluster resources" + ansible.builtin.include_tasks: "5.8.4.1-cluster-ScaleOut-{{ ansible_os_family }}.yml" -- name: "5.8 HANADB Pacemaker - import - SAP HanaSRMultiTarget" - ansible.builtin.include_tasks: 5.8.3-SAPHanaSRMultiTarget.yml -- name: "5.8 HANADB Pacemaker - configure cluster resources" - ansible.builtin.include_tasks: "5.8.4.1-cluster-ScaleOut-{{ ansible_os_family }}.yml" ... diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml index fe9a0e91f4..741a9b6617 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-RedHat.yml @@ -11,20 +11,14 @@ # +------------------------------------4--------------------------------------*/ -- name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ database_high_availability | default(false) }}" - when: - - database_high_availability is defined - - database_high_availability is not defined # We set this to prevent code overflow. Since pacemaker code for both AFS and ANF are the same barring NFS mount options, we parameterize this parameter. 
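+# Example outcome: with NFS_provider == 'ANF' the filesystem resource mounts with the ANF-tuned option string (rsize/wsize 262144, nfsvers=4.1, _netdev); any other provider falls back to the AFS variant. Only this option string differs between the two code paths.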
-- name: "NFS Compatibility - set mount options based on NFS source" +- name: "5.8 HANA Pacemaker Scaleout - NFS Compatibility - set mount options based on NFS source" ansible.builtin.set_fact: - nfs_mount_options: "{% if NFS_provider == 'ANF' %}bind,defaults,rw,hard,rsize=262144,wsize=262144,proto=tcp,noatime,_netdev,nfsvers=4.1,lock,sec=sys{% else %}noresvport,defaults,rw,hard,proto=tcp,noatime,nfsvers=4.1,lock{% endif %}" + nfs_mount_options: "{% if NFS_provider == 'ANF' %}defaults,rw,hard,rsize=262144,wsize=262144,proto=tcp,noatime,_netdev,nfsvers=4.1,lock,sec=sys{% else %}noresvport,defaults,rw,hard,proto=tcp,noatime,nfsvers=4.1,lock{% endif %}" # Ref : https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-rhel?tabs=lb-portal#create-file-system-resources -- name: "Configure the ANF/AFS file system resources" +- name: "5.8 HANA Pacemaker Scaleout - Configure the ANF/AFS file system resources" when: - database_high_availability - database_scale_out @@ -39,37 +33,27 @@ # 4. Do not kill existing processes and attempt to unmount /hana/shared. Bad things will happen. # 5. No Seriously !! Terrible things will happen and you will have a hard time repairing the deployment - - name: Stop HANA System on both sites + - name: "5.8 HANA Pacemaker Scaleout - Stop HANA System on both sites" block: - - name: Execute HANA StopSystem on both sites - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem" - failed_when: false - changed_when: false - register: hana_system_stopped - when: ansible_hostname in [ primary_instance_name , secondary_instance_name ] - - - name: Wait 2 minutes for SAP system to stop + - name: "5.8 HANA Pacemaker Scaleout - Execute HANA StopSystem on both sites" + become_user: "{{ db_sid | lower }}adm" + become: true + ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem" + failed_when: false + changed_when: false + register: hana_system_stopped + when: ansible_hostname in [ primary_instance_name , secondary_instance_name ] + + - name: "5.8 HANA Pacemaker Scaleout - Wait 2 minutes for SAP system to stop" ansible.builtin.wait_for: timeout: 120 - - name: Unmount /hana/shared from all cluster participating nodes - block: - - name: "Comment out the mountpoint from '/etc/fstab' file" - ansible.builtin.replace: - path: /etc/fstab - regexp: "^{{ item }}" - replace: "# {{ item }}" - backup: true - loop: - - "{{ hana_shared_mountpoint[0] }}" - - "{{ hana_shared_mountpoint[1] }}" - - - name: "Configure pacemaker hana shared filesystem resources on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Configure pacemaker hana shared filesystem resources on {{ primary_instance_name }}" when: ansible_hostname == primary_instance_name + become: true + become_user: root block: - - name: "Configure NFS filesystem resource in Pacemaker for HSR sites" + - name: "5.8 HANA Pacemaker Scaleout - Configure NFS filesystem resource in Pacemaker for HSR sites" ansible.builtin.shell: > pcs resource create {{ item.fs_name }} --disabled ocf:heartbeat:Filesystem \ device="{{ item.fs_mount }}" directory="{{ item.fs_dir }}" fstype="nfs" \ @@ -81,22 +65,29 @@ failed_when: false ignore_errors: true loop: - - { fs_name: 'fs_hana_shared_s1',fs_mount: '{{ hana_shared_mountpoint[0] }}', fs_dir: '/hana/shared' } - - { fs_name: 'fs_hana_shared_s2',fs_mount: '{{ hana_shared_mountpoint[1] }}', fs_dir: '/hana/shared' } + - { fs_name: 'fs_hana_shared_s1',fs_mount: '{{ 
hana_shared_mountpoint[0] }}/shared', fs_dir: '/hana/shared' } + - { fs_name: 'fs_hana_shared_s2',fs_mount: '{{ hana_shared_mountpoint[1] }}/shared', fs_dir: '/hana/shared' } loop_control: loop_var: item - - name: "Check if NFS hana mounts did not error on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Check if NFS hana mounts did not error on {{ primary_instance_name }}" ansible.builtin.set_fact: - chk_nfs_mount_sites: "{{ nfs_mount_sites.results | selectattr('rc', 'ne', 0) | rejectattr('stderr', 'search', 'already exists') | default([]) | list }}" + chk_nfs_mount_sites: "{{ nfs_mount_sites.results | selectattr('rc', 'ne', 0) | rejectattr('stderr', 'search', 'already exists') | default([]) | list | select() }}" + + + - name: "5.8 HANA Pacemaker Scaleout - Check if NFS hana mounts did not error on {{ primary_instance_name }}" + ansible.builtin.debug: + msg: + - "nfs_mount_sites_results: {{ nfs_mount_sites.results }}" + - "chk_nfs_mount_sites: {{ chk_nfs_mount_sites }}" - - name: "Fail when NFS hana mounts errored on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Fail when NFS hana mounts errored on {{ primary_instance_name }}" ansible.builtin.fail: msg: "Failed to create NFS hana mounts on {{ primary_instance_name }}" when: - chk_nfs_mount_sites | length > 0 - - name: "Configure node attributes for primary site on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Configure node attributes for primary site on {{ primary_instance_name }}" ansible.builtin.shell: > pcs node attribute {{ item }} NFS_{{ db_sid | upper }}_SITE=S1 register: node_nfs_attribute_site1 @@ -105,7 +96,7 @@ with_items: - "{{ ansible_play_hosts_all[0::2] }}" - - name: "Configure node attributes for secondary site on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Configure node attributes for secondary site on {{ primary_instance_name }}" ansible.builtin.shell: > pcs node attribute {{ item }} NFS_{{ db_sid | upper }}_SITE=S2 register: node_nfs_attribute_site2 @@ -114,7 +105,7 @@ with_items: - "{{ ansible_play_hosts_all[1::2] }}" - - name: "Configure location constraint for filesystem resource clone on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Configure location constraint for filesystem resource clone on {{ primary_instance_name }}" ansible.builtin.shell: > pcs constraint location {{ item.clone_name }} rule resource-discovery=never score=-INFINITY NFS_{{ db_sid | upper }}_SITE ne {{ item.site_code }} register: location_nfs_attribute_sites @@ -124,17 +115,27 @@ - { clone_name: 'fs_hana_shared_s1-clone', site_code: 'S1'} - { clone_name: 'fs_hana_shared_s2-clone', site_code: 'S2'} - - name: "Check if NFS hana mounts constraints did not error on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Check if NFS hana mounts constraints did not error on {{ primary_instance_name }}" ansible.builtin.set_fact: chk_location_nfs_attribute_sites: "{{ location_nfs_attribute_sites.results | selectattr('rc', 'ne', 0) | rejectattr('stderr', 'search', 'already exists') | default([]) | list }}" - - name: "Fail when NFS hana mounts errored on {{ primary_instance_name }}" + - name: "5.8 HANA Pacemaker Scaleout - Fail when NFS hana mounts errored on {{ primary_instance_name }}" ansible.builtin.fail: msg: "Failed to create NFS hana mounts on {{ primary_instance_name }}" when: - chk_location_nfs_attribute_sites | length > 0 - - name: Activate filesystem resource on {{ primary_instance_name }} + - name: "5.8 HANA 
Pacemaker Scaleout - Comment out the mountpoint from '/etc/fstab' file" + ansible.builtin.replace: + path: /etc/fstab + regexp: "^{{ item }}" + replace: "# {{ item }}" + backup: true + loop: + - "{{ hana_shared_mountpoint[0] }}" + - "{{ hana_shared_mountpoint[1] }}" + + - name: "5.8 HANA Pacemaker Scaleout - Activate filesystem resource on {{ primary_instance_name }}" ansible.builtin.shell: > pcs resource enable {{ item.fs_name }} register: activate_nfs_mount_sites @@ -147,7 +148,7 @@ - chk_location_nfs_attribute_sites | length == 0 - chk_nfs_mount_sites | length == 0 - - name: Configure pacemaker attribute resource on {{ primary_instance_name }} + - name: "5.8 HANA Pacemaker Scaleout - Configure pacemaker attribute resource on {{ primary_instance_name }}" ansible.builtin.shell: > pcs resource create {{ item.res_name }} ocf:pacemaker:attribute active_value=true \ inactive_value=false name={{ item.res_name }} \ @@ -159,7 +160,7 @@ - { res_name: 'hana_nfs_s1_active' } - { res_name: 'hana_nfs_s2_active' } - - name: Create constraints for pacemaker attribute resource on {{ primary_instance_name }} + - name: "5.8 HANA Pacemaker Scaleout - Create constraints for pacemaker attribute resource on {{ primary_instance_name }}" ansible.builtin.shell: > pcs constraint order fs_hana_shared_s1-clone then hana_nfs_s1_active-clone register: loc_attribute_hana_nfs_sites @@ -169,7 +170,7 @@ - { fs_clone: 'fs_hana_shared_s1-clone', res_clone: 'hana_nfs_s1_active-clone' } - { fs_clone: 'fs_hana_shared_s2-clone', res_clone: 'hana_nfs_s2_active-clone' } - - name: Wait for /hana/shared to become available on all participating nodes + - name: "5.8 HANA Pacemaker Scaleout - Wait for /hana/shared to become available on all participating nodes" block: - name: Wait for /hana/shared to be mounted ansible.builtin.wait_for: @@ -177,65 +178,75 @@ state: present timeout: 300 - - name: Check if /hana/shared is mounted + - name: "5.8 HANA Pacemaker Scaleout - Check if /hana/shared is mounted" ansible.builtin.shell: > mountpoint -q /hana/shared register: hana_shared_mounted changed_when: false failed_when: false - - name: Fail if /hana/shared is not mounted + - name: "5.8 HANA Pacemaker Scaleout - Fail if /hana/shared is not mounted" ansible.builtin.fail: msg: "Critical failure : /hana/shared is not mounted" when: hana_shared_mounted.rc > 0 - # Note: We need to manually start HANA on all participating nodes via HDB script. - - name: Start HANA database on each participating node - become_user: "{{ db_sid | lower }}adm" - args: - chdir: "/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}" - become: true - ansible.builtin.shell: > - source /usr/sap/{{ db_sid | upper }}/home/.sapenv.sh && - HDB start - failed_when: hdb_start.rc > 0 - changed_when: false - register: hdb_start - - - name: Start HANA System on both sites - become_user: "{{ db_sid | lower }}adm" - become: true - ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" - failed_when: hana_system_started.rc > 0 - changed_when: false - register: hana_system_started - when: - - ansible_hostname in [ primary_instance_name , secondary_instance_name ] - - - name: Wait 5 minutes for SAP system to stablize + - name: "5.8 HANA Pacemaker Scaleout - Start HANA on both nodes" + ansible.builtin.include_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml + when: + - ansible_hostname in [ primary_instance_name , secondary_instance_name ] + + # # Note: We need to manually start HANA on all participating nodes via HDB script. 
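+      # The commented-out tasks below are retained for reference only; startup is now delegated to the shared helper 04.01-start_hana.yml included above.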
+ # - name: Start HANA database on each participating node + # become_user: "{{ db_sid | lower }}adm" + # args: + # chdir: "/usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}" + # become: true + # ansible.builtin.shell: > + # source /usr/sap/{{ db_sid | upper }}/home/.sapenv.sh && + # HDB start + # failed_when: hdb_start.rc > 0 + # changed_when: false + # register: hdb_start + + # - name: Start HANA System on both sites + # become_user: "{{ db_sid | lower }}adm" + # become: true + # ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" + # failed_when: hana_system_started.rc > 0 + # changed_when: false + # register: hana_system_started + # when: + # - ansible_hostname in [ primary_instance_name , secondary_instance_name ] + + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stabilize (debug)" + ansible.builtin.debug: + msg: "5.8 HANA Pacemaker Scaleout - Wait {{ hana_scaleout_wait_for_start_in_sec }} seconds for SAP system to stabilize" + + - name: "5.8 HANA Pacemaker Scaleout - Wait for SAP system to stabilize" ansible.builtin.wait_for: - timeout: 300 + timeout: "{{ hana_scaleout_wait_for_start_in_sec }}" # End of HANA filesystem clustering resources # Ref : https://access.redhat.com/articles/3004101 - 4.3 Configure general cluster properties -- name: "Configure general cluster properties" +- name: "5.8 HANA Pacemaker Scaleout - Configure general cluster properties" + become: true + become_user: root when: - ansible_hostname == primary_instance_name block: - - name: "Set resource stickiness value to 1000" + - name: "5.8 HANA Pacemaker Scaleout - Set resource stickiness value to 1000" ansible.builtin.shell: > - pcs resource defaults resource-stickiness=1000 - register: res_stickiness - failed_when: res_stickiness.rc > 0 - changed_when: false + pcs resource defaults update resource-stickiness=1000 + register: res_stickiness + failed_when: res_stickiness.rc > 0 + changed_when: false - - name: "Set migration threshold value to 5000" + - name: "5.8 HANA Pacemaker Scaleout - Set migration threshold value to 5000" ansible.builtin.shell: > - pcs resource defaults migration-threshold=5000 - register: mig_threshold - failed_when: mig_threshold.rc > 0 - changed_when: false - + pcs resource defaults update migration-threshold=5000 + register: mig_threshold + failed_when: mig_threshold.rc > 0 + changed_when: false # ⠀⠀⠀⠀⠀⠀⣠⡤⠶⠒⢛⢻⠛⠛⠛⠛⠛⠛⢿⣛⡓⠶⢦⣤⠀⠀⠀⠀⠀⠀ # ⠀⠀⠀⠀⠀⡴⡫⠒⠊⠁⠀⣸⠀⠀⠀⠀⠀⠀⢹⠀⠀⠁⠒⡏⢳⡄⠀⠀⠀⠀ diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml index ee300c993e..14d529442c 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.0-clusterPrep-ScaleOut-Suse.yml @@ -5,18 +5,11 @@ # Ref: https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-netapp-files-suse#create-file-system-resources -- name: "Backward Compatibility - Check required Database HA variables" - ansible.builtin.set_fact: - database_high_availability: "{{ database_high_availability | default(false) }}" - when: - - database_high_availability is defined - - database_high_availability is not defined - -- name: "NFS Compatibility - set mount options based on NFS source" +- name: "5.8 HANA Pacemaker Scaleout 
- NFS Compatibility - set mount options based on NFS source" ansible.builtin.set_fact: nfs_mount_options: "{% if NFS_provider == 'ANF' %}bind,defaults,rw,hard,rsize=262144,wsize=262144,proto=tcp,noatime,_netdev,nfsvers=4.1,lock,sec=sys{% else %}bind,defaults,rw,hard,proto=tcp,noatime,nfsvers=4.1,lock{% endif %}" -- name: "Scale-Out Cluster Compatibility - Fetch majority maker node name" +- name: "5.8 HANA Pacemaker Scaleout - Scale-Out Cluster Compatibility - Fetch majority maker node name" ansible.builtin.set_fact: majority_maker: "{{ (query('inventory_hostnames', '{{ sap_sid | upper }}_OBSERVER_DB'))[0] }}" @@ -26,7 +19,7 @@ - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 block: - - name: "Create dummy file system cluster resource for monitoring" + - name: "5.8 HANA Pacemaker Scaleout - Create dummy file system cluster resource for monitoring" ansible.builtin.file: path: "{{ item.folderpath }}" state: directory diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml index 88715aa4be..3d81e83c19 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-RedHat.yml @@ -14,59 +14,59 @@ # +------------------------------------4--------------------------------------*/ # Fetch the majority maker node OS Hostname as we need to create a constraint to prevent HANA resources from running on it -- name: "Scale-Out Cluster Compatibility - Fetch majority maker node name" +- name: "5.8 HANA Pacemaker Scaleout - Scale-Out Cluster Compatibility - Fetch majority maker node name" ansible.builtin.set_fact: - majority_maker: "{{ (query('inventory_hostnames', '{{ sap_sid | upper }}_OBSERVER_DB'))[0] }}" + majority_maker: "{{ (query('inventory_hostnames', '{{ sap_sid | upper }}_OBSERVER_DB'))[0] }}" -- name: "5.5.4.1 HANA Cluster configuration - Optimise the Pacemaker cluster for SAP HANA" +- name: "5.8 HANA Pacemaker Scaleout - Optimise the Pacemaker cluster for SAP HANA" block: - - name: "5.5.4.1 HANA Cluster configuration - Get the cluster maintenance mode status" + - name: "5.8 HANA Pacemaker Scaleout - Get the cluster maintenance mode status" ansible.builtin.shell: pcs property show maintenance-mode register: get_status_maintenance_mode changed_when: false ignore_errors: true - - name: "5.5.4.1 HANA Cluster configuration - Set the cluster maintenance mode if not already in maintenance mode" + - name: "5.8 HANA Pacemaker Scaleout - Set the cluster maintenance mode if not already in maintenance mode" ansible.builtin.shell: pcs property set maintenance-mode=true when: >- get_status_maintenance_mode.stdout is not search('maintenance-mode') or get_status_maintenance_mode.stdout is search('maintenance-mode: false') - - name: "5.5.4.1 HANA cluster resource configuration - RHEL 7" + - name: "5.8 HANA Pacemaker Scaleout - RHEL 7" when: - ansible_distribution_major_version == "7" block: - - name: Ensure the SAP topology resource is configured and cloned + - name: "5.8 HANA Pacemaker Scaleout - Ensure the SAP topology resource is configured and cloned" ansible.builtin.shell: > pcs resource create SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaTopologyScaleOut \ SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} \ op start timeout=600 op stop timeout=300 op 
monitor interval=10 timeout=600 \ clone meta clone-node-max=1 interleave=true - register: hana_t - failed_when: hana_t.rc > 0 + register: hana_t + failed_when: hana_t.rc > 0 - - name: Ensure the SAP HANA instance resource is created + - name: "5.8 HANA Pacemaker Scaleout - Ensure the SAP HANA instance resource is created" ansible.builtin.shell: > pcs resource create SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHanaController \ SID={{ db_sid | upper }} InstanceNumber={{ db_instance_number }} PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=true \ op start interval=0 timeout=3600 op stop interval=0 timeout=3600 op promote interval=0 timeout=3600 \ op monitor interval=60 role="Master" timeout=700 op monitor interval=61 role="Slave" timeout=700 - register: sap_hana - failed_when: sap_hana.rc > 0 + register: sap_hana + failed_when: sap_hana.rc > 0 - - name: Ensure master-slave (msl) resource for managing an SAP HANA instance is created + - name: "5.8 HANA Pacemaker Scaleout - Ensure master-slave (msl) resource for managing an SAP HANA instance is created" ansible.builtin.shell: > pcs resource master msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ meta master-max="1" clone-node-max=1 interleave=true - register: msl_sap_hana - failed_when: msl_sap_hana.rc > 0 + register: msl_sap_hana + failed_when: msl_sap_hana.rc > 0 - - name: Ensure the netcat resource for the Load Balancer Healthprobe is created + - name: "5.8 HANA Pacemaker Scaleout - Ensure the netcat resource for the Load Balancer Healthprobe is created" ansible.builtin.shell: pcs resource create nc_{{ db_sid | upper }}_{{ db_instance_number }} azure-lb port=625{{ db_instance_number }} - register: netcat - failed_when: netcat.rc > 0 + register: netcat + failed_when: netcat.rc > 0 - - name: Ensure the Virtual IP resource for the Load Balancer Front End IP is created + - name: "5.8 HANA Pacemaker Scaleout - Ensure the Virtual IP resource for the Load Balancer Front End IP is created" ansible.builtin.shell: pcs resource create vip_{{ db_sid | upper }}_{{ db_instance_number }} ocf:heartbeat:IPaddr2 ip={{ database_loadbalancer_ip }} op monitor interval="10s" timeout="20s" register: vip failed_when: vip.rc > 0 @@ -186,10 +186,10 @@ failed_when: nfs_constraint.rc > 0 - - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" + - name: "5.8 HANA Pacemaker Scaleout - Disable Maintenance mode for the cluster" ansible.builtin.shell: pcs property set maintenance-mode=false - - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 7" + - name: "5.8 HANA Pacemaker Scaleout - Wait until cluster has stabilized on RHEL 7" ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' register: cluster_stable_check retries: 12 @@ -198,7 +198,7 @@ when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" # '*' is a special character in regexp and needs to be escaped for literal matching # if we are worried about character spacing across distros we can match for '\* Online:' - - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 8 or 9" + - name: "5.8 HANA Pacemaker Scaleout - Wait until cluster has stabilized on RHEL 8 or 9" ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' register: cluster_stable_check retries: 12 @@ -206,11 +206,11 @@ until: "(primary_instance_name + ' ' + 
secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" when: ansible_distribution_major_version in ["8", "9"] - - name: "5.5.4.1 HANA Cluster configuration - Cleanup any stale cluster resource StartSystem" + - name: "5.8 HANA Pacemaker Scaleout - Cleanup any stale cluster resource StartSystem" ansible.builtin.shell: pcs resource cleanup # the leading spaces are irrelevant here as we are looking for *Started: - - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 7" + - name: "5.8 HANA Pacemaker Scaleout - Ensure Cluster resources are started on RHEL 7" ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:' register: hana_cluster_resource_check retries: 12 @@ -218,7 +218,7 @@ until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" - - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 8 or 9" + - name: "5.8 HANA Pacemaker Scaleout - Ensure Cluster resources are started on RHEL 8 or 9" ansible.builtin.shell: set -o pipefail && pcs resource status | grep '\* Started:' register: hana_cluster_resource_check retries: 12 @@ -236,17 +236,17 @@ # +------------------------------------4--------------------------------------*/ # Follow steps described in https://access.redhat.com/articles/6884531 -- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" +- name: "5.8 HANA Pacemaker Scaleout - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" -- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" +- name: "5.8 HANA Pacemaker Scaleout - (systemd) Creating drop-in file" become: true when: - is_rhel_82_or_newer is defined - is_rhel_82_or_newer block: - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + - name: "5.8 HANA Pacemaker Scaleout - (systemd) Create drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf create: true @@ -256,7 +256,7 @@ mode: '0644' line: "[Unit]" - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + - name: "5.8 HANA Pacemaker Scaleout - (systemd) Update drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf create: true @@ -267,7 +267,7 @@ insertafter: '^[Unit]$' line: "Description=Pacemaker needs the SAP HANA instance service" - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + - name: "5.8 HANA Pacemaker Scaleout - (systemd) Update drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf create: true @@ -278,7 +278,7 @@ insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" - - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + - name: "5.8 HANA Pacemaker Scaleout - (systemd) Update drop-in file" ansible.builtin.lineinfile: path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf create: true @@ -290,7 +290,7 @@ line: 
"After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" register: dropinfile - - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + - name: "5.8 HANA Pacemaker Scaleout - systemd reload" ansible.builtin.systemd: daemon_reload: true when: diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml index 4da049101f..4f1c021df9 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml @@ -6,12 +6,12 @@ # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal # This code contains references to terms that Microsoft no longer uses. When these terms are removed from the software, we'll remove them from this article. -- name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Cluster STONITH is configured" +- name: "5.8 HANA Pacemaker Scaleout - Ensure the Cluster STONITH is configured" block: - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled" + - name: "5.8 HANA Pacemaker Scaleout - Ensure maintenance mode is enabled" ansible.builtin.command: crm configure property maintenance-mode=true - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + - name: "5.8 HANA Pacemaker Scaleout - Ensure CIB Bootstrap Options are set" when: - (database_cluster_type == "ASD") or (database_cluster_type == "ISCSI") @@ -22,7 +22,7 @@ stonith-action="reboot" stonith-timeout="144s" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + - name: "5.8 HANA Pacemaker Scaleout - Ensure CIB Bootstrap Options are set" when: database_cluster_type not in ["ISCSI", "ASD"] ansible.builtin.command: > crm configure property \$id="cib-bootstrap-options" @@ -31,13 +31,13 @@ stonith-action="reboot" stonith-timeout="900s" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Resource Defaults are configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure the Resource Defaults are configured" ansible.builtin.shell: > crm configure rsc_defaults \$id="rsc-options" resource-stickiness="1000" migration-threshold="5000" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure file system cluster resources are created for for cluster site" + - name: "5.8 HANA Pacemaker Scaleout - Ensure file system cluster resources are created for for cluster site" ansible.builtin.shell: > crm configure primitive fs_{{ db_sid | upper }}_HDB{{db_instance_number}}_fscheck Filesystem params device="/hana/shared/{{ db_sid | upper }}/check" @@ -59,7 +59,7 @@ # Operation Default recommendation from section 5.3.1 in https://www.suse.com/media/white-paper/suse_linux_enterprise_server_for_sap_applications_12_sp1.pdf#page=26 # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal#create-sap-hana-cluster-resources - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Topology resource is configured on cluster site" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA Topology resource is configured on cluster site" ansible.builtin.shell: > crm configure primitive rsc_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ 
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml index 4da049101f..4f1c021df9 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.4.1-cluster-ScaleOut-Suse.yml @@ -6,12 +6,12 @@ # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal # This code contains references to terms that Microsoft no longer uses. When these terms are removed from the software, we'll remove them from this article. -- name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Cluster STONITH is configured" +- name: "5.8 HANA Pacemaker Scaleout - Ensure the Cluster STONITH is configured" block: - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled" + - name: "5.8 HANA Pacemaker Scaleout - Ensure maintenance mode is enabled" ansible.builtin.command: crm configure property maintenance-mode=true - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + - name: "5.8 HANA Pacemaker Scaleout - Ensure CIB Bootstrap Options are set" when: - (database_cluster_type == "ASD") or (database_cluster_type == "ISCSI") @@ -22,7 +22,7 @@ stonith-action="reboot" stonith-timeout="144s" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure CIB Bootstrap Options are set" + - name: "5.8 HANA Pacemaker Scaleout - Ensure CIB Bootstrap Options are set" when: database_cluster_type not in ["ISCSI", "ASD"] ansible.builtin.command: > crm configure property \$id="cib-bootstrap-options" @@ -31,13 +31,13 @@ stonith-action="reboot" stonith-timeout="900s" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure the Resource Defaults are configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure the Resource Defaults are configured" ansible.builtin.shell: > crm configure rsc_defaults \$id="rsc-options" resource-stickiness="1000" migration-threshold="5000" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure file system cluster resources are created for for cluster site" + - name: "5.8 HANA Pacemaker Scaleout - Ensure file system cluster resources are created for cluster site" ansible.builtin.shell: > crm configure primitive fs_{{ db_sid | upper }}_HDB{{db_instance_number}}_fscheck Filesystem params device="/hana/shared/{{ db_sid | upper }}/check" @@ -59,7 +59,7 @@ # Operation Default recommendation from section 5.3.1 in https://www.suse.com/media/white-paper/suse_linux_enterprise_server_for_sap_applications_12_sp1.pdf#page=26 # Ref: https://learn.microsoft.com/en-us/azure/sap/workloads/sap-hana-high-availability-scale-out-hsr-suse?tabs=lb-portal#create-sap-hana-cluster-resources - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Topology resource is configured on cluster site" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA Topology resource is configured on cluster site" ansible.builtin.shell: > crm configure primitive rsc_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ ocf:suse:SAPHanaTopology \ @@ -70,7 +70,7 @@ register: sap_hana_topology failed_when: sap_hana_topology.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Topology clone set resource is configured on cluster site" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA Topology clone set resource is configured on cluster site" ansible.builtin.shell: > crm configure clone cln_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ rsc_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ @@ -81,7 +81,7 @@ # We recommend as a best practice that you only set AUTOMATED_REGISTER to no, while performing thorough fail-over tests, # to prevent failed primary instance to automatically register as secondary. # Once the fail-over tests have completed successfully, set AUTOMATED_REGISTER to yes, so that after takeover system replication can resume automatically. - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Controller is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA Controller is configured" ansible.builtin.shell: > crm configure primitive rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} ocf:suse:SAPHanaController \ op start interval="0" timeout="3600" \ @@ -94,7 +94,7 @@ register: sap_hana_controller failed_when: sap_hana_controller.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA master-slave resource is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA master-slave resource is configured" ansible.builtin.shell: > crm configure ms msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ @@ -102,7 +102,7 @@ register: sap_hana_msl failed_when: sap_hana_msl.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA Virtual IP resource is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA Virtual IP resource is configured" ansible.builtin.shell: > crm configure primitive rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} ocf:heartbeat:IPaddr2 meta target-role="Started" @@ -119,13 +119,13 @@ # crm configure primitive rsc_nc_{{ db_sid | upper }}_HDB{{ instance_number }} anything # params binfile="/usr/bin/socat" cmdline_options="-U TCP-LISTEN:625{{ instance_number }},backlog=10,fork,reuseaddr /dev/null" # op monitor timeout=20s interval=10 depth=0 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure SAP HANA azure-lb resource agent is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure SAP HANA azure-lb resource agent is configured" ansible.builtin.shell: > crm configure primitive rsc_nc_{{ db_sid | upper }}_HDB{{ db_instance_number }} azure-lb port=625{{ db_instance_number }} meta resource-stickiness=0 register: sap_hana_nc_ip failed_when: sap_hana_nc_ip.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure Group IP Address resource is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure Group IP Address resource is configured" ansible.builtin.shell: > crm configure group g_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} @@ -133,7 +133,7 @@ register: sap_hana_g_ip failed_when: sap_hana_g_ip.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure Co-Location constraint is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure Co-Location constraint is configured" ansible.builtin.shell: >
crm configure colocation col_saphana_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} 4000: @@ -142,7 +142,7 @@ register: sap_hana_g_col_ip failed_when: sap_hana_g_col_ip.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure Resource order is configured" + - name: "5.8 HANA Pacemaker Scaleout - Ensure Resource order is configured" ansible.builtin.shell: > crm configure order ord_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} Optional: @@ -152,37 +152,37 @@ failed_when: sap_hana_ord_ip.rc > 1 # Ensure the first entry of observer_db host group is excluded from running cluster resources - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure master slave configuration does not runs on majority maker node" + - name: "5.8 HANA Pacemaker Scaleout - Ensure master-slave configuration does not run on majority maker node" ansible.builtin.shell: > crm configure location loc_SAPHanaCon_not_on_majority_maker msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} -inf: {{ majority_maker }} register: sap_hana_msl_loc_mm failed_when: sap_hana_msl_loc_mm.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure clone set does not runs on majority maker" + - name: "5.8 HANA Pacemaker Scaleout - Ensure clone set does not run on majority maker" ansible.builtin.shell: > sudo crm configure location loc_SAPHanaTop_not_on_majority_maker cln_SAPHanaTopology_{{ db_sid | upper }}_HDB{{ db_instance_number }} -inf: {{ majority_maker }} register: sap_hana_cln_mm failed_when: sap_hana_cln_mm.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure cluster stickiness parameter" + - name: "5.8 HANA Pacemaker Scaleout - Configure cluster stickiness parameter" ansible.builtin.shell: > crm configure rsc_defaults resource-stickiness=1000 register: sap_hana_stickiness failed_when: sap_hana_stickiness.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure cluster default migration threshold" + - name: "5.8 HANA Pacemaker Scaleout - Configure cluster default migration threshold" ansible.builtin.shell: > crm configure rsc_defaults migration-threshold=50 register: sap_hana_migration failed_when: sap_hana_migration.rc > 1 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure any required cluster resources are cleaned up" + - name: "5.8 HANA Pacemaker Scaleout - Ensure any required cluster resources are cleaned up" ansible.builtin.command: "crm resource cleanup rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}" - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is disabled" + - name: "5.8 HANA Pacemaker Scaleout - Ensure maintenance mode is disabled" ansible.builtin.command: crm configure property maintenance-mode=false when: - inventory_hostname == primary_instance_name @@ -194,7 +194,7 @@ - database_high_availability is defined - database_high_availability is not defined -- name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure the ANF specific resources when relevant" +- name: "5.8 HANA Pacemaker Scaleout - Configure the ANF specific resources when relevant" when: - database_high_availability - NFS_provider == "ANF" - hana_shared_mountpoint is defined - hana_shared_mountpoint | length > 1 - inventory_hostname == primary_instance_name block: - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Stop HANA System on both sites" + - name: "5.8 HANA Pacemaker Scaleout - Stop HANA System on both sites" become_user: "{{ db_sid | lower }}adm" become: true
ansible.builtin.command: "{{ sapcontrol_command }} -function StopSystem" @@ -210,14 +210,14 @@ changed_when: false register: hana_system_stopped - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Wait 2 minutes for SAP system to stop" + - name: "5.8 HANA Pacemaker Scaleout - Wait 2 minutes for SAP system to stop" ansible.builtin.wait_for: timeout: 120 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is enabled" + - name: "5.8 HANA Pacemaker Scaleout - Ensure maintenance mode is enabled" ansible.builtin.command: crm configure property maintenance-mode=true - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Configure the cluster to add the directory structure for monitoring" + - name: "5.8 HANA Pacemaker Scaleout - Configure the cluster to add the directory structure for monitoring" ansible.builtin.command: > crm configure primitive rsc_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} Filesystem params \ device="/hana/shared/{{ db_sid | upper }}/check/" \ @@ -230,14 +230,14 @@ register: sap_hana_fs_check failed_when: sap_hana_fs_check.rc != 0 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Clone and check the newly configured volume in the cluster" + - name: "5.8 HANA Pacemaker Scaleout - Clone and check the newly configured volume in the cluster" ansible.builtin.command: > crm configure clone cln_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} rsc_fs_check_{{ db_sid | upper }}_HDB{{ db_instance_number }} \ meta clone-node-max=1 interleave=true register: sap_hana_cln_fs_check failed_when: sap_hana_cln_fs_check.rc != 0 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Start HANA System on both nodes" + - name: "5.8 HANA Pacemaker Scaleout - Start HANA System on both nodes" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.command: "{{ sapcontrol_command }} -function StartSystem" @@ -245,12 +245,12 @@ changed_when: false register: hana_system_started - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Wait 5 minutes for SAP system to start" + - name: "5.8 HANA Pacemaker Scaleout - Wait 5 minutes for SAP system to start" ansible.builtin.wait_for: timeout: 300 - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Ensure maintenance mode is disabled" + - name: "5.8 HANA Pacemaker Scaleout - Ensure maintenance mode is disabled" ansible.builtin.command: crm configure property maintenance-mode=false - - name: "5.5.4.1 HANA Scale-Out Pacemaker configuration - Remove false positives" + - name: "5.8 HANA Pacemaker Scaleout - Remove false positives" ansible.builtin.shell: crm_resource -C diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml index 375a071753..6db645a1f0 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml @@ -5,29 +5,29 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: Pause to give cluster time to stabilize +- name: "5.8 HANA Pacemaker Scaleout - Pause to give cluster time to stabilize" ansible.builtin.wait_for: timeout: "{{ cluster_status_report_wait_in_s }}" -- name: Check the post-provisioning cluster status +- name: "5.8 HANA Pacemaker Scaleout - Check the post-provisioning cluster status" ansible.builtin.command: "{{ 
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml index 375a071753..6db645a1f0 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/5.8.5-post_provision_report.yml @@ -5,29 +5,29 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: Pause to give cluster time to stabilize +- name: "5.8 HANA Pacemaker Scaleout - Pause to give cluster time to stabilize" ansible.builtin.wait_for: timeout: "{{ cluster_status_report_wait_in_s }}" -- name: Check the post-provisioning cluster status +- name: "5.8 HANA Pacemaker Scaleout - Check the post-provisioning cluster status" ansible.builtin.command: "{{ cluster_status_cmd[ansible_os_family] }}" register: cluster_status_report changed_when: false failed_when: false -- name: Output cluster status +- name: "5.8 HANA Pacemaker Scaleout - Output cluster status" ansible.builtin.debug: msg: "{{ cluster_status_report.stdout }}" verbosity: 2 -- name: Check the SBD devices status +- name: "5.8 HANA Pacemaker Scaleout - Check the SBD devices status" ansible.builtin.shell: set -o pipefail && crm_mon -1 | grep sbd register: sbd_status_report changed_when: false failed_when: false when: ansible_os_family == 'Suse' -- name: Output SBD status +- name: "5.8 HANA Pacemaker Scaleout - Output SBD status" ansible.builtin.debug: msg: "{{ sbd_status_report.stdout }}" when: ansible_os_family == 'Suse' @@ -35,14 +35,14 @@ # old command: # awk '/ha_dr_SAPHanaSR.*crm_attribute/ { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* # Verify that the hook script is working as expected. -- name: Pause to give HANA replication time to stabilize +- name: "5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize" ansible.builtin.wait_for: timeout: "{{ hsr_status_report_wait_in_s }}" - name: "Verify that the hook script is working as expected" when: not database_scale_out block: - - name: "Verify the hook Installation" + - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | @@ -58,11 +58,11 @@ register: saphanasr when: inventory_hostname == primary_instance_name rescue: - - name: "[Rescue] - Pause to give HANA replication time to stabilize" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize" ansible.builtin.wait_for: timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - - name: "[Rescue] - Verify the hook Installation" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation" become_user: "{{ db_sid | lower }}adm" become: true ansible.builtin.shell: | @@ -79,96 +79,96 @@ when: inventory_hostname == primary_instance_name # Code block is specifically for ScaleOut, SUSE and RedHat -- name: "Verify that the hook script is working as expected (Scale Out) for SUSE " +- name: "5.8 HANA Pacemaker Scaleout - Verify that the hook script is working as expected (Scale Out) for SUSE" when: - database_scale_out - ansible_os_family | upper == 'SUSE' - inventory_hostname == primary_instance_name block: - - name: "Verify the hook Installation (SUSE)" - become_user: "{{ db_sid | lower }}adm" - become: true + - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | set -o pipefail grep SAPHanaSr.*init nameserver_*.trc | tail -3 args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace - register: saphanasr - until: saphanasr.stdout is search("Running") - retries: 10 - delay: 30 + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace + register: saphanasr + until: saphanasr.stdout is search("Running") + retries: 10 + delay: 30 rescue: - - name: "[Rescue] - Pause to give HANA replication time to stabilize" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize" ansible.builtin.wait_for: - timeout: "{{ rescue_hsr_status_report_wait_in_s }}" + timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - - name: "[Rescue] - Verify the hook Installation (SUSE)" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" - become_user: "{{ db_sid | lower
}}adm" - become: true + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (SUSE)" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | set -o pipefail grep SAPHanaSr.*init nameserver_*.trc | tail -3 args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace - register: saphanasr - until: saphanasr.stdout is search("Running") - retries: 10 - delay: 30 + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ inventory_hostname }}/trace + register: saphanasr + until: saphanasr.stdout is search("Running") + retries: 10 + delay: 30 -- name: Verify that the hook script is working as expected (Scale Out) for Red Hat {{ ansible_distribution_major_version }}" +- name: "5.8 HANA Pacemaker Scaleout - Verify that the hook script is working as expected (Scale Out) for Red Hat {{ ansible_distribution_major_version }}" when: - ansible_os_family | upper == "REDHAT" - ansible_hostname == primary_instance_name - database_scale_out block: - - name: "Verify the hook Installation (REDHAT)" - become_user: "{{ db_sid | lower }}adm" - become: true + - name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation (REDHAT)" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | set -o pipefail awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace - register: saphanasr - until: saphanasr.stdout is search("SOK") - retries: 10 - delay: 30 + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace + register: saphanasr + until: saphanasr.stdout is search("SOK") + retries: 10 + delay: 30 rescue: - - name: "[Rescue] - Pause to give HANA replication time to stabilize" + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Pause to give HANA replication time to stabilize" ansible.builtin.wait_for: - timeout: "{{ rescue_hsr_status_report_wait_in_s }}" + timeout: "{{ rescue_hsr_status_report_wait_in_s }}" - - name: "[Rescue] - Verify the hook Installation (REDHAT)" - become_user: "{{ db_sid | lower }}adm" - become: true + - name: "[Rescue] 5.8 HANA Pacemaker Scaleout - Verify the hook Installation (REDHAT)" + become_user: "{{ db_sid | lower }}adm" + become: true ansible.builtin.shell: | set -o pipefail awk '/ha_dr_SAPHanaSR.*crm_attribute/ \ - { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* | tail -n 1 + { printf "%s %s %s %s\n",$2,$3,$5,$16 }' nameserver_* args: - chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace - register: saphanasr - until: saphanasr.stdout is search("SOK") - retries: 10 - delay: 30 + chdir: /usr/sap/{{ db_sid | upper }}/HDB{{ db_instance_number }}/{{ primary_instance_name }}/trace + register: saphanasr + until: saphanasr.stdout is search("SOK") + retries: 10 + delay: 30 # SUSE only # Check on all nodes, status of susTkOver Hook -- name: "Log that the hook script is working as expected" +- name: "5.8 HANA Pacemaker Scaleout - Log that the hook script is working as expected" block: - - name: "Debug (saphanasr)" + - name: "5.8 HANA Pacemaker Scaleout - Debug (saphanasr)" ansible.builtin.debug: var: saphanasr verbosity: 2 - - name: "set_fact (saphanasr)" + - name: "5.8 HANA Pacemaker Scaleout - set_fact (saphanasr)" ansible.builtin.set_fact: hsr_result: 
"{{ saphanasr.stdout }}" - - name: "Debug (hsr_result)" + - name: "5.8 HANA Pacemaker Scaleout - Debug (hsr_result)" ansible.builtin.debug: var: hsr_result verbosity: 2 @@ -180,12 +180,12 @@ fail_msg: "Unable to determine if HSR Hook is working" when: inventory_hostname == primary_instance_name -- name: Verify the hook Installation +- name: "5.8 HANA Pacemaker Scaleout - Verify the hook Installation" ansible.builtin.debug: var: saphanasr verbosity: 2 -- name: "HANA PCM Install: reset" +- name: "5.8 HANA Pacemaker Scaleout - flag" ansible.builtin.file: path: /etc/sap_deployment_automation/sap_deployment_hana_pcm.txt state: touch
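When a 5.8.5 run needs debugging by hand, the hook checks above reduce to two one-liners. A sketch with hypothetical SID HN1, instance 03 and hostname hanadb1, following the chdir paths used by the tasks:

# SUSE scale-out: the SAPHanaSrMultiTarget hook leaves an init marker in the nameserver trace
su - hn1adm -c 'grep "SAPHanaSr.*init" /usr/sap/HN1/HDB03/hanadb1/trace/nameserver_*.trc | tail -3'

# RHEL scale-out: the ha_dr_SAPHanaSR hook records replication state (SOK/SFAIL) via crm_attribute
su - hn1adm -c 'awk "/ha_dr_SAPHanaSR.*crm_attribute/ { print \$2, \$3, \$5, \$16 }" /usr/sap/HN1/HDB03/hanadb1/trace/nameserver_*'

Seeing Running from the first command and SOK from the second is exactly what the until/retries conditions above poll for.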
diff --git a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml index 487fa498b2..c0a709fd7f 100644 --- a/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.8-hanadb-scaleout-pacemaker/tasks/main.yml @@ -1,20 +1,20 @@ --- -- name: "5.8 HANADB Pacemaker - import - set_runtime_facts" - ansible.builtin.import_tasks: 5.8.1-set_runtime_facts.yml +- name: "5.8 HANADB Pacemaker - import - set_runtime_facts" + ansible.builtin.import_tasks: 5.8.1-set_runtime_facts.yml -- name: "5.8 HANADB Pacemaker - import - pre_checks" - ansible.builtin.import_tasks: 5.8.2-pre_checks.yml +- name: "5.8 HANADB Pacemaker - import - pre_checks" + ansible.builtin.import_tasks: 5.8.2-pre_checks.yml -- name: "5.8 HANADB Pacemaker - import - SAP HanaSR" - ansible.builtin.import_tasks: 5.8.3-SAPHanaSRMultiTarget.yml +- name: "5.8 HANADB Pacemaker - import - SAP HanaSR" + ansible.builtin.include_tasks: 5.8.3-SAPHanaSRMultiTarget-{{ ansible_os_family }}.yml when: - node_tier in ['hana','observer'] # Scale-Out HSR Specific provision task with its own unique sequence -- name: "5.8 HANADB Pacemaker - import - provision Scale-Out-HSR" - ansible.builtin.import_tasks: 5.8.4-provision-ScaleOut.yml +- name: "5.8 HANADB Pacemaker - import - provision Scale-Out-HSR" + ansible.builtin.import_tasks: 5.8.4-provision-ScaleOut.yml when: - not hana_cluster_existence_check -- name: "5.8 HANADB Pacemaker - import - post_provision_report" - ansible.builtin.import_tasks: 5.8.5-post_provision_report.yml +- name: "5.8 HANADB Pacemaker - import - post_provision_report" + ansible.builtin.import_tasks: 5.8.5-post_provision_report.yml diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index aad2619ee0..18b4ae7722 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -69,6 +69,11 @@ dbload_retry_count: 120 single_server: false +hana_scaleout_wait_for_start_in_sec: 300 +scs_wait_for_start_in_sec: 300 +hana_wait_for_start_in_sec: 300 +hana_wait_for_stop_in_sec: 120 + # Default sizes for volumes sapmnt_volume_size: 32g @@ -225,9 +230,9 @@ use_simple_mount: false # Cluster - Defaults # database_high_availability: false -database_scale_out: false +database_scale_out: false database_cluster_type: "AFA" -database_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. +database_no_standby: false # when set to true, will deploy the scale out - ANF cluster without a standby node. # scs_high_availability: false scs_cluster_type: "AFA" # Configure pacemaker for Azure scheduled events diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index 726bc0ded8..eb7afac8a5 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -36,6 +36,7 @@ provider "azurerm" { use_msi = try(data.terraform_remote_state.landscape.outputs.use_spn, true) && var.use_spn ? false : true partner_id = "3179cd51-f54b-4c73-ac10-8e99417efce7" + storage_use_azuread = true alias = "system" } diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf index 644bc86fd5..f6741628d9 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/vm.tf @@ -18,12 +18,12 @@ locals { lower(format("%s%s%sdeploy%02d", local.env_verified, local.location_short, local.dep_vnet_verified, idx + var.resource_offset)) ] - anydb_computer_names = [for idx in range(var.db_server_count) : + anydb_computer_names = [for idx in range(var.db_server_count) : format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 0, local.random_id_vm_verified) ] - anydb_computer_names_ha = [for idx in range(var.db_server_count * 2) : - format("%sdb%02d%s%01d%s", lower(var.sap_sid), floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified) + anydb_computer_names_ha = [for idx in range(var.db_server_count) : + format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified) ] anydb_vm_names = [for idx in range(var.db_server_count) : @@ -33,10 +33,10 @@ ) ] - anydb_vm_names_ha = [for idx in range(var.db_server_count * 2) : + anydb_vm_names_ha = [for idx in range(var.db_server_count) : length(var.db_zones) > 0 && var.use_zonal_markers ?
( - format("%sdb%sz%s%s%02d%s%01d%s", lower(var.sap_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified)) : ( - format("%sdb%02d%s%01d%s", lower(var.sap_sid), floor(idx/2) + var.resource_offset, local.db_oscode, tonumber((idx % 2)), local.random_id_vm_verified) + format("%sdb%sz%s%s%02d%s%d%s", lower(var.sap_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified)) : ( + format("%sdb%02d%s%d%s", lower(var.sap_sid), idx + var.resource_offset, local.db_oscode, 1, local.random_id_vm_verified) ) ] @@ -66,28 +66,28 @@ locals { format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, substr(local.random_id_vm_verified, 0, 2)) ] - hana_computer_names_ha = [for idx in range(var.db_server_count * 2) : - format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2)) - ] - - hana_computer_names_scaleout = [for idx in range(var.db_server_count * 2) : - format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx ) % 2)), substr(local.random_id_vm_verified, 0, 2)) + hana_computer_names_ha = [for idx in range(var.db_server_count) : + format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, substr(local.random_id_vm_verified, 0, 2)) ] hana_server_vm_names = [for idx in range(var.db_server_count) : length(var.db_zones) > 0 && var.use_zonal_markers ? ( - format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : ( - format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, local.random_id_vm_verified) + format("%sd%s%sz%s%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, idx + var.resource_offset, 0, local.random_id_vm_verified)) : ( + format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, local.random_id_vm_verified) ) ] - hana_server_vm_names_ha = [for idx in range(var.db_server_count * 2) : + hana_server_vm_names_ha = [for idx in range(var.db_server_count) : length(var.db_zones) > 0 && var.use_zonal_markers ? 
( - format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_vm_verified)) : ( - format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_vm_verified) + format("%sd%s%sz%s%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, local.ha_zones[idx % max(length(local.ha_zones), 1)], local.separator, idx + var.resource_offset, 1, local.random_id_vm_verified)) : ( + format("%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, local.random_id_vm_verified) ) ] + hana_computer_names_scaleout = [for idx in range(var.db_server_count * 2) : format("%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((( idx ) % 2)), substr(local.random_id_vm_verified, 0, 2)) ] + hana_server_vm_names_scaleout = [for idx in range(var.db_server_count * 2) : length(var.db_zones) > 0 && var.use_zonal_markers ? ( format("%sd%s%sz%s%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), local.separator, var.db_zones[idx % max(length(var.db_zones), 1)], local.separator, floor(idx/2) + var.resource_offset, tonumber(( idx % 2)), local.random_id_vm_verified)) : ( @@ -145,12 +145,13 @@ format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), substr(local.random_id_vm_verified, 0, 2)) ] - hana_secondary_dnsnames = [for idx in range(var.db_server_count ) : + + hana_secondary_dnsnames = [for idx in range(var.db_server_count) : format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 0, substr(local.random_id_vm_verified, 0, 2)) ] - hana_secondary_dnsnames_ha = [for idx in range(var.db_server_count * 2) : - format("v%sd%s%02dl%01d%s", lower(var.sap_sid), lower(var.db_sid), floor(idx/2) + var.resource_offset, tonumber((idx % 2)), local.random_id_virt_vm_verified) + hana_secondary_dnsnames_ha = [for idx in range(var.db_server_count) : + format("v%sd%s%02dl%d%s", lower(var.sap_sid), lower(var.db_sid), idx + var.resource_offset, 1, local.random_id_virt_vm_verified) ]
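The reworked HA name lists drop the range(var.db_server_count * 2) with floor(idx/2) pairing in favour of one entry per server carrying a fixed 1 marker. A quick way to sanity-check the new expression shape is terraform console; the literal values below are placeholders standing in for sap_sid, resource_offset, db_oscode and the random id:

terraform console <<'EOF'
[for idx in range(2) : format("%sdb%02d%s%d%s", "x00", idx + 0, "l", 1, "abc")]
EOF
# expected: ["x00db00l1abc", "x00db01l1abc"], one HA name per server,
# where the old expression emitted an l0/l1 pair for every server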
hana_secondary_dnsnames_scaleout = [for idx in range(var.db_server_count * 2) : diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf index e2b82bdb4f..e7be0fcd02 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf @@ -112,8 +112,8 @@ resource "azurerm_lb_probe" "hdb" { resource "azurerm_network_interface_backend_address_pool_association" "hdb" { provider = azurerm.main count = local.enable_db_lb_deployment ? var.database_server_count : 0 - network_interface_id = azurerm_network_interface.nics_dbnodes_db[count.index].id - ip_configuration_name = azurerm_network_interface.nics_dbnodes_db[count.index].ip_configuration[0].name + network_interface_id = var.database.scale_out ? azurerm_network_interface.nics_dbnodes_admin[count.index].id : azurerm_network_interface.nics_dbnodes_db[count.index].id + ip_configuration_name = var.database.scale_out ? azurerm_network_interface.nics_dbnodes_admin[count.index].ip_configuration[0].name : azurerm_network_interface.nics_dbnodes_db[count.index].ip_configuration[0].name backend_address_pool_id = azurerm_lb_backend_address_pool.hdb[0].id } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf index 3a86e0e091..0f3ec34b0c 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf @@ -32,7 +32,7 @@ variable "db_subnet" { description = "Informa variable "deploy_application_security_groups" { description = "Defines if application security groups should be deployed" } variable "deployment" { description = "The type of deployment" } variable "fencing_role_name" { description = "If specified the role name to use for the fencing" } -variable "infrastructure" {} +variable "infrastructure" { description = "Dictionary with infrastructure settings" } variable "landscape_tfstate" { description = "Landscape remote tfstate file" } variable "license_type" { description = "Specifies the license type for the OS" diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf index 9441c82f22..63b1ae57ca 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-observer.tf @@ -22,12 +22,12 @@ resource "azurerm_network_interface" "observer" { ip_configuration { name = "IPConfig1" - subnet_id = var.db_subnet.id + subnet_id = var.admin_subnet.id private_ip_address = var.database.use_DHCP ? ( null) : ( try(var.database.observer_vm_ips[count.index], cidrhost( - var.db_subnet.address_prefixes[0], + var.admin_subnet.address_prefixes[0], tonumber(count.index) + local.hdb_ip_offsets.observer_db_vm ) ) @@ -79,6 +79,11 @@ resource "azurerm_linux_virtual_machine" "observer" { tags = merge(local.tags, var.tags) + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + dynamic "admin_ssh_key" { for_each = range(var.deployment == "new" ? 1 : (local.enable_auth_password ? 0 : 1)) content { From 53fd32fecc76f9bc54d27b6e2e300d53959c1775 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 24 Sep 2024 12:37:06 +0530 Subject: [PATCH 606/607] delete the duplicate ossf-scorecard --- .github/workflows/scorecard.yml | 73 --------------------------------- 1 file changed, 73 deletions(-) delete mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml deleted file mode 100644 index df1d38fcd7..0000000000 --- a/.github/workflows/scorecard.yml +++ /dev/null @@ -1,73 +0,0 @@ -# This workflow uses actions that are not certified by GitHub. They are provided -# by a third-party and are governed by separate terms of service, privacy -# policy, and support documentation. - -name: Scorecard supply-chain security -on: - # For Branch-Protection check. Only the default branch is supported.
See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection - branch_protection_rule: - # To guarantee Maintained check is occasionally updated. See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained - schedule: - - cron: '32 4 * * 5' - push: - branches: [ "main" ] - -# Declare default permissions as read only. -permissions: read-all - -jobs: - analysis: - name: Scorecard analysis - runs-on: ubuntu-latest - permissions: - # Needed to upload the results to code-scanning dashboard. - security-events: write - # Needed to publish results and get a badge (see publish_results below). - id-token: write - # Uncomment the permissions below if installing in a private repository. - # contents: read - # actions: readhttps://github.com/hdamecharla/sap-automation-kimforss/tree/main - - steps: - - name: "Checkout code" - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - persist-credentials: false - - - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 - with: - results_file: results.sarif - results_format: sarif - # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: - # - you want to enable the Branch-Protection check on a *public* repository, or - # - you are installing Scorecard on a *private* repository - # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. - # repo_token: ${{ secrets.SCORECARD_TOKEN }} - - # Public repositories: - # - Publish results to OpenSSF REST API for easy access by consumers - # - Allows the repository to include the Scorecard badge. - # - See https://github.com/ossf/scorecard-action#publishing-results. - # For private repositories: - # - `publish_results` will always be set to `false`, regardless - # of the value entered here. - publish_results: true - - # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF - # format to the repository Actions tab. - - name: "Upload artifact" - uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 - with: - name: SARIF file - path: results.sarif - retention-days: 5 - - # Upload the results to GitHub's code scanning dashboard (optional). 
- # Commenting out will disable upload of results to your repo's Code Scanning dashboard - - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: results.sarif From e9cde8973e74b3009b6cf6c10dca7b6f40e75303 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 24 Sep 2024 12:30:11 +0300 Subject: [PATCH 607/607] Remove shared access key from sapmnt storage account configuration --- .../modules/sap_system/common_infrastructure/storage_accounts.tf | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 6ac5e9fe6e..cf0c629f9d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -40,7 +40,6 @@ resource "azurerm_storage_account" "sapmnt" { cross_tenant_replication_enabled = false public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) - shared_access_key_enabled = false tags = var.tags network_rules {
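Taken together with the storage_use_azuread = true provider setting earlier in the series, removing the explicit shared_access_key_enabled = false line leaves shared key access at the platform default while Terraform authenticates to the storage data plane with Entra ID tokens. One quick way to inspect the resulting posture after an apply; the account and resource group names below are placeholders:

az storage account show \
  --name mgmtweeusapmnt \
  --resource-group DEV-WEEU-SAP01-X00 \
  --query allowSharedKeyAccess
# null means the property was never set, so shared key access stays
# enabled unless a policy or a later change disables it explicitly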