From f7bf5d3eaf197c7a7863d721f75a1782a9976c86 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 25 May 2024 20:34:37 +0300 Subject: [PATCH 001/164] Bring in the manual updates --- .../playbook_02_os_sap_specific_config.yaml | 10 + .../ansible/playbook_03_bom_processing.yaml | 7 + .../ansible/playbook_04_00_00_db_install.yaml | 4 + deploy/ansible/playbook_04_00_01_db_ha.yaml | 29 +++ .../roles-db/4.1.3-ora-dg/tasks/main.yaml | 8 + .../tasks/ora-dg-postprocessing-primary.yaml | 33 +++ .../tasks/ora-dg-secondary-preparation.yaml | 11 - .../tasks/ora-dg-setup-secondary.yaml | 75 +++++++ .../tasks/4.2.1.8-db2_copy_keystore_files.yml | 32 +++ .../tasks/main.yaml | 1 + .../0.5-ACSS-registration/tasks/main.yaml | 147 +++---------- .../tasks/1.17.1-pre_checks.yml | 63 ++++++ .../windows/1.4-packages/tasks/main.yaml | 4 +- .../tasks/2.6.1.1-anf-mount.yaml | 7 + .../2.6-sap-mounts/tasks/main.yaml | 11 + .../roles-sap/5.3-app-install/tasks/main.yaml | 30 +++ .../tasks/5.5.4.1-cluster-RedHat.yml | 203 ++++++++++++++++++ .../tasks/5.6.4.2-sap-resources-RedHat.yml | 11 + deploy/pipelines/01-deploy-control-plane.yaml | 84 +++++--- .../pipelines/03-sap-system-deployment.yaml | 17 +- .../pipelines/05-DB-and-SAP-installation.yaml | 7 + deploy/scripts/New-SDAFDevopsProject.ps1 | 142 +++++++++--- deploy/scripts/Test-SDAFReadiness.ps1 | 99 +++++---- deploy/scripts/advanced_state_management.sh | 6 +- deploy/scripts/helpers/script_helpers.sh | 5 +- deploy/scripts/install_library.sh | 5 +- deploy/scripts/install_workloadzone.sh | 6 +- deploy/scripts/installer.sh | 9 +- .../modules/sap_deployer/infrastructure.tf | 16 ++ .../modules/sap_landscape/iscsi.tf | 14 +- .../modules/sap_landscape/nsg.tf | 31 +++ .../modules/sap_library/dns.tf | 2 +- .../modules/sap_system/anydb_node/outputs.tf | 12 ++ .../modules/sap_system/anydb_node/vm-anydb.tf | 6 +- .../modules/sap_system/app_tier/vm-scs.tf | 141 +++++++++++- .../modules/sap_system/hdb_node/anf.tf | 46 +++- .../output_files/sap-parameters.tmpl | 10 + 37 files changed, 1084 insertions(+), 260 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index b6ab630a9e..ab66479547 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -138,6 +138,16 @@ - always - is_run_with_infraCreate_only + - name: "SAP OS configuration playbook: - Set sapos storage account facts" + ansible.builtin.set_fact: + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + when: + - not is_run_with_infraCreate_only + - hostvars.localhost.sapbits_sas_token is defined + tags: + - always + - is_run_with_infraCreate_only + - name: "SAP OS configuration playbook: - Get the IP information from instance meta data service" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance/network?api-version=2021-02-01 diff --git a/deploy/ansible/playbook_03_bom_processing.yaml b/deploy/ansible/playbook_03_bom_processing.yaml index e6b169f544..cf99e3b0c4 100644 --- a/deploy/ansible/playbook_03_bom_processing.yaml +++ b/deploy/ansible/playbook_03_bom_processing.yaml @@ -77,6 +77,13 @@ tags: - always + - name: Set facts + when: hostvars.localhost.sapbits_sas_token is defined + ansible.builtin.set_fact: + sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" + tags: + - always + - name: 3.3-bom-processing role for Linux become: true when: ansible_os_family != "Windows" diff --git 
a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml
index d0b18a5847..d9db8d07e7 100644
--- a/deploy/ansible/playbook_04_00_00_db_install.yaml
+++ b/deploy/ansible/playbook_04_00_00_db_install.yaml
@@ -406,6 +406,10 @@
       ansible.builtin.include_role:
         name: roles-os/1.11-accounts

+    - name: "Configure accounts for Oracle"
+      ansible.builtin.include_role:
+        name: roles-os/1.11-accounts
+
     - name: "Database Installation Playbook: - Check for file system mounts"
       ansible.builtin.include_role:
         name: roles-sap-os/2.6-sap-mounts
diff --git a/deploy/ansible/playbook_04_00_01_db_ha.yaml b/deploy/ansible/playbook_04_00_01_db_ha.yaml
index a2435464a6..b432bc116a 100644
--- a/deploy/ansible/playbook_04_00_01_db_ha.yaml
+++ b/deploy/ansible/playbook_04_00_01_db_ha.yaml
@@ -318,6 +318,35 @@
         suffix: "_DC_ACTION_2"
         tier: 'oracle'

+
+  - name: "Oracle Data Guard Setup on Secondary"
+    when:
+      - db_high_availability
+      - platform in ['ORACLE', 'ORACLE-ASM']
+      - inventory_hostname == secondary_instance_name
+    become: true
+    block:
+      - name: Setting the DB facts
+        ansible.builtin.set_fact:
+          tier: ora                  # Actions for Oracle DB Servers
+          action: 'Post_Processing_Secondary'
+          main_password: "{{ hostvars.localhost.sap_password }}"
+        tags:
+          - always
+
+      - name: Oracle Data Guard
+        ansible.builtin.include_role:
+          name: roles-db/4.1.3-ora-dg
+        tags:
+          - 4.1.3-ora-dg
+
+      - name: "Observer Playbook: - Run post installation routines"
+        ansible.builtin.include_role:
+          name: roles-sap/7.0.0-post-install
+        vars:
+          suffix: "_DC_ACTION_2"
+          tier: 'oracle'
+
 # /*----------------------------------------------------------------------------8
 # |                                                                              |
 # |                         PLAY FOR Observer Node setup                         |
diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml
index bcf4bac333..69f285cd10 100644
--- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml
+++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/main.yaml
@@ -58,6 +58,14 @@
     - node_tier != "observer"
     - action == 'Post_Processing_Secondary'

+# Enable Flashback Logging on the Secondary for FSFO
+- name: "Oracle Data Guard: Post processing on Secondary"
+  ansible.builtin.include_tasks: "ora-dg-postprocessing-secondary.yaml"
+  when:
+    - node_tier == "oracle" or node_tier == "oracle-asm"
+    - node_tier != "observer"
+    - action == 'Post_Processing_Secondary'
+
 # FSFO is enabled from the Observer.
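# [Editor's sketch, not part of the patch] The observer enables fast-start failover
# through the Data Guard broker CLI; the connection alias and password below are
# placeholders, and the actual commands live in ora-dg-observer-setup.yaml:
#
#   dgmgrl sys/<password>@<primary_tns_alias>
#   DGMGRL> ENABLE FAST_START FAILOVER;
#   DGMGRL> START OBSERVER;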
- name: "Oracle Data Guard: Setup Observer" ansible.builtin.include_tasks: "ora-dg-observer-setup.yaml" diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml index a0cbe94cfd..b2b6d984f7 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-postprocessing-primary.yaml @@ -188,6 +188,39 @@ mode: '0755' when: enable_dbtrigger_results.rc == 0 + # Enable the DB trigger for SAP HA + - name: "Oracle Data Guard - Post Processing: Enable DB Trigger" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.shell: sqlplus / as sysdba @dbtrigger.sql + register: enable_dbtrigger_results + failed_when: enable_dbtrigger_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + # when: current_host == ora_primary + - name: "Oracle Data Guard - Post Processing: Create dbtrigger on Primary (debug)" + ansible.builtin.debug: + var: enable_dbtrigger_results.stdout_lines + verbosity: 2 + + - name: "Oracle Data Guard - Post Processing: Create dbtrigger configuration on Primary (save output)" + ansible.builtin.copy: + dest: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.log + content: "{{ enable_dbtrigger_results.stdout }}" + mode: '0777' + when: enable_dbtrigger_results.stdout is defined + + - name: "Oracle Data Guard - Post Processing: Create enable_dbtrigger.txt" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/enable_dbtrigger.txt + state: touch + mode: '0755' + when: enable_dbtrigger_results.rc == 0 + - name: "Create post processing completed" become: true become_user: root diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml index 5864f002b1..87368876b8 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-secondary-preparation.yaml @@ -132,17 +132,6 @@ path: /etc/sap_deployment_automation/dgscripts/dbscopied.txt state: touch mode: '0755' - -- name: "Oracle Data Guard - Post Processing: Restart lsnrctl on Secondary" - become: true - become_user: "{{ oracle_user_name }}" - ansible.builtin.shell: lsnrctl reload - register: lsnrctl_start_secondary_results - failed_when: lsnrctl_start_secondary_results.rc > 0 - args: - chdir: /etc/sap_deployment_automation/dgscripts - executable: /bin/csh - ... # /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index dc4900e21d..e9d1dc4ec0 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -149,6 +149,81 @@ - current_host == ora_secondary - lsnrctl_start_secondary_results.rc == 0 + + +# Restart the Listener on Secondary node when the node_tier is Oracle-ASM. 
+ +- name: "Oracle Data Guard - ASM - Setup Secondary: Stop lsnrctl on Secondary" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.shell: lsnrctl stop + register: lsnrctl_stop_secondary_results + failed_when: lsnrctl_stop_secondary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + +- name: "Oracle Data Guard - ASM - Setup Secondary: Create lsnrctl_stopped_sec.txt" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/lsnrctl_stopped_sec.txt + state: touch + mode: '0755' + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + - lsnrctl_stop_secondary_results.rc == 0 + + +- name: "Oracle Data Guard - ASM - Setup Secondary: Start lsnrctl on Secondary" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.shell: lsnrctl start + register: lsnrctl_asm_start_secondary_results + failed_when: lsnrctl_asm_start_secondary_results.rc > 0 + args: + creates: /etc/sap_deployment_automation/dgscripts/lsnrctl_asm_started_sec.txt + chdir: /etc/sap_deployment_automation/dgscripts + executable: /bin/csh + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + + +- name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (Debug)" + ansible.builtin.debug: + var: lsnrctl_asm_start_secondary_results.stdout_lines + verbosity: 2 + +- name: "Oracle Data Guard - Setup Secondary: restart lsnrctl on Secondary (save output)" + ansible.builtin.copy: + dest: /etc/sap_deployment_automation/lsnrctl_start_primary.log + content: "{{ lsnrctl_asm_start_secondary_results.stdout }}" + mode: '0777' + when: lsnrctl_asm_start_secondary_results.stdout is defined + +- name: "Oracle Data Guard - Setup Secondary: Create lsnrctl_started_sec.txt" + become: true + become_user: "{{ oracle_user_name }}" + ansible.builtin.file: + path: /etc/sap_deployment_automation/dgscripts/lsnrctl_asm_started_sec.txt + state: touch + mode: '0755' + when: + - node_tier == "oracle-asm" + - current_host == ora_secondary + - lsnrctl_asm_start_secondary_results.rc == 0 + + +- name: "Oracle Data Guard - ASM Listener Starting: Sleep for 40 seconds and continue with play" + ansible.builtin.wait_for: + timeout: 40 + # Restart the Listener on Secondary node when the node_tier is Oracle-ASM. 
- name: "Oracle Data Guard - ASM - Setup Secondary: Stop lsnrctl on Secondary" diff --git a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml index 6189f9c773..134df444ea 100644 --- a/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml +++ b/deploy/ansible/roles-db/4.2.1-db2-hainstall/tasks/4.2.1.8-db2_copy_keystore_files.yml @@ -13,6 +13,38 @@ - sapdb2{{ db_sid | lower }}_db_encr.p12 - sapdb2{{ db_sid | lower }}_db_encr.sth +- name: "DB2: Stat if the keystore files exist on Primary node" + ansible.builtin.stat: + path: "/db2/db2{{ db_sid | lower }}/keystore/{{ item }}" + loop: "{{ keystore_files }}" + register: keystore_files_stat + when: ansible_hostname == primary_instance_name + +- name: "DB2: Determine if the database is encrypted" + ansible.builtin.set_fact: + db_encrypted: "{{ (keystore_files_stat.results | map(attribute='stat.exists')) is all }}" + when: ansible_hostname == primary_instance_name + +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + when: ansible_hostname == primary_instance_name + +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + - "Keystore files exist: {{ keystore_files_stat.results | map(attribute='stat.exists') | list }}" + - "Keystore files stat: {{ keystore_files_stat.results | map(attribute='stat') | selectattr('exists') | list }}" + when: ansible_hostname == primary_instance_name + +- name: "DB2: Debug if the database is encrypted" + ansible.builtin.debug: + msg: + - "Database is encrypted: {{ db_encrypted }}" + when: ansible_hostname == primary_instance_name + - name: "DB2: Fetch keystore files from Primary node to Controller" when: ansible_hostname == primary_instance_name ansible.builtin.fetch: diff --git a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml index 29c3f23d43..9bcaf5ef8d 100644 --- a/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.3.sap-installation-media-storage-details/tasks/main.yaml @@ -146,6 +146,7 @@ when: - sapbits_access_key is not defined - sapbits_sas_token is not defined or (sapbits_sas_token | string | length == 0) + - allowSharedKeyAccess block: - name: "0.4 Installation Media: - Check if SAS token secret exists" ansible.builtin.command: >- diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml index b949a51286..77e386572e 100644 --- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml @@ -3,7 +3,11 @@ # Validate that the SCS cluster is working as expected - name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}" ansible.builtin.set_fact: - python_version: "python2" + python_version: "python3" + +- name: "0.0 Validations: - Set Python version {{ distribution_id }}" + ansible.builtin.set_fact: + python_version: "python2" when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] - name: "0.5.1 acss registration: - Determine if SCS is running on {{ ansible_hostname }}" @@ -71,6 +75,19 @@ crm_resource 
--resource g-{{ sap_sid | upper }}_{{ instance_type | upper }} --locate | cut -d ':' -f 2| cut -d " " -f 2 when: - scs_high_availability + - ansible_os_family | upper == 'SUSE' + register: cluster_group_location + failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" + ansible.builtin.shell: >- + set -o pipefail; + pcs constraint location show resources g-{{ sap_sid | upper }}_{{ instance_type | upper }} | grep "Node" | awk '{print $2}' + when: + - scs_high_availability + - ansible_os_family | upper == 'REDHAT' register: cluster_group_location failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on'] tags: @@ -90,133 +107,17 @@ - name: "0.5.1 acss registration: - Set variables from Azure IMDS" delegate_facts: true delegate_to: localhost - ansible.builtin.set_fact: - acss_scs_instance_metadata: "{{ azure_metadata }}" - when: - - hostvars[ansible_hostname]['scs_running_on'] is defined - - ansible_hostname == hostvars[ansible_hostname]['scs_running_on'] - -- name: "0.5.1 acss registration: - Register SAP System to ACSS" - delegate_to: localhost - when: - - hostvars[ansible_hostname]['scs_running_on'] is defined - block: - - name: "0.5.1 acss registration: - Get Azure metadata from the VM where scs_running_on is defined" - ansible.builtin.set_fact: - az_instance_metadata: "{{ hostvars.localhost.acss_scs_instance_metadata }}" - - - name: "0.5.1 acss registration: - Print metadata" - ansible.builtin.debug: - var: az_instance_metadata - verbosity: 2 - - - name: "0.5.1 acss registration: - Set variables from Azure IMDS" - ansible.builtin.set_fact: - acss_resource_id: "{{ az_instance_metadata.json.compute.resourceId }}" - acss_subscription_id: "{{ az_instance_metadata.json.compute.subscriptionId }}" - acss_resource_group: "{{ az_instance_metadata.json.compute.resourceGroupName }}" - acss_location: "{{ az_instance_metadata.json.compute.location }}" - acss_sid: "{{ sap_sid | upper }}" - acss_instance_type: "{{ instance_type }}" - - - name: "0.5.1 acss registration: - Install [ACSS] cli extension" - ansible.builtin.shell: >- - az extension add --name workloads --yes || exit 1 - tags: - - skip_ansible_lint - - - name: "0.5.1 acss registration: - perform az login" ansible.builtin.command: >- - az login --identity --allow-no-subscriptions --output none - no_log: true - changed_when: false - - - name: "0.5.1 acss registration: - Get Access Token" - ansible.builtin.shell: >- - az account get-access-token --resource https://management.azure.com \ - --query accessToken -o tsv - register: acss_access_token - changed_when: false - no_log: true - tags: - - skip_ansible_lint - - - name: "0.5.1 acss registration: - Generate a guid for the ACSS instance" - ansible.builtin.command: uuidgen - register: acss_guid - tags: - - skip_ansible_lint - - - name: "0.5.1 acss registration: - Check if we have [ACSS] virtual instance write" - ansible.builtin.shell: >- - az provider show --namespace Microsoft.Workloads \ - --query "resourceTypes[?resourceType=='sapVirtualInstances'].permissions[?contains(@.actions, 'Microsoft.Workloads/sapVirtualInstances/write')]" - register: acss_virtual_instance_write - changed_when: false - - - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance parameters" - ansible.builtin.debug: - msg: - - "acss_resource_id: {{ acss_resource_id }}" - - 
"acss_subscription_id: {{ acss_subscription_id }}" - - "acss_resource_group: {{ acss_resource_group }}" - - "acss_location: {{ acss_location }}" - - "acss_sid: {{ acss_sid }}" - - "acss_instance_type: {{ acss_instance_type }}" - - "acss_environment: {{ acss_environment }}" - - "acss_sap_product: {{ acss_sap_product }}" - - "acss_guid: {{ acss_guid.stdout }}" - - "acss_vm_id: {{ hostvars[ansible_hostname]['scs_running_on'] }}" - - "acss_write_auth: {{ acss_virtual_instance_write.stdout }}" - verbosity: 2 + "az extension add --name workloads --yes || exit 1" tags: - skip_ansible_lint - - name: "0.5.1 acss registration: - Create [ACSS] virtual instance" - ansible.builtin.uri: - url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" - method: PUT - body_format: json - body: | - { - "properties": { - "environment": "{{ acss_environment }}", - "sapProduct": "{{ acss_sap_product }}", - "configuration": { - "configurationType": "Discovery", - "centralServerVmId": "{{ acss_resource_id }}" - } - }, - "location": "{{ acss_location }}" - } - # status_code: [200, 201] - headers: - Authorization: "Bearer {{ acss_access_token.stdout }}" - x-ms-rpaas-new-resource: "true" - x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" - register: create_vis_response - failed_when: create_vis_response.json.properties.provisioningState != 'Accepted' - no_log: false - - - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance creation response" - ansible.builtin.debug: - msg: "{{ create_vis_response }}" + - name: "Create [ACSS] virtual instance" + ansible.builtin.command: "az workloads sap-virtual-instance create --sap-virtual-instance-name {{ acss_sid }} --resource-group {{ acss_resource_group }} --location {{ acss_location }} --environment {{ acss_environment }} --sap-product {{ acss_sap_product }} --configuration {{ acss_configuration }}" + when: + - ansible_hostname == primary_instance_name + - cluster_group_location.stdout != ansible_hostname tags: - skip_ansible_lint - - name: "0.5.1 acss registration: - Check the registered [ACSS] virtual instance" - ansible.builtin.uri: - url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" - method: GET - # status_code: [200, 201] - headers: - Authorization: "Bearer {{ acss_access_token.stdout }}" - x-ms-rpaas-new-resource: "true" - x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" - register: get_vis_response - until: get_vis_response.json.properties.provisioningState == 'Succeeded' - retries: 10 - delay: 60 - no_log: true - ... 
diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml index d53626b480..1c004a1a0f 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml @@ -196,6 +196,69 @@ # +------------------------------------4--------------------------------------*/ +# /*---------------------------------------------------------------------------8 +# | | +# | Fencing Specific - Kdump RHEL only | +# | | +# +------------------------------------4--------------------------------------*/ +- name: "1.17 Generic Pacemaker - RHEL - Configure optional fence_kdump stonith device" + when: + - ansible_os_family | upper == "REDHAT" + - use_fence_kdump + block: + - name: "1.17 Generic Pacemaker - kdump service is enabled" + ansible.builtin.service: + name: "kdump" + enabled: true + state: started + + # Todo: Use ansible.builtin.service_facts to get the status of the kdump service + - name: "1.17 Generic Pacemaker - RHEL - Check if kdump is enabled" + ansible.builtin.command: "systemctl is-enabled kdump" + register: kdump_enabled_check_result + changed_when: false + failed_when: false + tags: + - skip_ansible_lint + + - name: "1.17 Generic Pacemaker - RHEL - Save kdump facts" + ansible.builtin.set_fact: + kdump_enabled_check: "{{ kdump_enabled_check_result.rc | int }}" + kdump_enabled: "{{ kdump_enabled_check_result.stdout | trim }}" + when: + - kdump_enabled_check_result.rc == 0 + + # kdump_enabled_check_result.stdout == "enabled" + - name: "1.17 Generic Pacemaker - RHEL - show if kdump is enabled" + ansible.builtin.debug: + msg: + - "Kdump is enabled: {{ kdump_enabled | default('false') }}" + verbosity: 2 + + # Install the fence_kdump fence agent. This is required for fencing to work with kdump. + - name: "1.17 Generic Pacemaker - RHEL - Install fence-agents-kdump" + ansible.builtin.package: + name: "fence-agents-kdump" + state: present + when: + - kdump_enabled_check == 0 + - kdump_enabled == "enabled" + + # Allow the required ports for fence_kdump through the firewall. + - name: "1.17 Generic Pacemaker - RHEL - Allow ports for fence_kdump through the firewall" + ansible.posix.firewalld: + port: "7410/udp" + permanent: true + state: enabled + offline: true + +# /*---------------------------------------------------------------------------8 +# | | +# | Fencing Specific - Kdump RHEL only - END | +# | | +# +------------------------------------4--------------------------------------*/ + + ... 
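# [Editor's sketch, not part of the role] With fence-agents-kdump installed and
# 7410/udp open, the matching first-level stonith device would typically be
# created along these lines (host names are placeholders):
#
#   pcs stonith create rsc_st_kdump fence_kdump pcmk_reboot_action="off" \
#     pcmk_host_list="db-node-1 db-node-2" timeout=30
#   pcs stonith level add 1 db-node-1 rsc_st_kdump
#   pcs stonith level add 1 db-node-2 rsc_st_kdump
#
# fence_kdump runs ahead of the second-level fencing agent, giving a crashed
# node time to finish writing its crash dump before it is powered off.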
# /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml index ba5a8aa2fd..3894638015 100644 --- a/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml +++ b/deploy/ansible/roles-os/windows/1.4-packages/tasks/main.yaml @@ -52,7 +52,9 @@ ansible.windows.win_shell: | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 Install-PackageProvider -Name NuGet -Force - register: nuget_installed + Register-PSRepository -Default -Force + register: repository_registered + - name: "1.4-Packages: Register-PSRepository" ansible.windows.win_shell: | diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml index c5302e8550..c2d0e95a84 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1.1-anf-mount.yaml @@ -149,6 +149,13 @@ - db_high_availability is defined - database_high_availability is not defined +- name: "Backward Compatibility - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + # absent_from_fstab - name: "ANF Mount: RHEL DB high availability configuration" when: ansible_os_family | upper == "REDHAT" and database_high_availability diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 4b2f8df3a9..097f3c0609 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -375,6 +375,17 @@ - not use_simple_mount - NFS_provider == 'ANF' +# Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 +# This will be removed in the next release, left here for tracing and documentation +# Import this task only if db_scale_out is defined. Implies that sap_mnt is defined, i.e. ANF is used +# - name: "2.6 SAP Mounts: - Import ANF tasks for Scale-Out" +# ansible.builtin.import_tasks: 2.6.1.2-anf-mounts-scaleout.yaml +# when: +# - NFS_provider == 'ANF' +# - db_scale_out is defined +# - db_scale_out +# - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined + # Import this task only if the tier is ora. - name: "2.6 SAP Mounts: - Import Oracle tasks" diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index 7335f19f49..ddcdf93b89 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -113,6 +113,36 @@ - platform == "HANA" - db_port_open.msg is defined +- name: "APP Install: Check if the DB load balancer port is available and listening" + ansible.builtin.wait_for: + host: "{{ db_lb_virtual_host }}" + port: "3{{ db_instance_number }}13" + state: started + timeout: 30 + msg: 'INSTALL:0026:APP Install failed, database is unreachable.' 
+ register: db_port_open + failed_when: false + when: + - database_high_availability + - platform == "HANA" + +- name: "APP Install: DEBUG - DB Loadbalancer check" + ansible.builtin.debug: + msg: "{{ db_port_open.msg }}" + verbosity: 2 + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + +- name: "ErrorHandling" + ansible.builtin.fail: + msg: "INSTALL:0026:APP Install failed, database is unreachable." + when: + - database_high_availability + - platform == "HANA" + - db_port_open.msg is defined + - name: "APP Install: Set schema_name variable for HANA" when: platform == "HANA" block: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 0245fcec20..4065c2470b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -177,6 +177,80 @@ loop_var: item failed_when: constraint.rc > 1 + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - BEGIN | + # | | + # +------------------------------------4--------------------------------------*/ + # Follow steps described in https://access.redhat.com/articles/6884531 + + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # 
+------------------------------------4--------------------------------------*/ + + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" ansible.builtin.shell: pcs property set maintenance-mode=false @@ -239,6 +313,135 @@ # +------------------------------------4--------------------------------------*/ # Follow steps described in https://access.redhat.com/articles/6884531 +- name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" + ansible.builtin.set_fact: + is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" + +- name: "5.5.4.1 HANA Cluster configuration - (systemd) Creating drop-in file" + become: true + when: + - is_rhel_82_or_newer is defined + - is_rhel_82_or_newer + - ansible_hostname == primary_instance_name + block: + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Create drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + line: "[Unit]" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^[Unit]$' + line: "Description=Pacemaker needs the SAP HANA instance service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Description=Pacemaker needs the SAP HANA instance service$' + line: "Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + + - name: "5.5.4.1 HANA Cluster configuration - (systemd) Update drop-in file" + ansible.builtin.lineinfile: + path: /etc/systemd/system/pacemaker.service.d/00-pacemaker.conf + create: true + backup: true + owner: root + group: root + mode: '0644' + insertafter: '^Wants=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service$' + line: "After=SAP{{ db_sid | upper }}_{{ db_instance_number }}.service" + register: dropinfile + + - name: "5.5.4.1 HANA Cluster configuration - systemd reload" + ansible.builtin.systemd: + daemon_reload: true + when: + - dropinfile.changed + + # /*---------------------------------------------------------------------------8 + # | | + # | Systemd-Based SAP Startup Framework - END | + # | | + # +------------------------------------4--------------------------------------*/ + + + - name: "5.5.4.1 HANA Cluster configuration - Disable Maintenance mode for the cluster" + ansible.builtin.shell: pcs property set maintenance-mode=false + + - name: "5.5.4.1 HANA Cluster configuration - Wait until cluster has stabilized on RHEL 7" + ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' + register: cluster_stable_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" + # '*' is a special character in regexp and needs to be escaped for literal matching + # if we are worried about character spacing across distros we can match for '\* Online:' + - name: "5.5.4.1 HANA Cluster 
configuration - Wait until cluster has stabilized on RHEL 8 or 9" + ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' + register: cluster_stable_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + when: ansible_distribution_major_version in ["8", "9"] + + # - name: Ensure Cluster resources are started + # ansible.builtin.shell: pcs status | grep '\* Started:' + # register: hana_pcs_cluster_resource_check + # when: ansible_distribution_major_version == "8" + + # - name: Ensure Cluster resources are started + # ansible.builtin.shell: pcs status | grep '^Started ' + # register: hana_pcs_cluster_resource_check + # when: ansible_distribution_major_version != "8" + + + # - name: Ensure Cluster resources are started + # ansible.builtin.debug: + # var: hana_pcs_cluster_resource_check + + # the leading spaces are irrelevant here as we are looking for *Started: + - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 7" + ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:' + register: hana_cluster_resource_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" + when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != "9" + + - name: "5.5.4.1 HANA Cluster configuration - Ensure Cluster resources are started on RHEL 8 or 9" + ansible.builtin.shell: set -o pipefail && pcs resource status | grep '\* Started:' + register: hana_cluster_resource_check + retries: 12 + delay: 10 + until: "(primary_instance_name + ' ' + secondary_instance_name) in hana_cluster_resource_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in hana_cluster_resource_check.stdout" + when: ansible_distribution_major_version in ["8", "9"] + +# End of HANA clustering resources + +# /*---------------------------------------------------------------------------8 +# | | +# | Systemd-Based SAP Startup Framework - BEGIN | +# | | +# +------------------------------------4--------------------------------------*/ +# Follow steps described in https://access.redhat.com/articles/6884531 + - name: "5.5.4.1 HANA Cluster configuration - check if the OS version is RHEL 8.2 or newer" ansible.builtin.set_fact: is_rhel_82_or_newer: "{{ ansible_distribution_version is version('8.2', '>=') | default(false) }}" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 718c5c17fd..79995446bc 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -131,6 +131,17 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer | default(false) + # - name: "5.6 SCSERS - validate that the drop-in file is active" + # when: + # - is_rhel_82_or_newer is defined + # - is_rhel_82_or_newer + # ansible.builtin.shell: >- + # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' + # register: dropinfile_validation + # changed_when: false + # failed_when: dropinfile_validation.rc > 0 + + # 
/*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - END | diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 7defba8091..ad8a3e39df 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -442,6 +442,9 @@ stages: deployer_configfile="${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" library_configfile="${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" + deployer_configfile="${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" + library_configfile="${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" + echo -e "$green--- Configure devops CLI extension ---$reset" az config set extension.use_dynamic_install=yes_without_prompt az extension add --name azure-devops --output none @@ -514,10 +517,25 @@ stages: export TF_VAR_ansible_core_version=2.15 fi - if [ "$USE_WEBAPP" = "true" ]; then - echo "Use WebApp is selected" - fi - bootstrapped=0 + if [ "$USE_WEBAPP" = "true" ]; then + echo "Use WebApp is selected" + + if [ -z ${APP_REGISTRATION_APP_ID} ]; then + echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." + exit 2 + fi + + if [ -z ${WEB_APP_CLIENT_SECRET} ]; then + echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." + exit 2 + fi + export TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); echo 'App Registration App ID' ${TF_VAR_app_registration_app_id} + export TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET) + export TF_VAR_use_webapp=true + + fi + + bootstrapped=0 if [ ! -f $deployer_environment_file_name ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value") @@ -554,6 +572,11 @@ stages: echo SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH >.sap_deployment_automation/config export SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH + cd DEPLOYER + ls -lart + cd $(deployerfolder) + ls -lart + echo -e "$green--- File Validations ---$reset" if [ ! 
-f DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then echo -e "$boldred--- File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found ---$reset" @@ -865,35 +888,30 @@ stages: displayName: Deploy control plane env: - CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID) - CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET) - CP_ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID) - ARM_TENANT_ID: $(APP_TENANT_ID) - AZURE_DEVOPS_EXT_PAT: $(PAT) - CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) - DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) - DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" - IS_PIPELINE_DEPLOYMENT: true - LOGON_USING_SPN: $(Logon_Using_SPN) - POOL: $(POOL) - SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - TF_APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID) - TF_IN_AUTOMATION: true - TF_LOG: $(TF_LOG) - TF_VAR_agent_ado_url: $(System.CollectionUri) - TF_VAR_agent_pat: $(PAT) - TF_VAR_agent_pool: $(POOL) - TF_VAR_ansible_core_version: $(ansible_core_version) - TF_VAR_app_registration_app_id: $(APP_REGISTRATION_APP_ID) - TF_VAR_deployer_kv_user_arm_id: $(Deployer_Key_Vault) - TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) - TF_VAR_tf_version: $(tf_version) - TF_VAR_use_webapp: ${{ lower(parameters.use_webapp_param) }} - TF_VAR_webapp_client_secret: $(WEB_APP_CLIENT_SECRET) - USE_MSI: $(Use_MSI) - USE_WEBAPP: ${{ lower(parameters.use_webapp_param) }} - WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID) + CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID) + CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET) + CP_ARM_TENANT_ID: $(CP_ARM_TENANT_ID) + TF_VAR_spn_id: $(CP_ARM_OBJECT_ID) + TF_VAR_agent_pool: $(POOL) + TF_VAR_agent_ado_url: $(System.CollectionUri) + TF_VAR_tf_version: $(tf_version) + AZURE_DEVOPS_EXT_PAT: $(PAT) + IS_PIPELINE_DEPLOYMENT: true + WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) + APP_REGISTRATION_APP_ID: $(APP_REGISTRATION_APP_ID) + keyvault: $(Deployer_Key_Vault) + POOL: $(POOL) + SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} + CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + TF_VAR_ansible_core_version: $(ansible_core_version) + TF_LOG: $(TF_LOG) + TF_IN_AUTOMATION: true + DEPLOYER_TFSTATE_KEY: "${{ parameters.deployer }}.terraform.tfstate" + LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) + DEPLOYER_RANDOM_ID_SEED: $(DEPLOYER_RANDOM_ID_SEED) failOnStderr: false diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 3efc4aa9fe..164d2248af 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -70,12 +70,7 @@ stages: else source /etc/profile.d/deploy_server.sh fi - - echo -e "$green--- Configure devops CLI extension ---$reset" - - az config set extension.use_dynamic_install=yes_without_prompt --output none - - az extension add --name azure-devops --output none + export AZURE_DEVOPS_EXT_PAT=$PAT HOME_CONFIG=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path) cd $HOME_CONFIG; mkdir -p .sap_deployment_automation @@ -261,6 +256,16 @@ stages: fi fi + if [ -z $USE_MSI ]; then + USE_MSI="false" + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query USE_MSI.value --output table) + if [ -n "${az_var}" ]; then + az pipelines variable-group 
variable update --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors + else + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors + fi + fi + if [ $USE_MSI != "true" ]; then echo "Using SPN" diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 27c9b38760..1effad8bfd 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -257,6 +257,13 @@ stages: new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi + if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then + new_parameters=$PIPELINE_EXTRA_PARAMETERS + else + echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" + new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" + fi + echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 145b53f6d9..51e2ea34ad 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -38,33 +38,24 @@ $versionLabel = "v3.11.0.2" # az login --output none --tenant $ARM_TENANT_ID --only-show-errors --scope https://graph.microsoft.com//.default # } -# Check if access to the Azure DevOps organization is available and prompt for PAT if needed -# Exact permissions required, to be validated, and included in the Read-Host text. 
+Write-Host "" +Write-Host "" -if ($Env:AZURE_DEVOPS_EXT_PAT.Length -gt 0) { - Write-Host "Using the provided Personal Access Token (PAT) to authenticate to the Azure DevOps organization $ADO_Organization" -ForegroundColor Yellow -} +if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } + +if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { + $Title = "Select the authentication method to use" + $data = @('Service Principal', 'Managed Identity') + Show-Menu($data) + $selection = Read-Host $Title + $authenticationMethod = $data[$selection - 1] -$checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) -if ($checkPAT.Length -eq 0) { - $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization" - $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1) - if ($verifyPAT.Length -eq 0) { - Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press to exit" - exit - } - else { - Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green - } } else { - Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green + $authenticationMethod = $Env:SDAF_AuthenticationMethod } -Write-Host "" -Write-Host "" - -if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } +Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor Yellow if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { $Title = "Select the authentication method to use" @@ -209,6 +200,7 @@ else { Write-Host "Using an existing project" + $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --output tsv) @@ -824,6 +816,106 @@ if ($authenticationMethod -eq "Service Principal") { $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") } else { + + if ($Env:MSI_OBJECT_ID.Length -ne 0) { + $MSI_objectId = $Env:MSI_OBJECT_ID + } + else { + + $Title = "Choose the subscription that contains the Managed Identity" + $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) + Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) + $selection = Read-Host $Title + + $subscription = $subscriptions[$selection - 1] + + $Title = "Choose the Managed Identity" + $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output table | Sort-Object) + Show-Menu($identities[2..($identities.Length - 1)]) + $selection = Read-Host $Title + $selectionOffset = [convert]::ToInt32($selection, 10) + 1 + + $identity = $identities[$selectionOffset] + Write-Host "Using Managed Identity:" $identity + + $id = $(az identity list --query "[?name=='$identity'].id" --subscription $subscription --output tsv) + $MSI_objectId = $(az identity show --ids $id --query "principalId" --output tsv) + + $postBody = [PSCustomObject]@{ + accessLevel = @{ + accountLicenseType = "stakeholder" + } + projectEntitlements = @([ordered]@{ + group = @{ + groupType = "projectContributor" + } + projectRef = @{ + id = $Project_ID + } + + }) 
+ servicePrincipal = @{ + origin = "aad" + originId = $id + subjectKind = "servicePrincipal" + } + + } + + Set-Content -Path "user.json" -Value ($postBody | ConvertTo-Json -Depth 6) + + az devops invoke --area MemberEntitlementManagement --resource ServicePrincipalEntitlements --in-file user.json --api-version "7.1-preview" --http-method POST + + } +} + +#region App registration +if ($WebApp) { + Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green + + $found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) + + if ($found_appRegistration.Length -ne 0) { + Write-Host "Found an existing App Registration:" $ApplicationName + $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json + + $APP_REGISTRATION_ID = $ExistingData.appId + $APP_REGISTRATION_OBJECTID = $ExistingData.id + + # $confirmation = Read-Host "Reset the app registration secret y/n?" + # if ($confirmation -eq 'y') { + # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + # } + # else { + # $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" + # } + } + else { + Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green + if ($IsWindows) { $manifestPath = ".\manifest.json" } else { $manifestPath = "./manifest.json" } + Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' + + $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access $manifestPath --query "appId" --output tsv) + $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json + $APP_REGISTRATION_OBJECTID = $ExistingData.id + + if (Test-Path $manifestPath) { Write-Host "Removing manifest.json" ; Remove-Item $manifestPath } + + + # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + } + + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + + az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + + $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) + if ($Control_plane_groupID.Length -eq 0) { + Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green + if ($WebApp) { + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter 
your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + } + else { $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal password" } @@ -844,11 +936,13 @@ if ($authenticationMethod -eq "Service Principal") { az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none +} +else { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true } else { az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true @@ -901,7 +995,7 @@ else { Write-Host Write-Host "" - Write-Host "The browser will now open, Please create an 'Azure Resource Manager' service connection with the name 'Control_Plane_Service_Connection'." + Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'." 
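# [Editor's sketch, not part of the script] The same service connection can also be
# created non-interactively with the azure-devops CLI extension; the subscription
# display name below is a placeholder:
#
#   $Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $CP_ARM_CLIENT_SECRET
#   az devops service-endpoint azurerm create `
#     --name "Control_Plane_Service_Connection" `
#     --azure-rm-service-principal-id $CP_ARM_CLIENT_ID `
#     --azure-rm-tenant-id $CP_ARM_TENANT_ID `
#     --azure-rm-subscription-id $Control_plane_subscriptionID `
#     --azure-rm-subscription-name "<subscription display name>"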
$connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices"
  Write-Host "URL: " $connections_url
@@ -1049,7 +1143,7 @@
Write-Host ""
Write-Host "The browser will now open. Select the '"$ADO_PROJECT "Build Service' user and ensure that it has 'Allow' in the Contribute section."
$permissions_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/repositories?_a=permissions"
-Write-Host "URL: " $permissions_url
+
Start-Process $permissions_url
Read-Host -Prompt "Once you have verified the permission, press any key to continue"
diff --git a/deploy/scripts/Test-SDAFReadiness.ps1 b/deploy/scripts/Test-SDAFReadiness.ps1
index 7365519b95..d5bd34fabb 100644
--- a/deploy/scripts/Test-SDAFReadiness.ps1
+++ b/deploy/scripts/Test-SDAFReadiness.ps1
@@ -13,6 +13,15 @@ function Show-Menu($data) {

$rnd = $(Get-Random -Minimum 1 -Maximum 1000).ToString()

+if (Test-Path $LogFileDir) {
+  $LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md"
+  $LogFileName = Join-Path $LogFileDir -ChildPath $LogFileName
+}
+else {
+  Write-Host "The directory does not exist"
+  return
+}
+
$LogFileDir = $Env:LogFileDir
if ($null -eq $LogFileDir -or $LogFileDir -eq "") {
  $LogFileDir = Read-Host "Please enter the directory to save the log file"
@@ -28,6 +37,7 @@ else {
  return
}

+$LogFileName = "SDAF-" + $(Get-Date -Format "yyyyMMdd-HHmm") + ".md"
Add-Content -Path $LogFileName "# SDAF Assessment #"
Add-Content -Path $LogFileName ""

@@ -166,7 +176,7 @@ if ($authenticationMethod -ne "User Account") {
  }
}

-$vmName="SDAF-VM"
+$vmName = "SDAF-VM"
$vnetName = "SDAF-VNet"
$anfSubnetName = "SDAF-anf"

@@ -429,6 +439,7 @@ if ($selection.ToUpper() -eq "Y") {
        $OutputString = "$url is accessible"
        Write-Host $OutputString -ForegroundColor Green
        Add-Content -Path $LogFileName $OutputString
+       Add-Content -Path $LogFileName ""
      }
      else {
        $OutputString = "$url is not accessible"
@@ -444,6 +455,7 @@ if ($selection.ToUpper() -eq "Y") {
    foreach ($IP in $UrlsToCheck.windows.IPs) {
      Write-Host "Checking if $IP is accessible from the Virtual Machine"
      $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
+     $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message)
      if ($result.Contains("succeeded!")) {
        $OutputString = "$IP is accessible"
        Write-Host $OutputString -ForegroundColor Green
@@ -468,48 +480,59 @@ if ($selection.ToUpper() -eq "Y") {
    foreach ($url in $UrlsToCheck.sap.urls) {
      Write-Host "Checking if $url is accessible from the Virtual Machine"
      $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message)
-     if ($result.Contains("200 OK")) {
-       $OutputString = "$url is accessible"
-       Write-Host $OutputString -ForegroundColor Green
-       Add-Content -Path $LogFileName $OutputString
+     Add-Content -Path $LogFileName ""
+
+   foreach ($url in $UrlsToCheck.sap.urls) {
+     Write-Host "Checking if $url is accessible from the Virtual Machine"
+     $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "wget -O /tmp/foo.zip $url" --query value[0].message)
+     if ($result.Contains("200 OK")) {
+       $OutputString = "$url is accessible"
+       Write-Host $OutputString -ForegroundColor 
Green + Add-Content -Path $LogFileName $OutputString + } + elseif ($result.Contains("403 Forbidden")) { + $OutputString = "$url is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$url is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } } - elseif ($result.Contains("403 Forbidden")) { - $OutputString = "$url is accessible" - Write-Host $OutputString -ForegroundColor Green - Add-Content -Path $LogFileName $OutputString - } - else { - $OutputString = "$url is not accessible" - Write-Host $OutputString -ForegroundColor Red - Add-Content -Path $LogFileName $OutputString - } - } - - Write-Host "Checking 'runtime' IPs" -ForegroundColor Yellow - Add-Content -Path $LogFileName "Checking 'runtime' IPs" - foreach ($IP in $UrlsToCheck.sap.IPs) { - Write-Host "Checking if $IP is accessible from the Virtual Machine" - $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) - if ($result.Contains("succeeded!")) { - $OutputString = "$IP is accessible" - Write-Host $OutputString -ForegroundColor Green - Add-Content -Path $LogFileName $OutputString - } - elseif ($result.Contains("Connected")) { - $OutputString = "$IP is accessible" - Write-Host $OutputString -ForegroundColor Green - Add-Content -Path $LogFileName $OutputString + Write-Host "Checking 'runtime' IPs" -ForegroundColor Yellow + Add-Content -Path $LogFileName "Checking 'runtime' IPs" + + foreach ($IP in $UrlsToCheck.sap.IPs) { + Write-Host "Checking if $IP is accessible from the Virtual Machine" + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name $vmName --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + $result = $(az vm run-command invoke --resource-group $resourceGroupName --name "SDAF-VM" --command-id RunShellScript --scripts "nc -zv $IP 443" --query value[0].message) + if ($result.Contains("succeeded!")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + elseif ($result.Contains("Connected")) { + $OutputString = "$IP is accessible" + Write-Host $OutputString -ForegroundColor Green + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } + else { + $OutputString = "$IP is not accessible" + Write-Host $OutputString -ForegroundColor Red + Add-Content -Path $LogFileName $OutputString + Add-Content -Path $LogFileName "" + } } - else { - $OutputString = "$IP is not accessible" - Write-Host $OutputString -ForegroundColor Red - Add-Content -Path $LogFileName $OutputString - } - } + } } - } $selection = Read-Host "Create Azure NetApp account Y/N" diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh index 4a62ac7561..842cecdc71 100755 --- a/deploy/scripts/advanced_state_management.sh +++ b/deploy/scripts/advanced_state_management.sh @@ -196,8 +196,12 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/ system_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ] +then + mkdir -p /opt/terraform/.terraform.d/plugin-cache +fi sudo chown -R $USER:$USER /opt/terraform + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/helpers/script_helpers.sh b/deploy/scripts/helpers/script_helpers.sh index 4d1f035297..89a94402f6 100755 --- a/deploy/scripts/helpers/script_helpers.sh +++ b/deploy/scripts/helpers/script_helpers.sh @@ -391,7 +391,10 @@ function validate_dependencies { return 2 #No such file or directory fi # Set Terraform Plug in cache - sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache + if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] + then + mkdir -p /opt/terraform/.terraform.d/plugin-cache + fi sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_library.sh b/deploy/scripts/install_library.sh index 9628f678eb..354c39282d 100755 --- a/deploy/scripts/install_library.sh +++ b/deploy/scripts/install_library.sh @@ -165,7 +165,10 @@ generic_config_information="${automation_config_directory}"config library_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] +then + mkdir -p /opt/terraform/.terraform.d/plugin-cache +fi sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache diff --git a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index 527b20c7a4..5f45a1d903 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -556,9 +556,11 @@ ok_to_proceed=false new_deployment=false #Plugins -sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] +then + mkdir -p /opt/terraform/.terraform.d/plugin-cache +fi sudo chown -R $USER:$USER /opt/terraform - export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache root_dirname=$(pwd) diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index 778a8f0ef5..e04a98e8d7 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -165,7 +165,10 @@ fi #Plugins -sudo mkdir -p /opt/terraform/.terraform.d/plugin-cache +if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] +then + mkdir -p /opt/terraform/.terraform.d/plugin-cache +fi sudo chown -R $USER:$USER /opt/terraform export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache @@ -626,7 +629,11 @@ terraform -chdir="$terraform_module_directory" plan -no-color -detailed-exitcode return_value=$?
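# For reference: with -detailed-exitcode, terraform plan exits 0 when the plan
# succeeded with no changes, 1 on error and 2 when it succeeded with pending
# changes, which is why only a return code of 1 aborts the run below.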
echo "Terraform Plan return code: " $return_value +<<<<<<< HEAD if [ 1 == $return_value ] ; then +======= +if [ 1 == $return_value ]: then +>>>>>>> 49ea369e (When Terraform plan failed, stop the script from executing Terraform apply (#560)) echo "" echo "#########################################################################################" echo "# #" diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 03ef7bca79..d301822699 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -145,3 +145,19 @@ resource "azurerm_role_assignment" "resource_group_acsservice_msi" { principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id } +resource "azurerm_role_assignment" "resource_group_acsservice" { + provider = azurerm.main + count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 + scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id + role_definition_name = "Azure Center for SAP solutions administrator" + principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id +} + +resource "azurerm_role_assignment" "resource_group_acsservice_msi" { + provider = azurerm.main + count = var.assign_subscription_permissions ? 1 : 0 + scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id + role_definition_name = "Azure Center for SAP solutions administrator" + principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id +} + diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 3459a9348b..c864edf0db 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -327,7 +327,6 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true - } resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { @@ -344,11 +343,14 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { auto_upgrade_minor_version = true settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, + { + "authentication" = { + "managedIdentity" = { + "identifier-name" : "mi_res_id", + "identifier-value": var.infrastructure.iscsi.user_assigned_identity_id + } + } } - ) + ) } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index c5ff4712d1..baf156a07c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -242,6 +242,37 @@ resource "azurerm_network_security_rule" "nsr_controlplane_storage" { destination_address_prefixes = local.storage_subnet_existing ? 
data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes } +// Add control-plane connectivity rule for the storage subnet (ssh/rdp/winrm and NFS ports) +resource "azurerm_network_security_rule" "nsr_controlplane_storage" { + provider = azurerm.main + + count = local.storage_subnet_defined ? local.storage_subnet_nsg_exists ? 0 : 1 : 0 + depends_on = [ + azurerm_network_security_group.storage + ] + name = "ConnectivityToSAPApplicationSubnetFromControlPlane-ssh-rdp-winrm-ANF" + resource_group_name = local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].resource_group_name + ) : ( + azurerm_virtual_network.vnet_sap[0].resource_group_name + ) + network_security_group_name = try(azurerm_network_security_group.storage[0].name, azurerm_network_security_group.app[0].name) + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_ranges = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049] + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) + destination_address_prefixes = local.storage_subnet_existing ? data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes +} + // Add SSH network security rule resource "azurerm_network_security_rule" "nsr_controlplane_db" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index c77fdfa31a..c0b41cda3d 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && var.use_webapp ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id, "")) > 0 ?
1 : 0 depends_on = [ azurerm_resource_group.library ] diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf index b711793c5f..a1bdeece3e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ -177,3 +177,15 @@ output "database_kdump_disks" { ) ) } +output "database_kdump_disks" { + description = "List of Azure kdump disks" + value = distinct( + flatten( + [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : + [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : + format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun) + ] + ] + ) + ) + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 6679a1e6e6..f1f8d5e25f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -460,7 +460,6 @@ resource "azurerm_virtual_machine_extension" "anydb_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - } ) tags = var.tags @@ -485,7 +484,6 @@ resource "azurerm_virtual_machine_extension" "anydb_win_aem_extension" { settings = jsonencode( { "system": "SAP", - } ) tags = var.tags @@ -611,7 +609,7 @@ resource "azurerm_role_assignment" "role_assignment_msi" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database.high_availability && upper(var.database.os.os_type) == "LINUX" + var.database_server_count > 1 ) ? ( var.database_server_count ) : ( @@ -627,7 +625,7 @@ resource "azurerm_role_assignment" "role_assignment_msi_ha" { count = ( var.use_msi_for_clusters && length(var.fencing_role_name) > 0 && - var.database.high_availability && upper(var.database.os.os_type) == "LINUX" + var.database_server_count > 1 ) ? ( var.database_server_count ) : ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index febe959a9f..b50a5a13ab 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -499,7 +499,6 @@ resource "azurerm_virtual_machine_extension" "scs_lnx_aem_extension" { settings = jsonencode( { "system": "SAP", - } ) tags = var.tags @@ -520,7 +519,6 @@ resource "azurerm_virtual_machine_extension" "scs_win_aem_extension" { settings = jsonencode( { "system": "SAP", - } ) tags = var.tags @@ -728,6 +726,145 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { } +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityLinuxAgent" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { + provider = azurerm.main + count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" + publisher = "Microsoft.Azure.Security.Monitoring" + type = "AzureSecurityWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = true + + settings = jsonencode( + { + "enableGenevaUpload" = true, + "enableAutoConfig" = true, + "reportSuccessOnUnsupportedDistro" = true, + } + ) +} + +######################################################################################### +# # +# Azure Data Disk for Kdump # +# # +#######################################+################################################# +resource "azurerm_managed_disk" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.application_tier.scs_high_availability && + ( + upper(var.application_tier.scs_os.os_type) == "LINUX" && + ( var.application_tier.fence_kdump_disk_size > 0 ) + ) + ) ? local.scs_server_count : 0 + + name = format("%s%s%s%s%s", + try( var.naming.resource_prefixes.fence_kdump_disk, ""), + local.prefix, + var.naming.separator, + var.naming.virtualmachine_names.SCS_VMNAME[count.index], + try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) + ) + location = var.resource_group[0].location + resource_group_name = var.resource_group[0].name + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = try(var.application_tier.fence_kdump_disk_size,64) + disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) + tags = var.tags + + zone = local.scs_zonal_deployment ? ( + upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( + azurerm_linux_virtual_machine.scs[count.index].zone) : + null + ) : ( + null + ) + lifecycle { + ignore_changes = [ + create_option, + hyper_v_generation, + source_resource_id, + tags + ] + } + +} + +resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { + provider = azurerm.main + count = ( + local.enable_deployment && + var.application_tier.scs_high_availability && + ( + upper(var.application_tier.scs_os.os_type) == "LINUX" && + ( var.application_tier.fence_kdump_disk_size > 0 ) + ) + ) ? local.scs_server_count : 0 + + managed_disk_id = azurerm_managed_disk.kdump[count.index].id + virtual_machine_id = (upper(var.application_tier.scs_os.os_type) == "LINUX" # If Linux + ) ? ( + azurerm_linux_virtual_machine.scs[count.index].id + ) : null + caching = "None" + lun = var.application_tier.fence_kdump_lun_number +} + +resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorLinuxAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = true +} + + +resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { + provider = azurerm.main + count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( + local.scs_server_count) : ( + 0 ) + virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id + name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" + publisher = "Microsoft.Azure.Monitor" + type = "AzureMonitorWindowsAgent" + type_handler_version = "1.0" + auto_upgrade_minor_version = true +} + + resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { provider = azurerm.main count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf index 2ab01fd0dc..d490c142ab 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/anf.tf @@ -50,7 +50,14 @@ data "azurerm_netapp_volume" "hanadata" { depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = local.use_data_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.data_volume_count : 0 + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_data ? ( + var.hana_ANF_volumes.use_existing_data_volume || local.use_avg ? ( + var.database_server_count + ) : ( + 0 + )) : ( + 0 + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanadata, @@ -71,7 +78,14 @@ resource "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = local.create_log_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count : 0 + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log && !local.use_avg ? ( + var.hana_ANF_volumes.use_existing_log_volume ? ( + 0 + ) : ( + var.database_server_count + )) : ( + 0 + ) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, local.prefix, @@ -112,7 +126,14 @@ data "azurerm_netapp_volume" "hanalog" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = local.use_log_volumes ? (var.database_server_count - var.database.stand_by_node_count) * var.hana_ANF_volumes.log_volume_count : 0 + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_log ? ( + var.hana_ANF_volumes.use_existing_log_volume || local.use_avg ? ( + var.database_server_count + ) : ( + 0 + )) : ( + 0 + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanalog, @@ -132,7 +153,14 @@ resource "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = local.create_shared_volumes ? length(var.ppg) : 0 + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared && !local.use_avg ? ( + var.hana_ANF_volumes.use_existing_shared_volume ? 
( + 0 + ) : ( + var.database_server_count + )) : ( + 0 + ) : 0 name = format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, local.prefix, @@ -174,7 +202,15 @@ resource "azurerm_netapp_volume" "hanashared" { data "azurerm_netapp_volume" "hanashared" { provider = azurerm.main depends_on = [azurerm_netapp_volume_group_sap_hana.avg_HANA] - count = local.use_shared_volumes ? length(var.ppg) : 0 + + count = length(local.ANF_pool_settings.pool_name) > 0 ? var.hana_ANF_volumes.use_for_shared ? ( + var.hana_ANF_volumes.use_existing_shared_volume || local.use_avg ? ( + var.database_server_count + ) : ( + 0 + )) : ( + 0 + ) : 0 name = local.use_avg ? ( format("%s%s%s%s%d", var.naming.resource_prefixes.hanashared, diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 6047f02782..0d04eb615b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -45,7 +45,14 @@ ers_instance_number: "${ers_instance_number}" # the SAP Central Services virtual machines ers_lb_ip: ${ers_server_loadbalancer_ip} +<<<<<<< HEAD +<<<<<<< HEAD %{~ if platform == "SQLSERVER" } +======= +>>>>>>> 6b5d2310 (Fix initramfs image file configuration and update SAP parameters template) +======= +%{~ if platform == "SQLSERVER" } +>>>>>>> 44aee32a (Bring in Scale out improvements (#569)) # IP address of CNO in Windows and takes the form IPAddress/CIDR scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} %{~ endif } @@ -94,6 +101,9 @@ platform: ${platform} db_scale_out: ${scale_out} db_no_standby: ${scale_out_no_standby_role} +subnet_cidr_storage: ${subnet_cidr_storage} +%{~ endif } + subnet_cidr_storage: ${subnet_cidr_storage} %{~ endif } subnet_cidr_anf: ${subnet_cidr_anf} From a954b1fe8b523091eed6d20df58fd6015f7451da Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 25 May 2024 20:42:12 +0300 Subject: [PATCH 002/164] Fixing Merge conflicts --- deploy/scripts/New-SDAFDevopsProject.ps1 | 7 +++++-- deploy/scripts/installer.sh | 4 ---- .../modules/sap_system/output_files/sap-parameters.tmpl | 7 ------- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 51e2ea34ad..37f58847ff 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -815,7 +815,9 @@ if ($authenticationMethod -eq "Service Principal") { $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") } - else { + } +} +else { if ($Env:MSI_OBJECT_ID.Length -ne 0) { $MSI_objectId = $Env:MSI_OBJECT_ID @@ -869,6 +871,7 @@ if ($authenticationMethod -eq "Service Principal") { } } + #region App registration if ($WebApp) { Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green @@ -1100,7 +1103,7 @@ if (!$AlreadySet -or $ResetPAT ) { accessLevel = @{ accountLicenseType = "stakeholder" } - user = @{ + user = @{ origin = "aad" originId = $MSI_objectId subjectKind = "servicePrincipal" diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh index e04a98e8d7..475f051008 100755 --- a/deploy/scripts/installer.sh +++ b/deploy/scripts/installer.sh @@ -629,11 +629,7 @@ terraform -chdir="$terraform_module_directory" plan -no-color 
-detailed-exitcode return_value=$? echo "Terraform Plan return code: " $return_value -<<<<<<< HEAD if [ 1 == $return_value ] ; then -======= -if [ 1 == $return_value ]: then ->>>>>>> 49ea369e (When Terraform plan failed, stop the script from executing Terraform apply (#560)) echo "" echo "#########################################################################################" echo "# #" diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 0d04eb615b..83397bc733 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -45,14 +45,7 @@ ers_instance_number: "${ers_instance_number}" # the SAP Central Services virtual machines ers_lb_ip: ${ers_server_loadbalancer_ip} -<<<<<<< HEAD -<<<<<<< HEAD %{~ if platform == "SQLSERVER" } -======= ->>>>>>> 6b5d2310 (Fix initramfs image file configuration and update SAP parameters template) -======= -%{~ if platform == "SQLSERVER" } ->>>>>>> 44aee32a (Bring in Scale out improvements (#569)) # IP address of CNO in Windows and takes the form IPAddress/CIDR scs_clst_lb_ip: ${scs_cluster_loadbalancer_ip} %{~ endif } From fc4f91ffde5ab45c626696dd51c5c1699efe86f2 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Sat, 25 May 2024 20:43:27 +0300 Subject: [PATCH 003/164] Fix Linting --- .../roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml index e9d1dc4ec0..b973436cba 100644 --- a/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml +++ b/deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml @@ -150,7 +150,6 @@ - lsnrctl_start_secondary_results.rc == 0 - # Restart the Listener on Secondary node when the node_tier is Oracle-ASM. - name: "Oracle Data Guard - ASM - Setup Secondary: Stop lsnrctl on Secondary" From 08eebc13a40c59b6276905137214509583c79543 Mon Sep 17 00:00:00 2001 From: hdamecharla <71097261+hdamecharla@users.noreply.github.com> Date: Tue, 28 May 2024 13:58:40 +0530 Subject: [PATCH 004/164] Pacemaker changes, saptune and NAT Gateway (#583) ## Summary of Changes ### Authentication and Identity Management - **Web Application Authentication Configuration**: Repeated updates to refine and simplify the authentication configuration and identity management scripts. ### Repository and Package Management - **SLES Repositories**: Added repositories for SLES 15.3, 15.4, and 15.5. - **WAAgent Updates**: Updated WAAgent package, configuration checks, and systemd service reloads across multiple roles. ### Deployment Configuration - **NAT Gateway**: Added support for provisioning a NAT gateway, including configuration variables in Terraform files. - **AutoUpdate Configuration**: Updated AutoUpdate.Enabled settings and added Extensions.WaitForCloudInit across various roles. - **Oracle Simplification**: Simplified Oracle-related configurations, including ASM backup process and Data Guard tasks. - **SAP Deployment Playbooks**: Various updates to SAP deployment playbooks, including fixing conditions, resource flag settings, and systemd service paths. 
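For orientation, a minimal sketch of the NAT gateway pattern that the new `nat_gateway.tf` follows; every name, variable and subnet reference below is an illustrative assumption rather than the module's actual code:

```hcl
# Illustrative sketch only - names, variables and the subnet reference are assumptions.
resource "azurerm_public_ip" "natgw" {
  name                = "example-natgw-pip"
  location            = var.location
  resource_group_name = var.resource_group_name
  allocation_method   = "Static"
  sku                 = "Standard"
}

resource "azurerm_nat_gateway" "natgw" {
  name                    = "example-natgw"
  location                = var.location
  resource_group_name     = var.resource_group_name
  sku_name                = "Standard"
  idle_timeout_in_minutes = 10
}

resource "azurerm_nat_gateway_public_ip_association" "natgw" {
  nat_gateway_id       = azurerm_nat_gateway.natgw.id
  public_ip_address_id = azurerm_public_ip.natgw.id
}

# Subnets associated with the gateway send their outbound traffic through it.
resource "azurerm_subnet_nat_gateway_association" "app" {
  subnet_id      = azurerm_subnet.app[0].id
  nat_gateway_id = azurerm_nat_gateway.natgw.id
}
```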
### Infrastructure and Pipeline Enhancements - **Control Plane Pipeline**: Multiple fixes and improvements to error handling, logging, environment variables, and Azure AD authentication. - **Terraform and Ansible Versions**: Updated versions in deployment scripts to 1.7.5. - **Dotnet SDK**: Bumped dotnet SDK installation to the latest version. ### Miscellaneous - **Error Handling and Logging**: Improved error handling and logging across various deployment scripts and playbooks. - **Validation Fixes**: Fixed validation conditions for disk space, OS version checks, and cluster initialization commands. --------- Co-authored-by: Kimmo Forss Co-authored-by: devanshjain --- .../0.8-ams-providers/tasks/main.yaml | 22 +-- .../1.16-services/vars/os-services.yaml | 2 + .../tasks/1.17.1.2-sbd.yaml | 29 ++-- .../tasks/1.17.2.0-cluster-RedHat.yml | 21 ++- .../tasks/1.17.2.0-cluster-Suse.yml | 31 +++- .../1.4-packages/vars/os-packages.yaml | 12 +- .../2.10-sap-notes/tasks/2.10.3.yaml | 164 ++++++++++++++++++ .../2.10-sap-notes/tasks/main.yaml | 4 + .../tasks/5.5.4.1-cluster-RedHat.yml | 26 +++ .../tasks/5.5.4.1-cluster-Suse.yml | 13 ++ .../tasks/5.6.4.1-scsersprofile.yaml | 44 +++++ .../tasks/5.6.4.2-sap-resources-RedHat.yml | 27 ++- .../tasks/5.6.4.2-sap-resources-Suse.yml | 8 +- deploy/terraform/run/sap_landscape/output.tf | 12 +- .../run/sap_landscape/tfvar_variables.tf | 48 +++++ .../terraform/run/sap_landscape/transform.tf | 23 ++- .../modules/sap_landscape/nat_gateway.tf | 74 ++++++++ .../modules/sap_landscape/outputs.tf | 11 ++ .../modules/sap_landscape/variables_local.tf | 19 ++ 19 files changed, 549 insertions(+), 41 deletions(-) create mode 100644 deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml create mode 100644 deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml index 147748ec3b..ce8b332a7f 100644 --- a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml @@ -8,34 +8,34 @@ ers_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_ERS') }}" ha_cluster_port_number: "{{ 9664 if ansible_os_family | upper == 'SUSE' else 44322 }}" -- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" +- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" delegate_to: localhost ansible.builtin.shell: >- - az extension add --name workloads --yes || exit 1 + az extension add --name workloads --yes || exit 1 tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Get Access Token" +- name: "0.8.1 ams provider creation: - Get Access Token" delegate_to: localhost ansible.builtin.shell: >- - az account get-access-token --resource https://management.azure.com \ - --query accessToken -o tsv - register: ams_access_token + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: ams_access_token tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" +- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" delegate_to: localhost - ansible.builtin.command: uuidgen - register: ams_provider_guid + ansible.builtin.command: uuidgen + register: ams_provider_guid tags: - skip_ansible_lint - name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" delegate_to: localhost when: - - 
ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' - - enable_os_monitoring + - ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' + - enable_os_monitoring block: - name: "0.8.1 ams provider creation: - Create Prometheus OS AMS provider instance" ansible.builtin.uri: diff --git a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml index c192491fe4..02f41f4f98 100644 --- a/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml +++ b/deploy/ansible/roles-os/1.16-services/vars/os-services.yaml @@ -78,6 +78,8 @@ services: sles_sap15: - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'stopped' } - { tier: 'os', service: 'fstrim.timer', node_tier: 'all', state: 'disabled' } + - { tier: 'ha', service: 'logd', node_tier: 'all', state: 'enabled' } + - { tier: 'ha', service: 'logd', node_tier: 'all', state: 'started' } # --------------------------- Begin - Packages required for iSCSI -----------------------------------------8 # https://learn.microsoft.com/en-us/azure/sap/workloads/high-availability-guide-suse-pacemaker#sbd-with-an-iscsi-target-server - { tier: 'iscsi', service: 'targetcli', node_tier: 'iscsi', state: 'enabled' } diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml index a6db93c040..5096ec160c 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1.2-sbd.yaml @@ -270,7 +270,7 @@ # /*------------------------------------ # | -# | systemctl enable sbd +# | systemctl enable sbd, iscsi, iscsid # | # +------------------------------------4 # - name: "systemctl enable sbd" @@ -291,16 +291,23 @@ (scs_cluster_type == "ASD") or (scs_cluster_type == "ISCSI") # scs_high_availability = true is already assumed -# you need to restart cluster after enabling sbd -# - name: "systemctl restart pacemaker" -# ansible.builtin.systemd: -# name: pacemaker -# state: restarted -# when: -# - (database_cluster_type == "ASD") or -# (database_cluster_type == "ISCSI") or -# (scs_cluster_type == "ASD") or -# (scs_cluster_type == "ISCSI") # scs_high_availability = true is already assumed +- name: "systemctl enable iscsi" + ansible.builtin.systemd: + name: iscsi + enabled: true + daemon_reload: true + when: + - (database_cluster_type == "ISCSI") or + (scs_cluster_type == "ISCSI") + +- name: "systemctl enable iscsid" + ansible.builtin.systemd: + name: iscsid + enabled: true + daemon_reload: true + when: + - (database_cluster_type == "ISCSI") or + (scs_cluster_type == "ISCSI") ... 
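# The tasks above are the Ansible equivalent of running the following on each
# cluster node (a manual sketch only, not part of the recorded change):
#   systemctl daemon-reload && systemctl enable sbd iscsi iscsid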
# /*---------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 1583fa3d4f..24ae236ba9 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -120,6 +120,11 @@ - scs_cluster_type == "AFA" or database_cluster_type == "AFA" # scs_high_availability = true is already assumed - inventory_hostname == primary_instance_name block: + - name: "1.17 Generic Pacemaker - Check if the pacemaker package version is greater than pacemaker-2.0.4" + when: ansible_distribution_major_version in ["8", "9"] + ansible.builtin.set_fact: + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + - name: "1.17 Generic Pacemaker - Ensure STONITH timeout is raised" ansible.builtin.command: pcs property set stonith-timeout=900 @@ -161,7 +166,7 @@ - use_msi_for_clusters - name: "1.17 Generic Pacemaker - Ensure the STONITH device is configured" - ansible.builtin.command: > + ansible.builtin.shell: > pcs stonith create rsc_st_azure fence_azure_arm login="{{ fencing_spn_client_id }}" passwd="{{ fencing_spn_client_pwd }}" @@ -173,14 +178,14 @@ pcmk_monitor_timeout=120 pcmk_monitor_retries=4 pcmk_action_limit=3 - pcmk_delay_max=15 pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + {%if not is_pcmk_ver_gt_204%}"pcmk_delay_max=15"{%endif%} when: - ansible_distribution_major_version in ["8", "9"] - not use_msi_for_clusters - name: "1.17 Generic Pacemaker - Ensure the STONITH device is configured (MSI)" - ansible.builtin.command: > + ansible.builtin.shell: > pcs stonith create rsc_st_azure fence_azure_arm msi=true resourceGroup="{{ resource_group_name }}" @@ -190,12 +195,20 @@ pcmk_monitor_timeout=120 pcmk_monitor_retries=4 pcmk_action_limit=3 - pcmk_delay_max=15 pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + {%if not is_pcmk_ver_gt_204%}"pcmk_delay_max=15"{%endif%} when: - ansible_distribution_major_version in ["8", "9"] - use_msi_for_clusters + # The property priority-fencing-delay is applicable for pacemaker-2.0.4-6.el8 version or higher. If you're setting up priority-fencing-delay on an existing cluster, make sure to unset the pcmk_delay_max option in the fencing device.
+ # to do: add ansible code to create the stonith resource without the pcmk_delay_max option + - name: "1.17 Generic Pacemaker - set priority-fencing-delay" + ansible.builtin.command: pcs property set priority-fencing-delay=15 + when: + - ansible_distribution_major_version in ["8", "9"] + - is_pcmk_ver_gt_204 + - name: "1.17 Generic Pacemaker - Update Monitor interval" ansible.builtin.command: pcs resource update rsc_st_azure op monitor interval=3600 diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 5e458976f3..62500369ee 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -177,7 +177,9 @@ pcmk_action_limit=3 power_timeout=240 pcmk_reboot_timeout=900 + pcmk_delay_max=15 pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + op monitor interval=3600 timeout=120 when: - not use_msi_for_clusters or distribution_full_id in ["sles_sap12.4"] failed_when: crm_configure_result.rc > 1 @@ -192,7 +194,9 @@ pcmk_action_limit=3 power_timeout=240 pcmk_reboot_timeout=900 + pcmk_delay_max=15 pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + op monitor interval=3600 timeout=120 failed_when: crm_configure_result.rc > 1 when: - use_msi_for_clusters @@ -346,11 +350,32 @@ - name: "1.17 Generic Pacemaker - Ensure maintenance mode is set" ansible.builtin.shell: crm configure property maintenance-mode=true + - name: "1.17 Generic Pacemaker - Set the pacemaker cluster node health strategy" + ansible.builtin.shell: crm configure property node-health-strategy=custom + + - name: "1.17 Generic Pacemaker - Set the pacemaker cluster node health constraint" + ansible.builtin.shell: >- + crm configure location loc_azure_health \ + /'!health-.*'/ rule '#health-azure': defined '#uname' + + - name: "1.17 Generic Pacemaker - Set initial value for cluster attributes for {{ primary_instance_name }}" + ansible.builtin.shell: crm_attribute --node {{ primary_instance_name }} --name '#health-azure' --update 0 + + - name: "1.17 Generic Pacemaker - Set initial value for cluster attributes for {{ secondary_instance_name }}" + ansible.builtin.shell: crm_attribute --node {{ secondary_instance_name }} --name '#health-azure' --update 0 + - name: "1.17 Generic Pacemaker - Ensure Pacemaker resources for the Azure agent is created" - ansible.builtin.shell: crm configure primitive rsc_azure-events ocf:heartbeat:azure-events op monitor interval=10s + ansible.builtin.shell: >- + crm configure primitive health-azure-events ocf:heartbeat:azure-events-az \ + meta allow-unhealthy-nodes=true failure-timeout=120s \ + op start start-delay=90s \ + op monitor interval=10s + register: crm_configure_result + failed_when: + - "crm_configure_result.stderr | lower | regex_search('error|fail')" # Check if the resource is created successfully - name: "1.17 Generic Pacemaker - Ensure clone resource azure-events is configured" - ansible.builtin.shell: crm configure clone cln_azure-events rsc_azure-events + ansible.builtin.shell: crm configure clone health-azure-events-cln health-azure-events - name: "1.17 Generic Pacemaker - Remove 
false positives" ansible.builtin.shell: crm_resource -C @@ -361,7 +386,7 @@ # /*---------------------------------------------------------------------------8 # | | -# | Azure scheduled events - BEGIN | +# | Azure scheduled events - END | # | | # +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 9669c97d94..2cec78ad02 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -255,11 +255,21 @@ packages: redhat8.1: redhat8.2: redhat8.4: - redhat8.6: + redhat8.6: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat8.8: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat8.9: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat9.0: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat9.2: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } sles_sap12: - { tier: 'os', package: 'chrony', node_tier: 'all', state: 'present' } diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml new file mode 100644 index 0000000000..b7927920b1 --- /dev/null +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -0,0 +1,164 @@ +--- + +- name: "2.10.3 sap-notes: saptune - Register BoM" + when: + - bom is not defined + - not is_run_with_infraCreate_only + ansible.builtin.include_role: + name: roles-sap/3.3.1-bom-utility + tasks_from: bom-register + vars: + bom_name: "{{ bom_base_name }}" + task_prefix: "2.10.3 sap-notes : " + + +- name: "2.10.3 sap-notes : - Check if saptune package is installed" + ansible.builtin.package: + name: saptune>=3.0 + state: present + +# version 3 of saptune does not use tuned anymore +- name: "2.10.3 sap-notes : - Perform saptune service takeover" + ansible.builtin.shell: saptune service takeover + register: saptune_service_takeover + ignore_errors: true + + +# use ansible.builtin.service to stop and disable services +- name: "2.10.3 sap-notes : - Disable services like tuned and sapconf if they exist" + when: + - saptune_service_takeover.rc != 0 + ansible.builtin.service: + name: "{{ item }}" + state: stopped + enabled: false + loop: + - tuned + - sapconf + + +# check if saptune_check can run successfully +# this does not enable saptune but validates if there are any errors. 
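+# A non-zero exit code from saptune_check signals a broken saptune installation,
+# which is why the tasks below only interrogate and apply a solution when this
+# check returns 0.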
+- name: "2.10.3 sap-notes : - Run saptune_check" + ansible.builtin.shell: saptune_check + register: saptune_check_result + + +- name: "2.10.3 sap-notes : - Interrogate active saptune solution" + ansible.builtin.shell: saptune solution enabled + register: active_saptune_solution + when: + - saptune_check_result.rc == 0 + +# We need to capture the first block of non-whitespace characters +# output from saptune solution enabled command has an empty line followed by solution name +- name: "2.10.3 sap-notes : - Set fact for active saptune solution" + ansible.builtin.set_fact: + saptune_solution_enabled: "{{ (active_saptune_solution.stdout_lines)[1] }}" + when: + - active_saptune_solution.stdout_lines is defined + - active_saptune_solution.stdout_lines | length > 0 + +- name: "2.10.3 sap-notes : - Show active saptune solution" + ansible.builtin.debug: + var: saptune_solution_enabled + +- name: "2.10.3 sap-notes : - Backward Compatibility - Check required Database HA variables " + when: + - db_high_availability is defined + - database_high_availability is not defined + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + tags: + - always + +- name: "2.10.3 sap-notes : - Set fact for high availability" + ansible.builtin.set_fact: + is_high_availability: >- + {%- if node_tier in ['scs', 'ers'] and scs_high_availability -%} + true + {%- elif node_tier == 'hana' and database_high_availability -%} + true + {%- else -%} + false + {%- endif -%} + +# For HA deployments on Azure, we need to override to disable tcp timestamps, reuse and recycle. +# This can be done by copying the sapnote file 2382421 from /usr/share/saptune/notes to /etc/saptune/override +# since this file does not exist by default, we need to create it and add the required values. 
+- name: "2.10.3 sap-notes : - Override sap note 2382421 for saptune when high availability" + when: + - is_high_availability + - node_tier in ['scs', 'ers', 'hana', 'db2', 'sybase'] + - saptune_solution_enabled == 'NONE' + block: + - name: "2.10.3 sap-notes : - Copy sapnote 2382421 to /etc/saptune/override" + ansible.builtin.blockinfile: + path: /etc/saptune/override/2382421 + create: true + backup: true + owner: root + group: root + mode: 0640 + marker: "# {mark} ANSIBLE MANAGED BLOCK" + block: >- + [sysctl] + net.ipv4.tcp_timestamps = 0 + net.ipv4.tcp_tw_reuse = 0 + net.ipv4.tcp_tw_recycle = 0 +# /usr/lib/sysctl.d/99-sysctl.conf +- name: "2.10.3 sap-notes : - Set fact for saptune solution to use" + ansible.builtin.set_fact: + saptune_solution_to_apply: >- + {%- if 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APP+DB' + {%- elif 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER+HANA' + {%- elif node_tier in ['scs', 'ers','pas','app'] and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APPSERVER' + {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-DBSERVER' + {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} + 'NETWEAVER' + {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} + 'HANA' + {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform in ['SYBASE', 'DB2', 'ORACLE', 'ORACLE-ASM', 'SQLSERVER'] and bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER' + {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} + 'HANA' + {%- elif node_tier in ['sybase'] and platform == 'SYBASE' and bom.product_ids.scs is search(':NW\d{3}') -%} + 'SAP-ASE' + {%- else -%} + 'NETWEAVER' + {%- endif -%} + +- name: "2.10.3 sap-notes : - Run saptune solution verify against {{ saptune_solution_to_apply }}" + ansible.builtin.shell: saptune solution verify {{ saptune_solution_to_apply }} + register: saptune_solution_verify + changed_when: false + failed_when: false + +- name: "2.10.3 sap-notes : - Run saptune solution revert if verify fails" + when: + - saptune_solution_enabled != 'NONE' + - saptune_solution_verify.rc != 0 + ansible.builtin.command: "saptune solution revert {{ saptune_solution_enabled }}" + register: saptune_solution_revert + + +- name: "2.10.3 sap-notes : - Run saptune solution apply {{ saptune_solution_to_apply }}" + when: + - saptune_solution_verify.rc != 0 + ansible.builtin.command: "saptune solution apply {{ saptune_solution_to_apply }}" + register: saptune_solution_apply + +- name: "2.10.3 sap-notes : - Run saptune apply note 2382421" + when: + - is_high_availability + - node_tier in ['scs', 'ers'] + ansible.builtin.command: "saptune note apply 2382421" + register: saptune_note_apply + +- name: "2.10.3 sap-notes : - Run saptune solution verify" + ansible.builtin.command: "saptune solution verify {{ saptune_solution_to_apply }}" + changed_when: false diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml index f9ee5d99ad..749adc073d 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/main.yaml 
@@ -125,6 +125,10 @@ # ToDo RHEL7 and also the other SAP nodes # -------------------------------------+---------------------------------------8 +- name: "2.10.3 - saptune for SLES 12 and SLES 15" + ansible.builtin.include_tasks: 2.10.3.yaml + when: + - distribution_id in ['suse12', 'suse15', 'sles_sap12', 'sles_sap15'] # /*----------------------------------------------------------------------------8 # | END | diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 4065c2470b..89d6be3b69 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -26,6 +26,11 @@ get_status_maintenance_mode.stdout is not search('maintenance-mode') or get_status_maintenance_mode.stdout is search('maintenance-mode: false') + - name: "5.5.4.1 HANA Cluster configuration - Check if the pacemaker package version is greater than pacemaker-2.0.4" + when: ansible_distribution_major_version in ["8", "9"] + ansible.builtin.set_fact: + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + - name: "5.5.4.1 HANA Cluster configuration - Ensure the SAP HANA Topology resource is created" ansible.builtin.shell: > pcs resource create SAPHanaTopology_{{ db_sid | upper }}_{{ db_instance_number }} SAPHanaTopology @@ -127,6 +132,27 @@ - db_high_availability is defined - database_high_availability is not defined + # for two node clusters set properties + - name: "5.5.4.1 HANA Cluster configuration - Set the cluster properties for two node clusters" + when: + - database_high_availability + - not db_scale_out + - is_pcmk_ver_gt_204 + block: + - name: "5.5.4.1 HANA Cluster configuration - set resource defaults 'priority'" + ansible.builtin.shell: pcs resource defaults update priority=1 + register: update_priority + failed_when: update_priority.rc > 1 + + - name: "5.5.4.1 HANA Cluster configuration - set SAPHana defaults 'priority' to 10" + ansible.builtin.shell: pcs resource update SAPHana_{{ db_sid | upper }}_{{ db_instance_number }} meta priority=10 + register: update_priority_saphana + + - name: "5.5.4.1 HANA Cluster configuration - set priority-fencing-delay" + ansible.builtin.shell: pcs property set priority-fencing-delay=15s + register: constraint + failed_when: constraint.rc > 1 + - name: "5.5.4.1 HANA Cluster configuration - Configure cluster constraints for SAP HANA and ANF" when: - database_high_availability diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml index 193b038a93..4d4683f84f 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-Suse.yml @@ -84,6 +84,9 @@ register: sap_hana_msl failed_when: sap_hana_msl.rc > 1 + - name: "5.5.4.1 HANA Pacemaker configuration - Set priority for SAP HANA ms resource" + ansible.builtin.shell: crm resource meta msl_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }} set priority 100 + - name: "5.5.4.1 HANA Pacemaker configuration - Ensure SAP HANA Virtual IP resource is configured" ansible.builtin.shell: > crm configure primitive rsc_ip_{{ db_sid | upper }}_HDB{{ db_instance_number }} ocf:heartbeat:IPaddr2 @@ -136,9 +139,19 @@ - name: "5.5.4.1 HANA Pacemaker 
configuration - Ensure any required cluster resources are cleaned up" ansible.builtin.command: "crm resource cleanup rsc_SAPHana_{{ db_sid | upper }}_HDB{{ db_instance_number }}" + - name: "5.5.4.1 HANA Pacemaker configuration - cleanup cluster resources" + ansible.builtin.command: "crm resource cleanup" + + - name: "5.5.4.1 HANA Pacemaker configuration - Set the priority fencing delay" + ansible.builtin.shell: crm configure property priority-fencing-delay=30 + - name: "5.5.4.1 HANA Pacemaker configuration - Ensure maintenance mode is disabled" ansible.builtin.command: crm configure property maintenance-mode=false + - name: "5.5.4.1 HANA Pacemaker configuration - Wait for a few minutes for the cluster to stabilize" + ansible.builtin.wait_for: + timeout: 120 + when: inventory_hostname == primary_instance_name - name: "Backward Compatibility - Check required Database HA variables" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml index 476ad229ba..c085f8a2b9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml @@ -14,6 +14,32 @@ comment: "{{ sap_sid | lower }}adm User account" groups: haclient +# For systemd services the SAP ASCS/SCS and ERS resources are created as systemd services +# the path for the service file is /etc/systemd/system/SAP<SID>_<instance number>.service +- name: "5.6 SCSERS - Check if the SAP ASCS/SCS and ERS resources are created as systemd services" + ansible.builtin.stat: + path: "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ sap_instance_number }}.service" + register: systemd_service_file_path + loop: + - "{{ scs_instance_number }}" + - "{{ ers_instance_number }}" + loop_control: + loop_var: sap_instance_number + +- name: "5.6 SCSERS - Show the discovered systemd service files" + ansible.builtin.debug: + var: systemd_service_file_path + verbosity: 2 + +- name: "5.6 SCSERS - Set fact for the systemd services existence" + ansible.builtin.set_fact: + systemd_service_names: "{{ + systemd_service_file_path.results + | selectattr('stat.exists', 'equalto', true) + | map(attribute='stat.path') + | regex_replace('/etc/systemd/system/', '') + }}" + - name: "5.6 SCSERS - SUSE - Add the ASCS and ERS service definitions from sapservices file" ansible.builtin.lineinfile: backup: true @@ -26,6 +52,24 @@ - {regexp: "LD_LIBRARY_PATH=/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}/exe", lif: "LD_LIBRARY_PATH=/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}/exe:$LD_LIBRARY_PATH; export LD_LIBRARY_PATH; /usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}/exe/sapstartsrv pf=/usr/sap/{{ sap_sid }}/SYS/profile/{{ sap_sid }}_ERS{{ ers_instance_number }}_{{ ers_virtual_hostname }} -D -u {{ sap_sid | lower }}adm" } when: - ansible_os_family | upper == "SUSE" + - systemd_service_names is not defined or systemd_service_names | length == 0 + +- name: "5.6 SCSERS - SUSE - Add the ASCS and ERS systemd service definitions from sapservices file" + ansible.builtin.lineinfile: + backup: true + path: /usr/sap/sapservices + regexp: "{{ item.regexp }}" + line: "{{ item.lif }}" + state: present + loop: + - {regexp: "systemctl --no-ask-password start SAP{{ sap_sid | upper }}_{{ scs_instance_number }}", lif: "systemctl --no-ask-password start SAP{{ sap_sid | upper }}_{{ scs_instance_number }} # sapstartsrv pf=/usr/sap/{{
sap_sid | upper }}_{{ instance_type | upper }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" } + - {regexp: "systemctl --no-ask-password start SAP{{ sap_sid | upper }}_{{ ers_instance_number }}", lif: "systemctl --no-ask-password start SAP{{ sap_sid | upper }}_{{ ers_instance_number }} # sapstartsrv pf=/usr/sap/{{ sap_sid | upper }}/SYS/profile/{{ sap_sid | upper }}_ERS{{ ers_instance_number }}_{{ ers_virtual_hostname }}" } + when: + - ansible_os_family | upper == "SUSE" + - hostvars[primary_instance_name]['ensa2'] | default(false) + - systemd_service_names is defined + - systemd_service_names | length > 0 - name: "5.6 SCSERS - REDHAT - Comment the ASCS and ERS service definitions from sapservices file" ansible.builtin.replace: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 79995446bc..9196be86d5 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -106,12 +106,29 @@ register: co_location failed_when: co_location.rc > 1 - # - name: "5.6 SCSERS - RHEL - ENSA2 - Set the Cluster out of maintenance mode" - # ansible.builtin.shell: pcs property set maintenance-mode=false +- name: "5.6 SCSERS - RHEL - Check if the pacemaker package version is greater than pacemaker-2.0.4" + when: ansible_distribution_major_version in ["8", "9"] + ansible.builtin.set_fact: + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" - # - name: "5.6 SCSERS - RHEL - ENSA2 - pause the execution for 2 minutes" - # ansible.builtin.wait_for: - # timeout: 120 +- name: "5.6 SCSERS - RHEL - Set properties for two node clusters" + when: + - inventory_hostname == primary_instance_name + - is_pcmk_ver_gt_204 | default(false) + block: + - name: "5.6 SCSERS - RHEL - set resource defaults 'priority'" + ansible.builtin.shell: pcs resource defaults update priority=1 + register: update_priority + failed_when: update_priority.rc > 1 + + - name: "5.6 SCSERS - RHEL - set ASCS/SCS default 'priority' to 10" + ansible.builtin.shell: pcs resource update rsc_sap_{{ sap_sid }}_{{ instance_type | upper }}{{ scs_instance_number }} meta priority=10 + register: update_priority_sapinstance + + - name: "5.6 SCSERS - RHEL - set priority-fencing-delay" + ansible.builtin.shell: pcs property set priority-fencing-delay=15s + register: constraint + failed_when: constraint.rc > 1 # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml index f3e1e4a531..0a8fd87603 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml @@ -26,6 +26,7 @@ # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the cluster on maintenance mode" # ansible.builtin.shell: crm configure property maintenance-mode=true + - name: "5.6 SCSERS - SUSE - ENSA1 - Configure SAP ASCS/SCS resources" ansible.builtin.shell: > crm configure primitive rsc_sap_{{ sap_sid }}_{{ instance_type | upper }}{{ scs_instance_number }} SAPInstance \ @@ -66,6 +67,9 @@ register: co_location failed_when: co_location.rc > 1 + - name: "5.6 SCSERS - SUSE - 
ENSA1 - remove priority-fencing-delay attribute" + ansible.builtin.shell: crm_attribute --delete --name priority-fencing-delay + # - name: " Bring primary node online " # ansible.builtin.shell: crm node online {{ primary_instance_name }} @@ -100,6 +104,8 @@ # - name: "5.6 SCSERS - SUSE - ENSA2 - Set the cluster on maintenance mode" # ansible.builtin.shell: crm configure property maintenance-mode=true + - name: "5.6 SCSERS - SUSE - ENSA2 - Set the priority fencing delay" + ansible.builtin.shell: crm configure property priority-fencing-delay=30 - name: "5.6 SCSERS - SUSE - ENSA2 - Configure SAP ASCS/SCS resources" ansible.builtin.shell: > @@ -109,7 +115,7 @@ params InstanceName={{ instance_name }} \ START_PROFILE="{{ start_profile_path }}" \ AUTOMATIC_RECOVER=false \ - meta resource-stickiness=5000 + meta resource-stickiness=5000 priority=100 register: ascs_rsc_sap failed_when: ascs_rsc_sap.rc > 1 diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf index 4942ba1b70..1a5a88b9ab 100644 --- a/deploy/terraform/run/sap_landscape/output.tf +++ b/deploy/terraform/run/sap_landscape/output.tf @@ -334,5 +334,15 @@ output "iSCSI_servers" { ############################################################################### output ams_resource_id { description = "AMS resource ID" - value = module.sap_landscape.ams_resource_id + value = module.sap_landscape.ams_resource_id + } + +############################################################################### +# # +# NAT Gateway Resource # +# # +############################################################################### +output ng_resource_id { + description = "NAT Gateway resource ID" + value = module.sap_landscape.ng_resource_id } diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index de0c650dea..62f59a5901 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -828,3 +828,51 @@ variable "ams_laws_arm_id" { description = "If provided, Azure resource id for the Log analytics workspace in AMS" default = "" } + +#######################################4#######################################8 +# # +# NAT Gateway variables # +# # +#######################################4#######################################8 + +variable "deploy_nat_gateway" { + description = "If true, a NAT Gateway will be deployed" + type = bool + default = false + } + +variable "nat_gateway_name" { + description = "If provided, the name of the NAT Gateway" + type = string + default = "" + } + +variable "nat_gateway_arm_id" { + description = "If provided, Azure resource id for the NAT Gateway" + type = string + default = "" + } + +variable "nat_gateway_public_ip_zones" { + description = "If provided, the zones for the NAT Gateway public IP" + type = list(string) + default = [] + } + +variable "nat_gateway_public_ip_arm_id" { + description = "If provided, Azure resource id for the NAT Gateway public IP" + type = string + default = "" + } + +variable "nat_gateway_idle_timeout_in_minutes" { + description = "The idle timeout in minutes for the NAT Gateway" + type = number + default = 4 + } + +variable "nat_gateway_public_ip_tags" { + description = "Tags for the public_ip resource" + type = map(string) + default = null + } diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index b88cc683fa..9507d6f04c 100644 --- 
a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -162,11 +162,22 @@ locals { ) > 0 ams_instance = { - name = var.ams_instance_name - create_ams_instance = var.create_ams_instance - ams_laws_arm_id = var.ams_laws_arm_id + name = var.ams_instance_name + create_ams_instance = var.create_ams_instance + ams_laws_arm_id = var.ams_laws_arm_id } + nat_gateway = { + create_nat_gateway = var.deploy_nat_gateway + name = var.nat_gateway_name + arm_id = try(var.nat_gateway_arm_id, "") + region = lower(coalesce(var.location, try(var.infrastructure.region, ""))) + public_ip_zones = try(var.nat_gateway_public_ip_zones, ["1", "2", "3"]) + public_ip_arm_id = try(var.nat_gateway_public_ip_arm_id, "") + idle_timeout_in_minutes = var.nat_gateway_idle_timeout_in_minutes + ip_tags = try(var.nat_gateway_public_ip_tags, {}) + } + temp_infrastructure = { environment = coalesce(var.environment, try(var.infrastructure.environment, "")) region = lower(coalesce(var.location, try(var.infrastructure.region, ""))) @@ -568,6 +579,10 @@ locals { { "ams_instance" = local.ams_instance } + ), ( + { + "nat_gateway" = local.nat_gateway + } ),( local.iscsi.iscsi_count > 0 ? ( { @@ -608,6 +623,6 @@ locals { install_volume_throughput = var.ANF_install_volume_throughput install_volume_zone = var.ANF_install_volume_zone[0] - } + } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf new file mode 100644 index 0000000000..c9c65e4053 --- /dev/null +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf @@ -0,0 +1,74 @@ +# https://github.com/hashicorp/terraform-provider-azurerm/issues/18741 +# public IP address for the natGateway +resource "azurerm_public_ip" "ng_pip" { + provider = azurerm.main + count = local.create_nat_gateway ? 1 : 0 + name = local.nat_gateway_name + location = local.region + resource_group_name = azurerm_resource_group.resource_group[0].name + idle_timeout_in_minutes = local.nat_gateway_idle_timeout_in_minutes + zones = local.nat_gateway_public_ip_zones + ip_tags = local.nat_gateway_public_ip_tags + allocation_method = "Static" + sku = "Standard" +} + +# NAT Gateway +# Currently only Standard SKU is supported. +# https://learn.microsoft.com/en-us/azure/nat-gateway/nat-overview#availability-zones +# Only one Availability Zone can be defined. We will not provide a zone for now. +resource "azurerm_nat_gateway" "ng" { + provider = azurerm.main + count = local.create_nat_gateway ? 1 : 0 + name = local.nat_gateway_name + location = local.region + resource_group_name = azurerm_resource_group.resource_group[0].name + idle_timeout_in_minutes = local.nat_gateway_idle_timeout_in_minutes + sku_name = "Standard" + depends_on = [ + azurerm_public_ip.ng_pip + ] +} + + +# NAT Gateway IP Configuration +resource "azurerm_nat_gateway_public_ip_association" "ng_pip_assoc" { + provider = azurerm.main + count = local.create_nat_gateway ? 1 : 0 + nat_gateway_id = azurerm_nat_gateway.ng[0].id + public_ip_address_id = azurerm_public_ip.ng_pip[0].id +} + + +# NAT Gateway subnet association with app subnet +resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc" { + provider = azurerm.main + count = local.create_nat_gateway ? 
1 : 0 + nat_gateway_id = azurerm_nat_gateway.ng[0].id + subnet_id = azurerm_subnet.app[0].id + depends_on = [ + azurerm_subnet.app + ] +} + +# NAT Gateway subnet association with db subnet +resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_db" { + provider = azurerm.main + count = local.create_nat_gateway ? 1 : 0 + nat_gateway_id = azurerm_nat_gateway.ng[0].id + subnet_id = azurerm_subnet.db[0].id + depends_on = [ + azurerm_subnet.db + ] +} + +# NAT Gateway subnet association with web subnet +resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_web" { + provider = azurerm.main + count = local.create_nat_gateway ? 1 : 0 + nat_gateway_id = azurerm_nat_gateway.ng[0].id + subnet_id = azurerm_subnet.web[0].id + depends_on = [ + azurerm_subnet.web + ] +} diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf index 568751564d..864cb9efd3 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/outputs.tf @@ -552,3 +552,14 @@ output "ams_resource_id" { description = "Azure resource identifier for the AMS resource" value = local.create_ams_instance ? azapi_resource.ams_instance[0].id : "" } + +############################################################################### +# # +# NAT Gateway resource properties # +# # +############################################################################### + +output "ng_resource_id" { + description = "Azure resource identifier for the NAT Gateway" + value = local.create_nat_gateway ? azurerm_nat_gateway.ng[0].id : "" + } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index bf46e32d7a..35fabcee2b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -64,6 +64,25 @@ locals { ams_laws_arm_id = length(var.infrastructure.ams_instance.ams_laws_arm_id) > 0 ? ( var.infrastructure.ams_instance.ams_laws_arm_id) : "" + // NAT Gateway + create_nat_gateway = var.infrastructure.nat_gateway.create_nat_gateway + nat_gateway_name = length(var.infrastructure.nat_gateway.name) > 0 ? ( + var.infrastructure.nat_gateway.name) : ( + format("%s%s%s%s", + var.naming.resource_prefixes.nat_gateway, + local.prefix, + local.resource_suffixes.nat_gateway + ) + ) + nat_gateway_arm_id = length(var.infrastructure.nat_gateway.arm_id) > 0 ? ( + var.infrastructure.nat_gateway.arm_id) : "" + nat_gateway_public_ip_arm_id = length(var.infrastructure.nat_gateway.public_ip_arm_id) > 0 ? ( + var.infrastructure.nat_gateway.public_ip_arm_id) : "" + nat_gateway_public_ip_zones = length(var.infrastructure.nat_gateway.public_ip_zones) > 0 ? 
( + var.infrastructure.nat_gateway.public_ip_zones) : [] + nat_gateway_idle_timeout_in_minutes = var.infrastructure.nat_gateway.idle_timeout_in_minutes + nat_gateway_public_ip_tags = var.infrastructure.nat_gateway.ip_tags + // SAP vnet SAP_virtualnetwork_id = try(var.infrastructure.vnets.sap.arm_id, "") SAP_virtualnetwork_exists = length(local.SAP_virtualnetwork_id) > 0 From 8eea367217af3dd0f3814f6da684531b800bcd22 Mon Sep 17 00:00:00 2001 From: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Date: Tue, 28 May 2024 17:57:16 +0300 Subject: [PATCH 005/164] Fix regex necessary to comment lines in /usr/sap/sapservices (#584) Co-authored-by: Csaba Daradics --- .../5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml index c085f8a2b9..13cc11a5a3 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.1-scsersprofile.yaml @@ -75,7 +75,7 @@ ansible.builtin.replace: backup: true path: /usr/sap/sapservices - regexp: '^([^#\n].+{{ sapservice }}.+)$' + regexp: '^(?!#)(.*{{ sapservice }}.*)$' replace: '# \1' loop: - "{{ sap_sid | upper }}_{{ instance_type | upper }}{{ scs_instance_number }}_{{ scs_virtual_hostname }}" From 3e0f6416c22aa9f02c42cab0687a806f190e1ef7 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 29 May 2024 09:25:46 +0300 Subject: [PATCH 006/164] remove duplicate resource --- .../modules/sap_deployer/infrastructure.tf | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index d301822699..cf2699a187 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -145,19 +145,4 @@ resource "azurerm_role_assignment" "resource_group_acsservice_msi" { principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id } -resource "azurerm_role_assignment" "resource_group_acsservice" { - provider = azurerm.main - count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 - scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions administrator" - principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id -} - -resource "azurerm_role_assignment" "resource_group_acsservice_msi" { - provider = azurerm.main - count = var.assign_subscription_permissions ? 1 : 0 - scope = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].id : azurerm_resource_group.deployer[0].id - role_definition_name = "Azure Center for SAP solutions administrator" - principal_id = length(var.deployer.user_assigned_identity_id) == 0 ? 
azurerm_user_assigned_identity.deployer[0].principal_id : data.azurerm_user_assigned_identity.deployer[0].principal_id -} From 50402b99b890bddd839032174d2242e9e840bf77 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 29 May 2024 10:09:15 +0300 Subject: [PATCH 007/164] Fix path --- deploy/pipelines/01-deploy-control-plane.yaml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index ad8a3e39df..69ef1149ee 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -572,19 +572,14 @@ stages: echo SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH >.sap_deployment_automation/config export SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH - cd DEPLOYER - ls -lart - cd $(deployerfolder) - ls -lart - echo -e "$green--- File Validations ---$reset" - if [ ! -f DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then + if [ ! -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then echo -e "$boldred--- File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found ---$reset" echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found." exit 2 fi - if [ ! -f LIBRARY/$(libraryfolder)/$(libraryconfig) ]; then + if [ ! -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) ]; then echo -e "$boldred--- File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found ---$reset" echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found." exit 2 From 4b831bf676b9c8258527622aa90b7e77c611b39c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 30 May 2024 04:04:34 +0300 Subject: [PATCH 008/164] chore: Fix the count for the table resource --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index c0b41cda3d..d9196a16e1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id),"")> 0 ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id,""))> 0 ? 1 : 0 depends_on = [ azurerm_resource_group.library ] From 13a0adb84d35c218dc423c21a64f870e7129987d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 30 May 2024 04:11:31 +0300 Subject: [PATCH 009/164] Misc fixes --- deploy/scripts/New-SDAFDevopsProject.ps1 | 28 ++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 37f58847ff..7bc35aca37 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -40,7 +40,31 @@ $versionLabel = "v3.11.0.2" Write-Host "" Write-Host "" +# Check if access to the Azure DevOps organization is available and prompt for PAT if needed +# Exact permissions required, to be validated, and included in the Read-Host text. 
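+# A cheap authentication probe: 'az devops user list' returns an empty result
+# when no valid PAT (or other credential) is available for the organization.
+# Any read-only call scoped to the organization would serve equally well,
+# e.g. (sketch only, not used below):
+#   az devops project list --organization $ADO_Organization --top 1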
+if ($Env:AZURE_DEVOPS_EXT_PAT.Length -gt 0) {
+  Write-Host "Using the provided Personal Access Token (PAT) to authenticate to the Azure DevOps organization $ADO_Organization" -ForegroundColor Yellow
+}
+
+$checkPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
+if ($checkPAT.Length -eq 0) {
+  $env:AZURE_DEVOPS_EXT_PAT = Read-Host "Please enter your Personal Access Token (PAT) with full access to the Azure DevOps organization $ADO_Organization"
+  $verifyPAT = (az devops user list --organization $ADO_Organization --only-show-errors --top 1)
+  if ($verifyPAT.Length -eq 0) {
+    Read-Host -Prompt "Failed to authenticate to the Azure DevOps organization, press <Enter> to exit"
+    exit
+  }
+  else {
+    Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green
+  }
+}
+else {
+  Write-Host "Successfully authenticated to the Azure DevOps organization $ADO_Organization" -ForegroundColor Green
+}
+
+Write-Host ""
+Write-Host ""

 if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" }

 if ($Env:SDAF_AuthenticationMethod.Length -eq 0) {
@@ -845,11 +869,11 @@ else {

   $postBody = [PSCustomObject]@{
     accessLevel         = @{
-      accountLicenseType = "stakeholder"
+      accountLicenseType = "Basic"
     }
     projectEntitlements = @([ordered]@{
         group      = @{
-          groupType = "projectContributor"
+          groupType = "projectAdministrator"
        }
        projectRef = @{
          id = $Project_ID

From 12d6426ae8c6417d5bca273a7338e46d6fe120e3 Mon Sep 17 00:00:00 2001
From: hdamecharla
Date: Thu, 30 May 2024 15:32:57 +0530
Subject: [PATCH 010/164] Fix systemd service reload in 1.4 Packages role

---
 deploy/ansible/roles-os/1.4-packages/tasks/main.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml
index 429838dccf..9968853d49 100644
--- a/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml
+++ b/deploy/ansible/roles-os/1.4-packages/tasks/main.yaml
@@ -50,7 +50,7 @@

 - name: "1.4 Packages: - Force systemd to reread configs {{ distro_name }}"
-  ansible.builtin.systemd_service:
+  ansible.builtin.systemd:
     daemon_reload: true

 - name: "1.4 Packages: - Restart WAAgent on {{ distro_name }}"

From f960bbd8494f6e22d751fbdd20bd48bc97c7a756 Mon Sep 17 00:00:00 2001
From: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com>
Date: Thu, 30 May 2024 17:15:16 +0300
Subject: [PATCH 011/164] Various Terraform code fixes (#586)

* Fix typo in terraform-units/modules/sap_landscape/providers.tf

* Remove duplicate of resource azurerm_network_security_rule/nsr_controlplane_storage

In terraform-units/modules/sap_landscape/nsg.tf

* Remove fourth argument from nat_gateway_name definition

In terraform-units/modules/sap_landscape/variables_local.tf

* Remove duplicate for database_kdump_disks

In terraform-units/modules/sap_system/anydb_node/outputs.tf

* Remove all duplicates from terraform-units/modules/sap_system/app_tier/vm-scs.tf

* Remove duplicates in terraform-units/modules/sap_system/output_files/sap-parameters.tmpl

---------

Co-authored-by: Csaba Daradics
---
 .../modules/sap_landscape/nsg.tf              |  31 ----
 .../modules/sap_landscape/providers.tf        |   2 +-
 .../modules/sap_landscape/variables_local.tf  |   2 +-
 .../modules/sap_system/anydb_node/outputs.tf  |  12 --
 .../modules/sap_system/app_tier/vm-scs.tf     | 139 ------------------
 .../output_files/sap-parameters.tmpl          |   3 -
 6 files changed, 2 insertions(+), 187 deletions(-)

diff --git 
a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf index baf156a07c..6c593b21ae 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nsg.tf @@ -211,37 +211,6 @@ resource "azurerm_network_security_rule" "nsr_controlplane_web" { destination_address_prefixes = local.web_subnet_existing ? data.azurerm_subnet.web[0].address_prefixes : azurerm_subnet.web[0].address_prefixes } -// Add SSH network security rule -resource "azurerm_network_security_rule" "nsr_controlplane_storage" { - provider = azurerm.main - - count = local.storage_subnet_defined ? local.storage_subnet_nsg_exists ? 0 : 1 : 0 - depends_on = [ - azurerm_network_security_group.storage - ] - name = "ConnectivityToSAPApplicationSubnetFromControlPlane-ssh-rdp-winrm-ANF" - resource_group_name = local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].resource_group_name - ) : ( - azurerm_virtual_network.vnet_sap[0].resource_group_name - ) - network_security_group_name = try(azurerm_network_security_group.storage[0].name, azurerm_network_security_group.app[0].name) - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_ranges = [22, 443, 3389, 5985, 5986, 111, 635, 2049, 4045, 4046, 4049, 2049, 111] - source_address_prefixes = compact(concat( - var.deployer_tfstate.subnet_mgmt_address_prefixes, - var.deployer_tfstate.subnet_bastion_address_prefixes, - local.SAP_virtualnetwork_exists ? ( - data.azurerm_virtual_network.vnet_sap[0].address_space) : ( - azurerm_virtual_network.vnet_sap[0].address_space - ))) - destination_address_prefixes = local.storage_subnet_existing ? data.azurerm_subnet.storage[0].address_prefixes : azurerm_subnet.storage[0].address_prefixes -} - // Add SSH network security rule resource "azurerm_network_security_rule" "nsr_controlplane_storage" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index ed1db2f2b8..fab440689c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -7,7 +7,7 @@ terraform { } azapi = { - source = "Azure/azapi" + source = "azure/azapi" configuration_aliases = [azapi.api] } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 35fabcee2b..9facd0f913 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -68,7 +68,7 @@ locals { create_nat_gateway = var.infrastructure.nat_gateway.create_nat_gateway nat_gateway_name = length(var.infrastructure.nat_gateway.name) > 0 ? 
( var.infrastructure.nat_gateway.name) : ( - format("%s%s%s%s", + format("%s%s%s", var.naming.resource_prefixes.nat_gateway, local.prefix, local.resource_suffixes.nat_gateway diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf index a1bdeece3e..b711793c5f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/outputs.tf @@ -177,15 +177,3 @@ output "database_kdump_disks" { ) ) } -output "database_kdump_disks" { - description = "List of Azure kdump disks" - value = distinct( - flatten( - [for vm in var.naming.virtualmachine_names.ANYDB_VMNAME : - [for idx, disk in azurerm_virtual_machine_data_disk_attachment.kdump : - format("{ host: '%s', LUN: %d, type: 'kdump' }", vm, disk.lun) - ] - ] - ) - ) - } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index b50a5a13ab..1e172ef4fb 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -726,145 +726,6 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { } -resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityLinuxAgent" - type_handler_version = "2.0" - auto_upgrade_minor_version = true - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { - provider = azurerm.main - count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Security.Monitoring.AzureSecurityWindowsAgent" - publisher = "Microsoft.Azure.Security.Monitoring" - type = "AzureSecurityWindowsAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true - - settings = jsonencode( - { - "enableGenevaUpload" = true, - "enableAutoConfig" = true, - "reportSuccessOnUnsupportedDistro" = true, - } - ) -} - -######################################################################################### -# # -# Azure Data Disk for Kdump # -# # -#######################################+################################################# -resource "azurerm_managed_disk" "kdump" { - provider = azurerm.main - count = ( - local.enable_deployment && - var.application_tier.scs_high_availability && - ( - upper(var.application_tier.scs_os.os_type) == "LINUX" && - ( var.application_tier.fence_kdump_disk_size > 0 ) - ) - ) ? 
local.scs_server_count : 0 - - name = format("%s%s%s%s%s", - try( var.naming.resource_prefixes.fence_kdump_disk, ""), - local.prefix, - var.naming.separator, - var.naming.virtualmachine_names.SCS_VMNAME[count.index], - try( var.naming.resource_suffixes.fence_kdump_disk, "fence_kdump_disk" ) - ) - location = var.resource_group[0].location - resource_group_name = var.resource_group[0].name - create_option = "Empty" - storage_account_type = "Premium_LRS" - disk_size_gb = try(var.application_tier.fence_kdump_disk_size,64) - disk_encryption_set_id = try(var.options.disk_encryption_set_id, null) - tags = var.tags - - zone = local.scs_zonal_deployment ? ( - upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - azurerm_linux_virtual_machine.scs[count.index].zone) : - null - ) : ( - null - ) - lifecycle { - ignore_changes = [ - create_option, - hyper_v_generation, - source_resource_id, - tags - ] - } - -} - -resource "azurerm_virtual_machine_data_disk_attachment" "kdump" { - provider = azurerm.main - count = ( - local.enable_deployment && - var.application_tier.scs_high_availability && - ( - upper(var.application_tier.scs_os.os_type) == "LINUX" && - ( var.application_tier.fence_kdump_disk_size > 0 ) - ) - ) ? local.scs_server_count : 0 - - managed_disk_id = azurerm_managed_disk.kdump[count.index].id - virtual_machine_id = (upper(var.application_tier.scs_os.os_type) == "LINUX" # If Linux - ) ? ( - azurerm_linux_virtual_machine.scs[count.index].id - ) : null - caching = "None" - lun = var.application_tier.fence_kdump_lun_number -} - -resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { - provider = azurerm.main - count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_linux_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorLinuxAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true -} - - -resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { - provider = azurerm.main - count = local.deploy_monitoring_extension && upper(var.application_tier.scs_os.os_type) == "WINDOWS" ? ( - local.scs_server_count) : ( - 0 ) - virtual_machine_id = azurerm_windows_virtual_machine.scs[count.index].id - name = "Microsoft.Azure.Monitor.AzureMonitorWindowsAgent" - publisher = "Microsoft.Azure.Monitor" - type = "AzureMonitorWindowsAgent" - type_handler_version = "1.0" - auto_upgrade_minor_version = true -} - - resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { provider = azurerm.main count = var.infrastructure.deploy_defender_extension && upper(var.application_tier.scs_os.os_type) == "LINUX" ? 
( diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 83397bc733..6047f02782 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -94,9 +94,6 @@ platform: ${platform} db_scale_out: ${scale_out} db_no_standby: ${scale_out_no_standby_role} -subnet_cidr_storage: ${subnet_cidr_storage} -%{~ endif } - subnet_cidr_storage: ${subnet_cidr_storage} %{~ endif } subnet_cidr_anf: ${subnet_cidr_anf} From 5d071dbc83cb63f5eeb32f488d9af5b605e7d172 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Thu, 30 May 2024 04:03:20 +0300 Subject: [PATCH 012/164] chore: Update count condition in dns.tf for local private DNS usage --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index d9196a16e1..5eae5a3a7a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint && length(try(var.deployer_tfstate.webapp_id,""))> 0 ? 1 : 0 + count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_resource_group.library ] From 90c88332ed0c64ddd6375d58a830651541e94c75 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 30 May 2024 23:37:40 +0530 Subject: [PATCH 013/164] chore: Update NAT Gateway public IP name format --- .../terraform-units/modules/sap_landscape/nat_gateway.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf index c9c65e4053..2890be7f6f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf @@ -3,7 +3,7 @@ resource "azurerm_public_ip" "ng_pip" { provider = azurerm.main count = local.create_nat_gateway ? 
1 : 0 - name = local.nat_gateway_name + name = format("%s%s", local.nat_gateway_name, "-pip") location = local.region resource_group_name = azurerm_resource_group.resource_group[0].name idle_timeout_in_minutes = local.nat_gateway_idle_timeout_in_minutes From 5ffb0963e8277251e4008088f0bc2fb426005130 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 31 May 2024 07:49:51 +0530 Subject: [PATCH 014/164] chore: Update NAT Gateway public IP lifecycle configuration --- .../terraform-units/modules/sap_landscape/nat_gateway.tf | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf index 2890be7f6f..26e4bfa3ce 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf @@ -11,6 +11,12 @@ resource "azurerm_public_ip" "ng_pip" { ip_tags = local.nat_gateway_public_ip_tags allocation_method = "Static" sku = "Standard" + lifecycle { + ignore_changes = [ + ip_tags + ] + create_before_destroy = true + } } # NAT Gateway From 1e9e55faa031548102e8fcb16f6f2ecf6aeda21a Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 31 May 2024 08:09:22 +0530 Subject: [PATCH 015/164] chore: Update NAT Gateway provider to azureng --- .../modules/sap_landscape/nat_gateway.tf | 12 ++++++------ .../modules/sap_landscape/providers.tf | 8 +++++++- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf index 26e4bfa3ce..7d6277902f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf @@ -1,7 +1,7 @@ # https://github.com/hashicorp/terraform-provider-azurerm/issues/18741 # public IP address for the natGateway resource "azurerm_public_ip" "ng_pip" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 1 : 0 name = format("%s%s", local.nat_gateway_name, "-pip") location = local.region @@ -24,7 +24,7 @@ resource "azurerm_public_ip" "ng_pip" { # https://learn.microsoft.com/en-us/azure/nat-gateway/nat-overview#availability-zones # Only one Availability Zone can be defined. We will not provide a zone for now. resource "azurerm_nat_gateway" "ng" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 1 : 0 name = local.nat_gateway_name location = local.region @@ -39,7 +39,7 @@ resource "azurerm_nat_gateway" "ng" { # NAT Gateway IP Configuration resource "azurerm_nat_gateway_public_ip_association" "ng_pip_assoc" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id public_ip_address_id = azurerm_public_ip.ng_pip[0].id @@ -48,7 +48,7 @@ resource "azurerm_nat_gateway_public_ip_association" "ng_pip_assoc" { # NAT Gateway subnet association with app subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 
1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.app[0].id @@ -59,7 +59,7 @@ resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc" { # NAT Gateway subnet association with db subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_db" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.db[0].id @@ -70,7 +70,7 @@ resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_db" { # NAT Gateway subnet association with web subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_web" { - provider = azurerm.main + provider = azureng count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.web[0].id diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index fab440689c..b0728df2d7 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -5,7 +5,13 @@ terraform { configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering] version = ">= 3.23" } - + + azureng = { + source = "hashicorp/azurerm" + configuration_aliases = [azurerm.ng] + version = ">= 3.71.0" + } + azapi = { source = "azure/azapi" configuration_aliases = [azapi.api] From b954e28c3f0739802800de772478c664efea7db9 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 31 May 2024 08:11:34 +0530 Subject: [PATCH 016/164] chore: Fix typo in azureng provider configuration alias in sap_landscape module --- .../terraform-units/modules/sap_landscape/providers.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index b0728df2d7..11f167b960 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -8,7 +8,7 @@ terraform { azureng = { source = "hashicorp/azurerm" - configuration_aliases = [azurerm.ng] + configuration_aliases = [azureng.ng] version = ">= 3.71.0" } From e39ef3983783cfac6f1f9daaabfbf985e22978cd Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 31 May 2024 08:16:15 +0530 Subject: [PATCH 017/164] chore: Update NAT Gateway provider to azurerm.main --- .../modules/sap_landscape/nat_gateway.tf | 15 ++++++++------- .../modules/sap_landscape/providers.tf | 6 ------ 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf index 7d6277902f..37f0e425fa 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/nat_gateway.tf @@ -1,14 +1,15 @@ # https://github.com/hashicorp/terraform-provider-azurerm/issues/18741 # public IP address for the natGateway resource "azurerm_public_ip" "ng_pip" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 
1 : 0 name = format("%s%s", local.nat_gateway_name, "-pip") location = local.region resource_group_name = azurerm_resource_group.resource_group[0].name idle_timeout_in_minutes = local.nat_gateway_idle_timeout_in_minutes zones = local.nat_gateway_public_ip_zones - ip_tags = local.nat_gateway_public_ip_tags + # ip_tags = local.nat_gateway_public_ip_tags + tags = local.nat_gateway_public_ip_tags allocation_method = "Static" sku = "Standard" lifecycle { @@ -24,7 +25,7 @@ resource "azurerm_public_ip" "ng_pip" { # https://learn.microsoft.com/en-us/azure/nat-gateway/nat-overview#availability-zones # Only one Availability Zone can be defined. We will not provide a zone for now. resource "azurerm_nat_gateway" "ng" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 1 : 0 name = local.nat_gateway_name location = local.region @@ -39,7 +40,7 @@ resource "azurerm_nat_gateway" "ng" { # NAT Gateway IP Configuration resource "azurerm_nat_gateway_public_ip_association" "ng_pip_assoc" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id public_ip_address_id = azurerm_public_ip.ng_pip[0].id @@ -48,7 +49,7 @@ resource "azurerm_nat_gateway_public_ip_association" "ng_pip_assoc" { # NAT Gateway subnet association with app subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.app[0].id @@ -59,7 +60,7 @@ resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc" { # NAT Gateway subnet association with db subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_db" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.db[0].id @@ -70,7 +71,7 @@ resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_db" { # NAT Gateway subnet association with web subnet resource "azurerm_subnet_nat_gateway_association" "ng_subnet_assoc_web" { - provider = azureng + provider = azurerm.main count = local.create_nat_gateway ? 
1 : 0 nat_gateway_id = azurerm_nat_gateway.ng[0].id subnet_id = azurerm_subnet.web[0].id diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index 11f167b960..7de1539916 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -6,12 +6,6 @@ terraform { version = ">= 3.23" } - azureng = { - source = "hashicorp/azurerm" - configuration_aliases = [azureng.ng] - version = ">= 3.71.0" - } - azapi = { source = "azure/azapi" configuration_aliases = [azapi.api] From 300206ceaf464c980d0ed636c1cd9007077231b2 Mon Sep 17 00:00:00 2001 From: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Date: Fri, 31 May 2024 12:27:44 +0200 Subject: [PATCH 018/164] Update 01-deploy-control-plane.yaml --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 69ef1149ee..651031e5eb 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -92,7 +92,7 @@ stages: export ARM_CLIENT_ID=$servicePrincipalId if [ -n "$(servicePrincipalKey)" ]; then - export ARM_CLIENT_SECRET=$servicePrincipalKey + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET else export ARM_USE_OIDC=true export ARM_USE_AZUREAD=true From a851954cc5a692a1b89ef5da79f8ed01b8ebdd5a Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 31 May 2024 14:07:08 +0300 Subject: [PATCH 019/164] chore: Update app_service_plan name format in sap_deployer module --- .../terraform-units/modules/sap_deployer/app_service.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index d797689f59..6f81524ed7 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -57,7 +57,7 @@ data "azurerm_subnet" "webapp" { # Create the Windows App Service Plan resource "azurerm_service_plan" "appserviceplan" { count = var.use_webapp ? 
1 : 0 - name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.LIBRARY, var.naming.resource_suffixes.app_service_plan, substr(random_id.deployer.hex, 0, 3))) + name = lower(format("%s%s%s%s", var.naming.resource_prefixes.app_service_plan, var.naming.prefix.DEPLOYER, var.naming.resource_suffixes.app_service_plan, substr(random_id.deployer.hex, 0, 3))) resource_group_name = local.resourcegroup_name location = local.rg_appservice_location os_type = "Windows" From 8b1e3569050516b3510a9dec7493d5503ab40376 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 31 May 2024 15:35:03 +0300 Subject: [PATCH 020/164] Update ARM_CLIENT_SECRET assignment in deploy control plane pipeline --- deploy/pipelines/01-deploy-control-plane.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 651031e5eb..7d584be18e 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -92,7 +92,7 @@ stages: export ARM_CLIENT_ID=$servicePrincipalId if [ -n "$(servicePrincipalKey)" ]; then - export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + export ARM_CLIENT_SECRET=$servicePrincipalKey else export ARM_USE_OIDC=true export ARM_USE_AZUREAD=true @@ -251,6 +251,7 @@ stages: --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ --subscription $ARM_SUBSCRIPTION_ID --auto-approve --ado --only_deployer --msi else + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ From 8cdf6513da8af89a07524ecde2e005a2655aa03f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Fri, 31 May 2024 21:08:48 +0300 Subject: [PATCH 021/164] Add the compliance extensions also to the deployers --- .../bootstrap/sap_deployer/tfvar_variables.tf | 17 +++++++++ .../bootstrap/sap_deployer/transform.tf | 7 ++-- .../run/sap_deployer/tfvar_variables.tf | 35 ++++++++++++++----- .../terraform/run/sap_deployer/transform.tf | 3 ++ .../modules/sap_deployer/vm-deployer.tf | 35 +++++++++++++++++++ 5 files changed, 86 insertions(+), 11 deletions(-) diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 169eb08d7e..be8074c3b2 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -552,3 +552,20 @@ variable "use_spn" { description = "Log in using a service principal when performing the deployment" default = false } + +######################################################################################### +# # +# Extension variables # +# # +######################################################################################### + + +variable "deploy_monitoring_extension" { + description = "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines" + default = false + } + +variable "deploy_defender_extension" { + description = "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines" + default = false + } diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 
95559452a8..84f49ec231 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -3,7 +3,7 @@ locals { use_webapp = lower(var.use_webapp) - infrastructure = { + infrastructure = { environment = coalesce( var.environment, try(var.infrastructure.environment, "") @@ -130,7 +130,10 @@ locals { } } } - } + deploy_monitoring_extension = var.deploy_monitoring_extension + deploy_defender_extension = var.deploy_defender_extension + + } deployer = { size = try( coalesce( diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 5cc630753a..83ac018ba2 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -554,14 +554,31 @@ variable "user_assigned_identity_id" { default = "" } -variable "add_system_assigned_identity" { - description = "Boolean flag indicating if a system assigned identity should be added to the deployer" - default = false - type = bool - } +variable "add_system_assigned_identity" { + description = "Boolean flag indicating if a system assigned identity should be added to the deployer" + default = false + type = bool + } + +variable "use_spn" { + description = "Log in using a service principal when performing the deployment" + default = true + } -variable "use_spn" { - description = "Log in using a service principal when performing the deployment" - default = true - } +######################################################################################### +# # +# Extension variables # +# # +######################################################################################### + + +variable "deploy_monitoring_extension" { + description = "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines" + default = false + } + +variable "deploy_defender_extension" { + description = "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines" + default = false + } diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index 8d941267c8..88936dcb28 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -127,6 +127,9 @@ locals { } } } + deploy_monitoring_extension = var.deploy_monitoring_extension + deploy_defender_extension = var.deploy_defender_extension + } deployer = { size = try( diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf index 8422bf9cfe..712b034e31 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf @@ -252,3 +252,38 @@ resource "azurerm_virtual_machine_extension" "configure" { ) } + +resource "azurerm_virtual_machine_extension" "monitoring_extension_deployer_lnx" { + provider = azurerm.main + count = var.infrastructure.deploy_monitoring_extension ? 
(
+                         var.deployer_vm_count) : (
+                         0 )
+  virtual_machine_id         = azurerm_linux_virtual_machine.deployer[count.index].id
+  name                       = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent"
+  publisher                  = "Microsoft.Azure.Monitor"
+  type                       = "AzureMonitorLinuxAgent"
+  type_handler_version       = "1.0"
+  auto_upgrade_minor_version = true
+}
+
+
+resource "azurerm_virtual_machine_extension" "monitoring_defender_deployer_lnx" {
+  provider                   = azurerm.main
+  count                      = var.infrastructure.deploy_defender_extension ? (
+                         var.deployer_vm_count) : (
+                         0 )
+  virtual_machine_id         = azurerm_linux_virtual_machine.deployer[count.index].id
+  name                       = "Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent"
+  publisher                  = "Microsoft.Azure.Security.Monitoring"
+  type                       = "AzureSecurityLinuxAgent"
+  type_handler_version       = "2.0"
+  auto_upgrade_minor_version = true
+
+  settings = jsonencode(
+    {
+      "enableGenevaUpload" = true,
+      "enableAutoConfig" = true,
+      "reportSuccessOnUnsupportedDistro" = true,
+    }
+  )
+}

From 8d84bbad3c8a4623c39a299bfea849191b6b70b8 Mon Sep 17 00:00:00 2001
From: Steffen Bo Thomsen
Date: Mon, 3 Jun 2024 16:13:52 +0200
Subject: [PATCH 022/164] Ensure that custom_logical_volumes can be striped +
 have a sensible stripesize default if an LVM volume group consists of more
 than one PV. (#587)

Use the already established pattern from the framework-specific LVMs to
define stripesize on custom logical volumes.
---
 .../roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml | 1 +
 deploy/ansible/vars/ansible-input-api.yaml             | 2 ++
 deploy/ansible/vars/disks_config.yml                   | 4 ++--
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml
index 9666dc8236..228aaa3fb4 100644
--- a/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml
+++ b/deploy/ansible/roles-os/1.5-disk-setup/tasks/1.5-custom-disks.yml
@@ -47,6 +47,7 @@
     lv: "{{ item.lv }}"
     vg: "{{ item.vg }}"
     size: "{{ item.size }}"
+    opts: "{{ lvol_opts_from_lv_item }}"
     active: true
     state: present
     shrink: false

diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml
index a82476f9f9..5989235e44 100644
--- a/deploy/ansible/vars/ansible-input-api.yaml
+++ b/deploy/ansible/vars/ansible-input-api.yaml
@@ -83,6 +83,8 @@
 sybase_temp_stripe_size: 128
 oracle_data_stripe_size: 256
 oracle_log_stripe_size: 128

+default_stripe_size: 128
+
 # Custom virtual hostnames
 custom_db_virtual_hostname: ""
 custom_ers_virtual_hostname: ""

diff --git a/deploy/ansible/vars/disks_config.yml b/deploy/ansible/vars/disks_config.yml
index 9362c37733..6e843317ff 100644
--- a/deploy/ansible/vars/disks_config.yml
+++ b/deploy/ansible/vars/disks_config.yml
@@ -418,9 +418,9 @@ vg_stripecount_from_lv_item: >-
 # '-i <stripes> -I <stripesize>' only when the LV 'item' has
 # stripesize specified, otherwise it will be an empty string.
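+# Worked example (illustrative, assuming a custom LV on a VG built from two
+# PVs and no explicit stripesize on the item): vg_stripecount_from_lv_item
+# evaluates to 2, so the expression below renders '-i 2 -I 128', i.e. two
+# stripes with the default_stripe_size of 128 (KiB, as interpreted by
+# lvcreate); with a single PV and no stripesize it still renders an empty
+# string.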
lvol_opts_from_lv_item: >- - {{ ('stripesize' in item) | + {{ ('stripesize' in item or vg_stripecount_from_lv_item | int > 1) | ternary('-i ' ~ vg_stripecount_from_lv_item ~ - ' -I ' ~ (item.stripesize | default(0)), + ' -I ' ~ (item.stripesize | default(default_stripe_size)), '') }} # Define a dynamic expression based upon the 'item' fact that can From 3b44b9eb54a6b1c5ac9e99c56b90ceff02abae98 Mon Sep 17 00:00:00 2001 From: devanshjain Date: Tue, 4 Jun 2024 23:52:43 +0000 Subject: [PATCH 023/164] Update AMS provider creation tasks in ansible playbook --- .../0.8-ams-providers/tasks/main.yaml | 35 +++++++++++-------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml index ce8b332a7f..75016b6b98 100644 --- a/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.8-ams-providers/tasks/main.yaml @@ -8,31 +8,38 @@ ers_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_ERS') }}" ha_cluster_port_number: "{{ 9664 if ansible_os_family | upper == 'SUSE' else 44322 }}" -- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - Install [AMS] cli extension" + delegate_to: localhost ansible.builtin.shell: >- - az extension add --name workloads --yes || exit 1 + az extension add --name workloads --yes || exit 1 tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Get Access Token" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - perform az login" + delegate_to: localhost + ansible.builtin.command: >- + az login --identity --allow-no-subscriptions --output none + no_log: true + changed_when: false + +- name: "0.8.1 ams provider creation: - Get Access Token" + delegate_to: localhost ansible.builtin.shell: >- - az account get-access-token --resource https://management.azure.com \ - --query accessToken -o tsv - register: ams_access_token + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: ams_access_token tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" - delegate_to: localhost - ansible.builtin.command: uuidgen - register: ams_provider_guid +- name: "0.8.1 ams provider creation: - Generate a guid for the AMS provider instance" + delegate_to: localhost + ansible.builtin.command: uuidgen + register: ams_provider_guid tags: - skip_ansible_lint -- name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" - delegate_to: localhost +- name: "0.8.1 ams provider creation: - Create PrometheusOS (OS) provider in AMS" + delegate_to: localhost when: - ansible_os_family | upper == 'SUSE' or ansible_os_family | upper == 'REDHAT' - enable_os_monitoring From 237c1fdf2cf7b01a43b5a79cac32e9d7e44061da Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 6 Jun 2024 13:09:47 +0530 Subject: [PATCH 024/164] Update deploy control plane pipeline to use service principal for authentication --- deploy/pipelines/01-deploy-control-plane.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 7d584be18e..8b9fe4dbc1 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -619,6 +619,10 @@ stages: else if [ $USE_MSI != "true" ]; then echo -e 
"$cyan--- Using SPN ---$reset" + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID export ARM_USE_MSI=false az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none From 0e1967e366497c03c9807e3f592d57be82ac415f Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 6 Jun 2024 17:00:41 +0530 Subject: [PATCH 025/164] chore: Temporarily set identity type to "SystemAssigned, UserAssigned" in app_service.tf --- .../modules/sap_deployer/app_service.tf | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf index 6f81524ed7..4913fc0c71 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/app_service.tf @@ -143,10 +143,13 @@ resource "azurerm_windows_web_app" "webapp" { key_vault_reference_identity_id = length(var.deployer.user_assigned_identity_id) == 0 ? azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id identity { - type = length(var.deployer.user_assigned_identity_id) == 0 ? ( - "SystemAssigned") : ( - "SystemAssigned, UserAssigned" - ) + # type = length(var.deployer.user_assigned_identity_id) == 0 ? ( + # "SystemAssigned") : ( + # "SystemAssigned, UserAssigned" + # ) + # for now set the identity type to "SystemAssigned, UserAssigned" as assigning identities + # is not supported by the provider when type is "SystemAssigned" + type = "SystemAssigned, UserAssigned" identity_ids = [length(var.deployer.user_assigned_identity_id) == 0 ? 
azurerm_user_assigned_identity.deployer[0].id : data.azurerm_user_assigned_identity.deployer[0].id ] } connection_string { From ac55a29144eb0c9675a2f9756b26208bc2bc7853 Mon Sep 17 00:00:00 2001 From: daradicscsaba <62608496+daradicscsaba@users.noreply.github.com> Date: Fri, 7 Jun 2024 14:54:38 +0300 Subject: [PATCH 026/164] Various Ansible fixes (#591) * Add Red Hat 8.10 repo to 1.3-repository vars * Create entries for Red Hat 8.10 in 1.4-packages vars * Add 'pam' to OS packages list for DB2 with state 'latest' Ensures that x86_64 package is updated, avoiding conflict with libpam.so.0 install, which requires i686 version of pam * Add "state: 'latest'" to loops * Correct cluster version check in 1.17-generic-pacemaker * Correct cluster version check in 5.6-scsers-pacemaker * Correct cluster version check in 5.5-hanadb-pacemaker * Create entries for Red Hat 8.10 in 1.17-generic-pacemaker --------- Co-authored-by: Csaba Daradics --- .../tasks/1.17.2.0-cluster-RedHat.yml | 2 +- .../1.17-generic-pacemaker/vars/main.yml | 3 +++ .../roles-os/1.3-repository/vars/repos.yaml | 1 + .../1.4-packages/tasks/1.4.1-packages.yaml | 24 ++++++++++++------- .../1.4-packages/vars/os-packages.yaml | 4 ++++ .../tasks/5.5.4.1-cluster-RedHat.yml | 2 +- .../tasks/5.6.4.2-sap-resources-RedHat.yml | 2 +- 7 files changed, 27 insertions(+), 11 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 24ae236ba9..077aaab435 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -123,7 +123,7 @@ - name: "1.17 Generic Pacemaker - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "1.17 Generic Pacemaker - Ensure STONITH timeout is raised" ansible.builtin.command: pcs property set stonith-timeout=900 diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml index 1e57c00bc2..07d94b1d21 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/vars/main.yml @@ -83,6 +83,9 @@ package_versions: redhat8.9: - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"} - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"} + redhat8.10: + - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"} + - {name: "resource-agents", version: "4.9.0", compare_operator: ">=", version_type: "loose"} redhat9.0: - {name: "pacemaker", version: "2.0.5", compare_operator: ">=", version_type: "loose"} - {name: "resource-agents-cloud", version: "4.10.0", compare_operator: ">=", version_type: "loose"} diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index d63fdbf9c4..7f7dd3f387 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -26,6 +26,7 @@ repos: # - { tier: 
'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } redhat8.9: # - { tier: 'ha', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + redhat8.10: redhat9.0: redhat9.2: # do not have any repos that are needed for RedHat at the moment. diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml index 773f7a09ea..b7af028a92 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml @@ -48,7 +48,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "1.4 Packages: - Show result from packages module" @@ -71,7 +72,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -93,7 +95,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: @@ -116,7 +119,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -140,7 +144,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "1.4 Packages: - Show result from packages module" @@ -163,7 +168,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" @@ -186,7 +192,8 @@ state: "{{ item.state }}" register: package_result loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: @@ -208,7 +215,8 @@ list }}" state: "{{ item.state }}" loop: - - { state: 'present' } # First install required packages + - { state: 'latest' } # Update necessary packages + - { state: 'present' } # Install required packages - { state: 'absent' } # Then remove packages that we don't want rescue: - name: "Print stderr before getting error code" diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml 
b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 2cec78ad02..81f66d2e36 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -159,6 +159,7 @@ packages: - { tier: 'os', package: 'mksh', node_tier: 'db2', state: 'present' } - { tier: 'os', package: 'libstdc++.so.6', node_tier: 'db2', state: 'present' } - { tier: 'os', package: 'unzip', node_tier: 'db2', state: 'present' } + - { tier: 'os', package: 'pam', node_tier: 'db2', state: 'latest' } - { tier: 'os', package: 'libpam.so.0', node_tier: 'db2', state: 'present' } - { tier: 'db2', package: 'acl', node_tier: 'db2', state: 'present' } # --------------------------- End - Packages required for DB2 -------------------------------------------8 @@ -264,6 +265,9 @@ packages: redhat8.9: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } + redhat8.10: + - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat9.0: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 89d6be3b69..dec3e1bf3b 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -29,7 +29,7 @@ - name: "5.5.4.1 HANA Cluster configuration - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "5.5.4.1 HANA Cluster configuration - Ensure the SAP HANA Topology resource is created" ansible.builtin.shell: > diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index 9196be86d5..abe0919488 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -109,7 +109,7 @@ - name: "5.6 SCSERS - RHEL - Check if the pacemaker package version is greater than pacemaker-2.0.4" when: ansible_distribution_major_version in ["8", "9"] ansible.builtin.set_fact: - is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'] is version('2.0.4', '>') | default(false) }}" + is_pcmk_ver_gt_204: "{{ ansible_facts.packages['pacemaker'][0].version is version('2.0.4', '>') | default(false) }}" - name: "5.6 SCSERS - RHEL - Set properties for two node clusters" when: From e70bc59a40856e00c218547e761909b93b4803b4 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 11 Jun 2024 13:11:58 +0530 Subject: [PATCH 027/164] chore: Adjust Azure fence agent packages and remove unused Azure Python packages from list when deploying on SLES 15 SP5 --- 
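[Editor's note, not part of the original patch: the os-packages entries changed
below are consumed by the 1.4-packages role updated in the previous patch. A
minimal sketch of that consumption, with the tier/node_tier filtering left out
and 'distro_key' standing in for the role's distribution lookup variable:]

    - name: "Sketch: apply the package list one state at a time"
      ansible.builtin.package:
        name: "{{ packages[distro_key] | selectattr('state', 'equalto', item.state) | map(attribute='package') | list }}"
        state: "{{ item.state }}"
      loop:
        - { state: 'latest' }   # update these first (e.g. 'pam' on DB2 hosts)
        - { state: 'present' }  # then install what is required
        - { state: 'absent' }   # finally remove unwanted packages, such as the python3-azure-* entries below on SLES 15 SP5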
.../1.4-packages/vars/os-packages.yaml | 38 ++++++++++++++++--- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 81f66d2e36..154db3a4ec 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -375,12 +375,6 @@ packages: - { tier: 'ha', package: 'azure-cli', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'azure-cli', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'azure-cli', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } # Added as part of documentation update - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sap-suse-cluster-connector', node_tier: 'ers', state: 'present' } @@ -396,12 +390,24 @@ packages: # than /usr/bin/python. # Required to enable ansible to use /usr/bin/python on SLE 15 SP2 - { tier: 'os', package: 'python2-rpm', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } sles_sap15.2: - { tier: 'os', package: 'python-xml', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'python2-rpm', node_tier: 'all', state: 'present' } - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } sles_sap15.3: # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' } - { tier: 'os', package: 'python-xml', node_tier: 'all', state: 'present' } @@ -409,6 +415,12 @@ packages: - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 
'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' }
 sles_sap15.4:
 # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' }
 - { tier: 'os', package: 'python3-xml', node_tier: 'all', state: 'present' }
@@ -416,6 +428,12 @@
 - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' }
 sles_sap15.5:
 # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' }
 - { tier: 'os', package: 'python3-xml', node_tier: 'all', state: 'present' }
@@ -423,6 +441,14 @@
 - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' }
 - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' }
+ # These packages cause issues on SLES15 SP5 due to changes to the public cloud SDKs
+ # https://www.suse.com/c/incompatible-changes-ahead-for-public-cloud-sdks/
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'absent' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'absent' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'absent' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'absent' }
+ - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'absent' }
+ - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'absent' }

 # Adding packages for Oracle linux 8.4 to start with, copied the list from RHEL.
 # Adding additional Oracle linux packages as per SAP Note 2069760 - Oracle Linux 7.x SAP Installation and Upgrade. Need to add the groupinstall command.
From 877dc61097d24ca0eb89c8767b503e3b03e9f2ec Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 11 Jun 2024 16:46:10 +0530 Subject: [PATCH 028/164] chore: Override changed status for saptune_check and active_saptune_solution tasks in 2.10.3.yaml --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index b7927920b1..6c31a9fdae 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -42,13 +42,12 @@ - name: "2.10.3 sap-notes : - Run saptune_check" ansible.builtin.shell: saptune_check register: saptune_check_result - + changed_when: false - name: "2.10.3 sap-notes : - Interrogate active saptune solution" ansible.builtin.shell: saptune solution enabled register: active_saptune_solution - when: - - saptune_check_result.rc == 0 + changed_when: false # We need to capture the first block of non-whitespace characters # output from saptune solution enabled command has an empty line followed by solution name From e2b910ed66397cc9f15ee037fab7e6b5b93d3550 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 11 Jun 2024 16:50:30 +0530 Subject: [PATCH 029/164] chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index 6c31a9fdae..590f1d7ad0 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -89,6 +89,7 @@ when: - is_high_availability - node_tier in ['scs', 'ers', 'hana', 'db2', 'sybase'] + - saptune_solution_enabled is defined - saptune_solution_enabled == 'NONE' block: - name: "2.10.3 sap-notes : - Copy sapnote 2382421 to /etc/saptune/override" From df49b8d69780eef8795eab03cf9c9c51e6da0e6e Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 11 Jun 2024 16:54:11 +0530 Subject: [PATCH 030/164] chore: Add condition to check if saptune_solution_enabled is defined in 2.10.3.yaml --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index 590f1d7ad0..ae8e390dc5 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -140,6 +140,7 @@ - name: "2.10.3 sap-notes : - Run saptune solution revert if verify fails" when: + - saptune_solution_enabled is defined - saptune_solution_enabled != 'NONE' - saptune_solution_verify.rc != 0 ansible.builtin.command: "saptune solution revert {{ saptune_solution_enabled }}" From e8cff4c51c8fd5c92ed059e26f5c5e7470cfc949 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 11 Jun 2024 19:55:57 +0530 Subject: [PATCH 031/164] chore: Update New-SDAFDevopsWorkloadZone.ps1 script to fix variable group creation issue --- deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index 6928d25560..eb955c4129 100644 --- 
a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -224,7 +224,7 @@ if ($authenticationMethod -eq "Service Principal") { $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors ) if ($GroupID.Length -eq 0) { Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green - az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false--output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project + az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID WZ_PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false --output none --authorize true --organization $ADO_ORGANIZATION --project $ADO_Project $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --organization $ADO_ORGANIZATION --project $ADO_Project --only-show-errors) } From 1ec39e8270f9ed4eb08f3de463f176e4a8852ee3 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 13 Jun 2024 11:18:31 +0530 Subject: [PATCH 032/164] chore: Refactor saptune_solution_to_apply logic in 2.10.3.yaml --- .../2.10-sap-notes/tasks/2.10.3.yaml | 52 ++++++++++++------- deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 2 +- 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index ae8e390dc5..d9195cba62 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -107,26 +107,42 @@ net.ipv4.tcp_tw_reuse = 0 net.ipv4.tcp_tw_recycle = 0 # /usr/lib/sysctl.d/99-sysctl.conf -- name: "2.10.3 sap-notes : - Set fact for saptune solution to use" +- name: "2.10.3 sap-notes : - Set fact for saptune solution to use" ansible.builtin.set_fact: saptune_solution_to_apply: >- - {%- if 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-APP+DB' - {%- elif 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} - 'NETWEAVER+HANA' - {%- elif node_tier in ['scs', 'ers','pas','app'] and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-APPSERVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':S4HANA') -%} - 'S4HANA-DBSERVER' - {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} - 'NETWEAVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':BW4HANA') -%} - 'HANA' - {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and 
platform in ['SYBASE', 'DB2', 'ORACLE', 'ORACLE-ASM', 'SQLSERVER'] and bom.product_ids.scs is search(':NW\d{3}') -%} - 'NETWEAVER' - {%- elif node_tier == 'hana' and platform == 'HANA' and bom.product_ids.scs is search(':NW\d{3}') -%} - 'HANA' - {%- elif node_tier in ['sybase'] and platform == 'SYBASE' and bom.product_ids.scs is search(':NW\d{3}') -%} + {%- if 'scs' in supported_tiers and 'hana' in supported_tiers and platform == 'HANA' -%} + {%- if bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APP+DB' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER+HANA' + {%- else -%} + 'NETWEAVER' + {%- endif -%} + {%- else -%} + 'HANA' + {%- endif -%} + {%- elif node_tier == 'hana' and platform == 'HANA' -%} + {%- if bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-DBSERVER' + {%- elif bom.product_ids.scs is search(':BW4HANA') -%} + 'HANA' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'HANA' + {%- endif -%} + {%- else -%} + 'HANA' + {%- endif -%} + {%- elif node_tier in ['scs', 'ers', 'pas', 'app'] and platform == 'HANA' and bom.product_ids is defined -%} + {%- if bom.product_ids.scs is search(':S4HANA') -%} + 'S4HANA-APPSERVER' + {%- elif bom.product_ids.scs is search(':BW4HANA') -%} + 'NETWEAVER' + {%- elif bom.product_ids.scs is search(':NW\d{3}') -%} + 'NETWEAVER' + {%- endif -%} + {%- elif node_tier in ['sybase'] and platform == 'SYBASE' and bom.product_ids is defined and bom.product_ids.scs is search(':NW\d{3}') -%} 'SAP-ASE' {%- else -%} 'NETWEAVER' diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index eb955c4129..30ab069ed0 100644 --- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -212,7 +212,7 @@ if ($authenticationMethod -eq "Service Principal") { Write-Host "Creating the Service Principal" $workload_zone_spn_name -ForegroundColor Green $Data = (az ad sp create-for-rbac --role="Contributor" --scopes=$workload_zone_scopes --name=$workload_zone_spn_name --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_SECRET = $Data.password - $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json + $ExistingData = (az ad sp list --all --filter "startswith(displayName,'$workload_zone_spn_name')" --query "[?displayName=='$workload_zone_spn_name'] | [0]" --only-show-errors) | ConvertFrom-Json $ARM_CLIENT_ID = $ExistingData.appId $ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId $ARM_OBJECT_ID = $ExistingData.Id From 064c1ab17148dc9aceb22657a3a1cb8dfe797b15 Mon Sep 17 00:00:00 2001 From: Jesper Severinsen <30658160+jesperseverinsen@users.noreply.github.com> Date: Thu, 13 Jun 2024 09:05:03 +0200 Subject: [PATCH 033/164] Set HDB schema name for ABAP and JAVA systems (#593) * Set HDB Schema Name task * fix command error and remove ignore_errors * Fix parsing error --- .../roles-db/4.0.4-hdb-schema/tasks/main.yaml | 36 +++++++++++++++++ .../roles-db/4.0.4-hdb-schema/vars/main.yaml | 3 ++ .../roles-sap/5.1-dbload/tasks/main.yaml | 40 +++++-------------- .../roles-sap/5.2-pas-install/tasks/main.yaml | 21 +++------- .../roles-sap/5.3-app-install/tasks/main.yaml | 21 +++------- 5 files changed, 61 insertions(+), 60 deletions(-) create mode 100644 deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml create mode 100644 
deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml diff --git a/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml b/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml new file mode 100644 index 0000000000..7371ad1f82 --- /dev/null +++ b/deploy/ansible/roles-db/4.0.4-hdb-schema/tasks/main.yaml @@ -0,0 +1,36 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | 0 Set 'schema_name' fact for HDB Schema Name | +# | | +# +------------------------------------4--------------------------------------*/ + +- name: "HDB Schema: Get DEFAULT.PFL" + ansible.builtin.slurp: + src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" + register: profilefile + +- name: "HDB Schema: Get schema property" + ansible.builtin.set_fact: + schema_property: "{{ profilefile['content'] | b64decode | split('\n') | select('search', property_name ) }}" + loop: "{{ hdb_schema_property_names }}" + loop_control: + loop_var: property_name + when: + - (schema_property | default([])) | length <= 0 + +- name: "HDB Schema: Parse schema name" + ansible.builtin.set_fact: + schema_name: "{{ schema_property | first | split('=') | last | trim }}" + when: + - (schema_property | default([])) | length > 0 + +- name: "HDB Schema: Set default schema" + ansible.builtin.set_fact: + schema_name: "{{ hana_schema }}" + when: + - schema_name is not defined + +- name: "HDB Schema: Show schema name" + ansible.builtin.debug: + msg: "Schema name: {{ schema_name }}" diff --git a/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml b/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml new file mode 100644 index 0000000000..30ea3d1819 --- /dev/null +++ b/deploy/ansible/roles-db/4.0.4-hdb-schema/vars/main.yaml @@ -0,0 +1,3 @@ +hdb_schema_property_names: + - "dbs/hdb/schema" # ABAP schema + - "j2ee/dbschema" # JAVA schema diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 49dc42b09a..20acb2f725 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -346,22 +346,12 @@ # when: # - node_tier in ["oracle","oracle-asm"] - - name: "DBLoad Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "DBLoad Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - when: - - platform == 'HANA' - - - name: "DBLoad Install: Installation results" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" + - name: "DBLoad Install: Set Schema Name" when: - - platform == 'HANA' + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "Backward Compatibility - Check required Database HA variables" ansible.builtin.set_fact: @@ -464,22 +454,12 @@ - db_high_availability is defined - database_high_availability is not defined - - name: "DBLoad Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "DBLoad Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - when: - - platform == 
'HANA' - - - name: "DBLoad Install: Installation results" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" + - name: "DBLoad Install: Set Schema Name" when: - - platform == 'HANA' + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "DBLoad: Get hdbuserstore path" become: true diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 9e80ca045c..5d7fd46e27 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -129,21 +129,12 @@ - platform == "HANA" - db_port_open.msg is defined -- name: "PAS Install: Set schema_name variable for HANA" - when: platform == "HANA" - block: - - name: "PAS Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "PAS Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - - - name: "PAS Install: Show schema name" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" +- name: "PAS Install: Set Schema Name" + when: + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true - name: "PAS Install" block: diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index ddcdf93b89..967c1588a7 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -143,21 +143,12 @@ - platform == "HANA" - db_port_open.msg is defined -- name: "APP Install: Set schema_name variable for HANA" - when: platform == "HANA" - block: - - name: "APP Install: Get DEFAULT.PFL" - ansible.builtin.slurp: - src: "/sapmnt/{{ sap_sid | upper }}/profile/DEFAULT.PFL" - register: profilefile - - - name: "APP Install: Get schema name" - ansible.builtin.set_fact: - schema_name: "{{ profilefile['content'] | b64decode | split('\n') | select('search', 'dbs/hdb/schema') | first | split('=') | last | trim | default('{{ hana_schema }}') }}" - - - name: "APP Install: Show schema name" - ansible.builtin.debug: - msg: "Schema name {{ schema_name }}" +- name: "APP Install: Set Schema Name" + when: + - platform == "HANA" + ansible.builtin.include_role: + name: "roles-db/4.0.4-hdb-schema" + public: true # *====================================4=======================================8 # SAP APP: Install From d1edf7a28f01ba971355715dce7cac946e058903 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Fri, 14 Jun 2024 13:22:56 +0530 Subject: [PATCH 034/164] chore: Comment out unnecessary role assignments in New-SDAFDevopsProject.ps1 script --- deploy/scripts/New-SDAFDevopsProject.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 7bc35aca37..f380a88fc1 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -932,9 +932,9 @@ if ($WebApp) { # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") } - az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" 
--subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + # az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none - az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none + # az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) if ($Control_plane_groupID.Length -eq 0) { From 33f97ec414a556b805feca72c39d81cc870a5f5c Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:47:31 -0700 Subject: [PATCH 035/164] Release testing (#597) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook --- .../tasks/1.17.2-provision.yml | 4 +- .../3.3.1-bom-utility/tasks/bom-register.yaml | 2 +- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/pipelines/02-sap-workload-zone.yaml | 4 +- .../pipelines/03-sap-system-deployment.yaml | 11 -- .../pipelines/05-DB-and-SAP-installation.yaml | 7 - deploy/scripts/New-SDAFDevopsProject.ps1 | 167 +++--------------- .../sap_namegenerator/variables_global.tf | 2 + 9 files changed, 33 insertions(+), 168 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml index 7b9272d75d..b920faef12 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2-provision.yml @@ -44,14 +44,14 @@ when: ansible_hostname == primary_instance_name - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from primary to secondary - ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" + ansible.builtin.shell: ssh -oStrictHostKeyChecking=no {{ secondary_instance_name }} "hostname -s" register: primary_to_secondary_ssh_result changed_when: false failed_when: primary_to_secondary_ssh_result.stdout_lines[0] != secondary_instance_name when: ansible_hostname == primary_instance_name - name: 1.17 Generic Pacemaker - Ensure trust relationship is working from secondary to primary" - 
ansible.builtin.command: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" + ansible.builtin.shell: ssh -oStrictHostKeyChecking=no {{ primary_instance_name }} "hostname -s" register: secondary_to_primary_ssh_result changed_when: false failed_when: secondary_to_primary_ssh_result.stdout_lines[0] != primary_instance_name diff --git a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml index 495eb5c1db..017f8b9278 100644 --- a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml +++ b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml @@ -225,7 +225,7 @@ # - name: "{{ task_prefix }} Register Microsoft Supplied BOM {{ bom_name }} from archives" ansible.builtin.include_vars: - file: "{{ microsoft_supplied_bom_archive.path }}" + file: "{{ microsoft_supplied_bom_archive.stat.path }}" name: bom_temp when: microsoft_supplied_bom_archive.stat.exists # Step: 05-05-02 - END diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 5989235e44..605738678b 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -5,7 +5,7 @@ become_user_name: root oracle_user_name: oracle orchestration_ansible_user: azureadm # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.11.0.2" +SDAF_Version: "3.11.0.3" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index 064ddbda20..36d9b11208 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.11.0.2 +3.11.0.3 diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 572fc2e9f1..181f483b65 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -558,8 +558,6 @@ stages: export landscape_tfstate_key=$(cat ${workload_environment_file_name} | grep landscape_tfstate_key= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Zone State File' $landscape_tfstate_key fi - az logout --output none - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") if [ -z ${az_var} ]; then echo "##vso[task.logissue type=warning]Variable FENCING_SPN_ID is not set. 
Required for highly available deployments" @@ -571,7 +569,7 @@ stages: az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --output none fi fi - + az logout --output none echo -e "$green--- Add & update files in the DevOps Repository ---$reset" cd $(Build.Repository.LocalPath) git pull diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 164d2248af..7ab95f1bf1 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -70,7 +70,6 @@ stages: else source /etc/profile.d/deploy_server.sh fi - export AZURE_DEVOPS_EXT_PAT=$PAT HOME_CONFIG=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path) cd $HOME_CONFIG; mkdir -p .sap_deployment_automation @@ -256,16 +255,6 @@ stages: fi fi - if [ -z $USE_MSI ]; then - USE_MSI="false" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query USE_MSI.value --output table) - if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors - else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name USE_MSI --value false --output none --only-show-errors - fi - fi - if [ $USE_MSI != "true" ]; then echo "Using SPN" diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 1effad8bfd..27c9b38760 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -257,13 +257,6 @@ stages: new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" fi - if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then - new_parameters=$PIPELINE_EXTRA_PARAMETERS - else - echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'" - new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS" - fi - echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey" echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault" echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password" diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index f380a88fc1..6a469ce711 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -25,7 +25,7 @@ $ControlPlaneSubscriptionName = $Env:SDAF_ControlPlaneSubscriptionName if ($IsWindows) { $pathSeparator = "\" } else { $pathSeparator = "/" } #endregion -$versionLabel = "v3.11.0.2" +$versionLabel = "v3.11.0.3" # az logout @@ -38,8 +38,6 @@ $versionLabel = "v3.11.0.2" # az login --output none --tenant $ARM_TENANT_ID --only-show-errors --scope https://graph.microsoft.com//.default # } -Write-Host "" -Write-Host "" # Check if access to the Azure DevOps organization is available and prompt for PAT if needed # Exact permissions required, to be validated, and included in the Read-Host text. 
@@ -65,21 +63,8 @@ else { Write-Host "" Write-Host "" -if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } - -if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { - $Title = "Select the authentication method to use" - $data = @('Service Principal', 'Managed Identity') - Show-Menu($data) - $selection = Read-Host $Title - $authenticationMethod = $data[$selection - 1] -} -else { - $authenticationMethod = $Env:SDAF_AuthenticationMethod -} - -Write-Host "Using authentication method: $authenticationMethod" -ForegroundColor Yellow +if (Test-Path ".${pathSeparator}start.md") { Write-Host "Removing start.md" ; Remove-Item ".${pathSeparator}start.md" } if ($Env:SDAF_AuthenticationMethod.Length -eq 0) { $Title = "Select the authentication method to use" @@ -224,7 +209,6 @@ else { Write-Host "Using an existing project" - $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --output tsv) @@ -787,17 +771,20 @@ if ($WebApp) { # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") } - $configureAuth = Read-Host "Configuring authentication for the App Registration?" - if ($configureAuth -eq 'y') { - az rest --method POST --uri "https://graph.microsoft.com/beta/applications/$APP_REGISTRATION_OBJECTID/federatedIdentityCredentials\" --body "{'name': 'ManagedIdentityFederation', 'issuer': 'https://login.microsoftonline.com/$ARM_TENANT_ID/v2.0', 'subject': '$MSI_objectId', 'audiences': [ 'api://AzureADTokenExchange' ]}" + if ($MSI_objectId -ne $null) { + $configureAuth = Read-Host "Configuring authentication for the App Registration?" + if ($configureAuth -eq 'y') { + az rest --method POST --uri "https://graph.microsoft.com/beta/applications/$APP_REGISTRATION_OBJECTID/federatedIdentityCredentials\" --body "{'name': 'ManagedIdentityFederation', 'issuer': 'https://login.microsoftonline.com/$ARM_TENANT_ID/v2.0', 'subject': '$MSI_objectId', 'audiences': [ 'api://AzureADTokenExchange' ]}" - $API_URL = "https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationMenuBlade/~/ProtectAnAPI/appId/$APP_REGISTRATION_ID/isMSAApp~/false" + $API_URL = "https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationMenuBlade/~/ProtectAnAPI/appId/$APP_REGISTRATION_ID/isMSAApp~/false" - Write-Host "The browser will now open, Please Add a new scope, by clicking the '+ Add a new scope link', accept the default name and click 'Save and Continue'" - Write-Host "In the Add a scope page enter the scope name 'user_impersonation'. Choose 'Admins and Users' in the who can consent section, next provide the Admin consent display name 'Access the SDAF web application' and 'Use SDAF' as the Admin consent description, accept the changes by clicking the 'Add scope' button" + Write-Host "The browser will now open, Please Add a new scope, by clicking the '+ Add a new scope link', accept the default name and click 'Save and Continue'" + Write-Host "In the Add a scope page enter the scope name 'user_impersonation'. 
Choose 'Admins and Users' in the who can consent section, next provide the Admin consent display name 'Access the SDAF web application' and 'Use SDAF' as the Admin consent description, accept the changes by clicking the 'Add scope' button" + + Start-Process $API_URL + Read-Host -Prompt "Once you have created and validated the scope, Press any key to continue" + } - Start-Process $API_URL - Read-Host -Prompt "Once you have created and validated the scope, Press any key to continue" } } @@ -834,117 +821,14 @@ if ($authenticationMethod -eq "Service Principal") { $CP_ARM_OBJECT_ID = $ExistingData.Id $CP_ARM_TENANT_ID = $ExistingData.appOwnerOrganizationId - $confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" - if ($confirmation -eq 'y') { - - $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") - } - } -} -else { - - if ($Env:MSI_OBJECT_ID.Length -ne 0) { - $MSI_objectId = $Env:MSI_OBJECT_ID - } - else { - - $Title = "Choose the subscription that contains the Managed Identity" - $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) - Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) - $selection = Read-Host $Title - - $subscription = $subscriptions[$selection - 1] - - $Title = "Choose the Managed Identity" - $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output table | Sort-Object) - Show-Menu($identities[2..($identities.Length - 1)]) - $selection = Read-Host $Title - $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - - $identity = $identities[$selectionOffset] - Write-Host "Using Managed Identity:" $identity - - $id = $(az identity list --query "[?name=='$identity'].id" --subscription $subscription --output tsv) - $MSI_objectId = $(az identity show --ids $id --query "principalId" --output tsv) - - $postBody = [PSCustomObject]@{ - accessLevel = @{ - accountLicenseType = "Basic" - } - projectEntitlements = @([ordered]@{ - group = @{ - groupType = "projectAdministrator" - } - projectRef = @{ - id = $Project_ID - } - - }) - servicePrincipal = @{ - origin = "aad" - originId = $id - subjectKind = "servicePrincipal" - } - - } - - Set-Content -Path "user.json" -Value ($postBody | ConvertTo-Json -Depth 6) - - az devops invoke --area MemberEntitlementManagement --resource ServicePrincipalEntitlements --in-file user.json --api-version "7.1-preview" --http-method POST - - } -} - - -#region App registration -if ($WebApp) { - Write-Host "Creating the App registration in Azure Active Directory" -ForegroundColor Green - - $found_appRegistration = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName'].displayName | [0]" --only-show-errors) - - if ($found_appRegistration.Length -ne 0) { - Write-Host "Found an existing App Registration:" $ApplicationName - $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - - $APP_REGISTRATION_ID = $ExistingData.appId - $APP_REGISTRATION_OBJECTID = $ExistingData.id - - # $confirmation = Read-Host "Reset the app registration secret y/n?" + #$confirmation = Read-Host "Reset the Control Plane Service Principal password y/n?" 
# if ($confirmation -eq 'y') { - # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") + + # $CP_ARM_CLIENT_SECRET = (az ad sp credential reset --id $CP_ARM_CLIENT_ID --append --query "password" --out tsv --only-show-errors).Replace("""", "") # } # else { - # $WEB_APP_CLIENT_SECRET = Read-Host "Please enter the app registration secret" + $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal $spn_name password" # } - } - else { - Write-Host "Creating an App Registration for" $ApplicationName -ForegroundColor Green - if ($IsWindows) { $manifestPath = ".\manifest.json" } else { $manifestPath = "./manifest.json" } - Add-Content -Path manifest.json -Value '[{"resourceAppId":"00000003-0000-0000-c000-000000000000","resourceAccess":[{"id":"e1fe6dd8-ba31-4d61-89e7-88639da4683d","type":"Scope"}]}]' - - $APP_REGISTRATION_ID = (az ad app create --display-name $ApplicationName --enable-id-token-issuance true --sign-in-audience AzureADMyOrg --required-resource-access $manifestPath --query "appId" --output tsv) - $ExistingData = (az ad app list --all --filter "startswith(displayName, '$ApplicationName')" --query "[?displayName=='$ApplicationName']| [0]" --only-show-errors) | ConvertFrom-Json - $APP_REGISTRATION_OBJECTID = $ExistingData.id - - if (Test-Path $manifestPath) { Write-Host "Removing manifest.json" ; Remove-Item $manifestPath } - - - # $WEB_APP_CLIENT_SECRET = (az ad app credential reset --id $APP_REGISTRATION_ID --append --query "password" --out tsv --only-show-errors --display-name "SDAF") - } - - # az role assignment create --assignee $CP_ARM_CLIENT_ID --role "Contributor" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none - - # az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none - - $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) - if ($Control_plane_groupID.Length -eq 0) { - Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green - if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true - } - else { - $CP_ARM_CLIENT_SECRET = Read-Host "Please enter the Control Plane Service Principal password" - } } else { @@ -963,13 +847,12 @@ if ($WebApp) { az role assignment create --assignee $CP_ARM_CLIENT_ID --role "User Access Administrator" --subscription $Control_plane_subscriptionID --scope /subscriptions/$Control_plane_subscriptionID --output none -} -else { $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" 
--only-show-errors) if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green + if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' PAT='Enter your personal access token here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true } else { az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_CLIENT_ID=$CP_ARM_CLIENT_ID CP_ARM_OBJECT_ID=$CP_ARM_OBJECT_ID CP_ARM_CLIENT_SECRET='Enter your SPN password here' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID CP_ARM_TENANT_ID=$CP_ARM_TENANT_ID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF --output none --authorize true @@ -1010,10 +893,10 @@ else { if ($Control_plane_groupID.Length -eq 0) { Write-Host "Creating the variable group" $ControlPlanePrefix -ForegroundColor Green if ($WebApp) { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' APP_REGISTRATION_APP_ID=$APP_REGISTRATION_ID APP_REGISTRATION_OBJECTID=$APP_REGISTRATION_OBJECTID APP_TENANT_ID=$ARM_TENANT_ID CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id 
SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true } else { - az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true + az pipelines variable-group create --name $ControlPlanePrefix --variables Agent='Azure Pipelines' CP_ARM_SUBSCRIPTION_ID=$Control_plane_subscriptionID PAT='Enter your personal access token here' POOL=$Pool_Name AZURE_CONNECTION_NAME='Control_Plane_Service_Connection' WORKLOADZONE_PIPELINE_ID=$wz_pipeline_id SYSTEM_PIPELINE_ID=$system_pipeline_id SDAF_GENERAL_GROUP_ID=$general_group_id SAP_INSTALL_PIPELINE_ID=$installation_pipeline_id TF_LOG=OFF USE_MSI=true --output none --authorize true } $Control_plane_groupID = (az pipelines variable-group list --query "[?name=='$ControlPlanePrefix'].id | [0]" --only-show-errors) @@ -1022,7 +905,7 @@ else { Write-Host Write-Host "" - Write-Host "The browser will now open, Please create a service connection with the name 'Control_Plane_Service_Connection'." + Write-Host "The browser will now open, Please create an 'Azure Resource Manager' service connection with the name 'Control_Plane_Service_Connection'." $connections_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/adminservices" Write-Host "URL: " $connections_url @@ -1127,7 +1010,7 @@ if (!$AlreadySet -or $ResetPAT ) { accessLevel = @{ accountLicenseType = "stakeholder" } - user = @{ + user = @{ origin = "aad" originId = $MSI_objectId subjectKind = "servicePrincipal" @@ -1170,7 +1053,7 @@ Write-Host "" Write-Host "The browser will now open, Select the '"$ADO_PROJECT "Build Service' user and ensure that it has 'Allow' in the Contribute section." $permissions_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_settings/repositories?_a=permissions" - +Write-Host "URL: " $permissions_url Start-Process $permissions_url Read-Host -Prompt "Once you have verified the permission, Press any key to continue" @@ -1236,4 +1119,4 @@ else { } -Write-Host "The script has completed" -ForegroundColor Green +Write-Host "The script has completed" -ForegroundColor Green \ No newline at end of file diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf index 3960773b27..c1c54fda18 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf @@ -372,6 +372,7 @@ variable "resource_prefixes" { "witness_accesskey" = "" "witness_name" = "" "ams_subnet" = "" + "nat_gateway" = "" } } @@ -506,6 +507,7 @@ variable "resource_suffixes" { "witness_name" = "-witness-name" "ams_subnet" = "ams-subnet" "ams_instance" = "-AMS" + "nat_gateway" = "-nat-gateway" } } From 3d2c61a2488095b3b389bdc3a4e14b8a1af61158 Mon Sep 17 00:00:00 2001 From: dkSteBTh Date: Thu, 13 Jun 2024 13:35:12 +0200 Subject: [PATCH 036/164] Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline.
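
For ad-hoc verification, the new playbook can also be run directly from the
deployer, outside the pipeline. A minimal sketch, assuming the standard SDAF
workspace layout; the SYSTEM folder name, the inventory file name, and the two
repository path variables below are illustrative, not fixed names:

    # Hypothetical SYSTEM folder in the configuration repository; adjust to your environment.
    cd "${CONFIG_REPO_PATH}/SYSTEM/DEV-WEEU-SAP01-X00"
    ansible-playbook \
      -i X00_hosts.yaml \
      --extra-vars "@sap-parameters.yaml" \
      --extra-vars "_workspace_directory=${PWD}" \
      "${SAP_AUTOMATION_REPO_PATH}/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml"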
--- ...ook_06_02_sap_on_azure_quality_checks.yaml | 72 +++++++++++++++ .../tasks/run_check.yaml | 92 +++++++++++++++++++ .../tasks/setup.yaml | 88 ++++++++++++++++++ .../vars/main.yaml | 35 +++++++ .../pipelines/05-DB-and-SAP-installation.yaml | 29 +++++- .../templates/collect-log-files.yaml | 91 ++++++++++++++++++ 6 files changed, 405 insertions(+), 2 deletions(-) create mode 100644 deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml create mode 100644 deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml diff --git a/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml new file mode 100644 index 0000000000..298fd977f1 --- /dev/null +++ b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml @@ -0,0 +1,72 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Playbook for SAP on Azure quality checks | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- hosts: localhost + name: "SAP on Azure quality checks: - setup deployer" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - Create Progress folder" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress" + state: directory + mode: 0755 + + - name: "SAP on Azure quality checks: - Remove sap-on-azure-quality-checks-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/sap-on-azure-quality-checks-done" + state: absent + + - name: "SAP on Azure quality checks: - setup prerequisites" + ansible.builtin.include_role: + name: "roles-misc/0.9-sap-on-azure-quality-checks" + tasks_from: "setup" + + + +- hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_ERS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP" + + name: "SAP on Azure quality checks: - run checks" + remote_user: "{{ orchestration_ansible_user }}" + gather_facts: true # Important to collect hostvars information + any_errors_fatal: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - run check" + ansible.builtin.include_role: + name: "roles-misc/0.9-sap-on-azure-quality-checks" + tasks_from: "run_check" + + +- hosts: localhost + name: "SAP on Azure quality checks: - Done" + gather_facts: true + vars_files: + - vars/ansible-input-api.yaml # API Input template with defaults + + tasks: + + - name: "SAP on Azure quality checks: - Create sap-on-azure-quality-checks-done flag" + ansible.builtin.file: + path: "{{ _workspace_directory }}/.progress/sap-on-azure-quality-checks-done" + state: touch + mode: 0755 + +... 
+# /*---------------------------------------------------------------------------8 +# | END | +# +------------------------------------4--------------------------------------*/ diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml new file mode 100644 index 0000000000..c45640ee21 --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml @@ -0,0 +1,92 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Run quality check | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- name: "SAP on Azure quality checks: - Check required Database HA variables" + ansible.builtin.set_fact: + database_high_availability: "{{ db_high_availability | default(false) }}" + when: + - db_high_availability is defined + - database_high_availability is not defined + + +- name: "SAP on Azure quality checks: - Retrieve Subscription ID and Resource Group Name" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + +# https://github.com/Azure/SAP-on-Azure-Scripts-and-Utilities/blob/main/QualityCheck/Readme.md#login-with-ssh-keys-no-password-required-for-sudo +- name: "SAP on Azure quality checks: - Set common quality check facts" + ansible.builtin.set_fact: + qc_subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + qc_az_vm_resource_group: "{{ azure_metadata.json.compute.resourceGroupName }}" + qc_az_vm_name: "{{ azure_metadata.json.compute.name }}" + qc_vm_username: "{{ ansible_user }}" + qc_vm_hostname: "{{ ansible_hostname }}.{{ sap_fqdn }}" + qc_vm_operating_system: "{{ vm_operating_system_map[ansible_os_family | upper] }}" + qc_vm_database: "{{ vm_database_map[platform | upper] }}" + qc_vm_role: "{{ vm_role_map[node_tier | upper] }}" + qc_sid: "{{ db_sid if vm_role_map[node_tier | upper] == 'DB' else sap_sid }}" + qc_high_availability: "{{ (vm_role_map[node_tier | upper] == 'DB' and database_high_availability) or (vm_role_map[node_tier | upper] == 'ASCS' and scs_high_availability) }}" + + +- name: "SAP on Azure quality checks: - Debug variables" + ansible.builtin.debug: + msg: + - "Subscription ID: {{ qc_subscription_id }}" + - "Resource Group Name: {{ qc_az_vm_resource_group }}" + - "VM Name: {{ qc_az_vm_name }}" + - "VM Username: {{ qc_vm_username }}" + - "VM Hostname: {{ qc_vm_hostname }}" + - "VM Operating System: {{ qc_vm_operating_system }}" + - "VM Database: {{ qc_vm_database }}" + - "VM Role: {{ qc_vm_role }}" + - "SSH Key path {{ _workspace_directory }}/sshkey" + - "Output Directory {{ _workspace_directory }}/quality_checks" + - "SID: {{ qc_sid }}" + - "High Availability: {{ qc_high_availability }}" + verbosity: 2 + + +- name: "SAP on Azure quality checks: - Run quality check" + ansible.builtin.shell: + cmd: >- + Connect-AzAccount -AccountId $Env:ARM_CLIENT_ID ` + -AccessToken (az account get-access-token --subscription {{ qc_subscription_id }} | ConvertFrom-Json).accessToken ` + -Subscription {{ qc_subscription_id }} + + ./QualityCheck.ps1 -LogonWithUserSSHKey ` + -VMOperatingSystem {{ qc_vm_operating_system }} ` + -VMDatabase {{ qc_vm_database }} ` + -VMRole {{ qc_vm_role }} ` + -AzVMResourceGroup {{ qc_az_vm_resource_group }} ` + -AzVMName {{ qc_az_vm_name }} ` + -VMHostname {{ qc_vm_hostname }} ` + -VMUsername {{ qc_vm_username 
}} ` + -VMConnectionPort 22 ` + -SubscriptionId {{ qc_subscription_id }} ` + -SSHKey {{ _workspace_directory }}/sshkey ` + -Hardwaretype VM ` + -SID {{ qc_sid }} ` + -HighAvailability {{ '$' ~ qc_high_availability }} ` + -OutputDirName {{ _workspace_directory }}/quality_checks + args: + executable: "/usr/local/bin/pwsh" + chdir: "/opt/microsoft/quality_check" + no_log: true + delegate_to: localhost + register: quality_check_result + +- name: "SAP on Azure quality checks: - Debug quality check result" + ansible.builtin.debug: + msg: "{{ quality_check_result.stdout_lines }}" + verbosity: 2 + +... diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml new file mode 100644 index 0000000000..bc1d4eb0a5 --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml @@ -0,0 +1,88 @@ +# /*---------------------------------------------------------------------------8 +# | | +# | Setup quality check prerequisites | +# | | +# +------------------------------------4--------------------------------------*/ +--- + +- name: "SAP on Azure quality checks: - setup directories" + become: true + become_user: root + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + mode: 0755 + owner: "{{ item.owner }}" + loop: + - { path: "/opt/microsoft/powershell/v{{ powershell_version }}", owner: "root" } + - { path: "/opt/microsoft/quality_check", owner: "{{ orchestration_ansible_user }}" } + - { path: "{{ _workspace_directory }}/quality_checks", owner: "{{ orchestration_ansible_user }}" } + + +- name: "SAP on Azure quality checks: - extract PowerShell binary" + become: true + become_user: root + ansible.builtin.unarchive: + src: "https://github.com/PowerShell/PowerShell/releases/download/v{{ powershell_version }}/powershell-{{ powershell_version }}-linux-x64.tar.gz" + dest: "/opt/microsoft/powershell/v{{ powershell_version }}" + creates: "/opt/microsoft/powershell/v{{ powershell_version }}/pwsh" + remote_src: true + + +- name: "SAP on Azure quality checks: - create PowerShell symbolic link" + become: true + become_user: root + ansible.builtin.file: + src: "/opt/microsoft/powershell/v{{ powershell_version }}/pwsh" + dest: "/usr/local/bin/pwsh" + state: link + mode: 0755 + + +- name: "SAP on Azure quality checks: - fetch quality check config" + become: true + become_user: root + ansible.builtin.get_url: + url: "{{ azure_utility_repo }}/main/QualityCheck/QualityCheck.json" + dest: "/opt/microsoft/quality_check/QualityCheck.json" + owner: "{{ orchestration_ansible_user }}" + timeout: 30 + register: qc_json_result + until: qc_json_result is succeeded or not qc_json_result.changed + retries: 2 + delay: 5 + + +- name: "SAP on Azure quality checks: - fetch quality check script" + become: true + become_user: root + ansible.builtin.get_url: + url: "{{ azure_utility_repo }}/main/QualityCheck/QualityCheck.ps1" + dest: "/opt/microsoft/quality_check/QualityCheck.ps1" + owner: "{{ orchestration_ansible_user }}" + mode: 0755 + timeout: 30 + register: qc_ps_result + until: qc_ps_result is succeeded or not qc_ps_result.changed + retries: 2 + delay: 5 + +- name: "SAP on Azure quality checks: - run PowerShell setup" + become: true + become_user: root + ansible.builtin.shell: >- + Update-AzConfig -EnableLoginByWam $false + + $modules = @("Az", "Az.NetAppFiles", "Posh-SSH") + + foreach ($module in $modules) { + if (-not (Get-Module -ListAvailable -Name $module)) { + Install-Module 
$module -Force -Scope AllUsers -Confirm:$false + } + } + register: qc_modules_result + failed_when: qc_modules_result.rc != 0 + args: + chdir: "/opt/microsoft/quality_check" + executable: "/usr/local/bin/pwsh" +... diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml new file mode 100644 index 0000000000..15353e7813 --- /dev/null +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml @@ -0,0 +1,35 @@ + +azure_utility_repo: https://raw.githubusercontent.com/Azure/SAP-on-Azure-Scripts-and-Utilities +powershell_version: 7.3.12 + +# https://github.com/Azure/SAP-on-Azure-Scripts-and-Utilities/blob/main/QualityCheck/QualityCheck.ps1 +vm_operating_system_map: + SUSE: "SUSE" + REDHAT: "RedHat" + ORACLELINUX: "OracleLinux" + WINDOWS: "Windows" + +vm_database_map: + HANA: HANA + DB2: Db2 + SYBASE: ASE + SQLSERVER: MSSQL + ORACLE: Oracle + ORACLE-ASM: Oracle + +vm_role_map: + HANA: DB + DB2: DB + SYBASE: DB + SQLSERVER: DB + ORACLE: DB + ORACLE-ASM: DB + SCS: ASCS + ERS: ASCS + PAS: APP + APP: APP + +high_availability_agent_map: + AFA: FencingAgent + SBD: SBD + ISCSI: SBD diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 27c9b38760..43d55dbb6c 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -77,6 +77,11 @@ parameters: type: boolean default: false + - name: sap_on_azure_quality_checks + displayName: SAP on Azure Quality Checks + type: boolean + default: false + - name: post_configuration_actions displayName: Post Configuration Actions type: boolean @@ -525,6 +530,24 @@ stages: azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sap_on_azure_quality_checks, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: SAP on Azure quality checks + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) - ${{ if eq(parameters.post_configuration_actions, true) }}: - template: templates\run-ansible.yaml parameters: @@ -580,6 +603,8 @@ stages: azureClientSecret: $(ARM_CLIENT_SECRET) azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) - - template: templates\collect-log-files.yaml + - template: templates\collect-log-files.yaml parameters: - logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs + logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs + qualityCheckPath: ${{ parameters.config_repo_path 
}}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/quality_checks + collectQualityChecks: ${{ parameters.sap_on_azure_quality_checks }} diff --git a/deploy/pipelines/templates/collect-log-files.yaml b/deploy/pipelines/templates/collect-log-files.yaml index 6fc4996df8..c536faade5 100644 --- a/deploy/pipelines/templates/collect-log-files.yaml +++ b/deploy/pipelines/templates/collect-log-files.yaml @@ -1,5 +1,7 @@ parameters: logPath: "" + qualityCheckPath: "" + collectQualityChecks: false steps: - script: | #!/bin/bash @@ -80,3 +82,92 @@ steps: condition: always() env: LOG_PATH: ${{ parameters.logPath }} + + - script: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + # Treat unset variables as an error when substituting. + set -eu + + echo "Collecting log files from ${{ parameters.logPath }}" + + if [ -d ${LOG_PATH} ] && [ $(ls ${LOG_PATH}/*.zip | wc -l ) -gt 0 ]; then + echo "Found log files in ${LOG_PATH}" + + cd ${LOG_PATH} + ls -ltr + + git config --global user.email "${USER_EMAIL}" + git config --global user.name "${USER_NAME}" + + echo "Checking out ${SOURCE_BRANCH} branch..." + git checkout -q ${SOURCE_BRANCH} + echo "Pulling last changes..." + git pull + + echo "Adding new logs..." + git add --ignore-errors *.zip + if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then + echo "Committing changes..." + git commit -m "Adding new logs" + echo "Pushing changes..." + git push + else + echo "No changes to commit" + fi + else + echo No logs found in "${LOG_PATH}" + fi + displayName: Store log files in repository + enabled: true + env: + USER_EMAIL: $(Build.RequestedForEmail) + USER_NAME: $(Build.RequestedFor) + SOURCE_BRANCH: $(Build.SourceBranchName) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + LOG_PATH: ${{ parameters.logPath }} + + - script: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + # Treat unset variables as an error when substituting. + set -eu + + echo "Collecting quality check files from ${{ parameters.qualityCheckPath }}" + + if [ -d ${QUALITY_CHECK_PATH} ] && [ $(ls ${QUALITY_CHECK_PATH}/*.html | wc -l ) -gt 0 ]; then + echo "Found new quality check files in ${QUALITY_CHECK_PATH}" + + cd ${QUALITY_CHECK_PATH} + ls -ltr + + git config --global user.email "${USER_EMAIL}" + git config --global user.name "${USER_NAME}" + + echo "Checking out ${SOURCE_BRANCH} branch..." + git checkout -q ${SOURCE_BRANCH} + echo "Pulling last changes..." + git pull + + echo "Adding new quality check files..." + git add --ignore-errors *.html + if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then + echo "Committing changes..." + git commit -m "Adding new quality check files" + echo "Pushing changes..." 
+ git push + else + echo "No changes to commit" + fi + else + echo No quality check files found in "${QUALITY_CHECK_PATH}" + fi + displayName: Store quality check files in repository + enabled: true + condition: ${{ eq(parameters.collectQualityChecks, true) }} + env: + USER_EMAIL: $(Build.RequestedForEmail) + USER_NAME: $(Build.RequestedFor) + SOURCE_BRANCH: $(Build.SourceBranchName) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + QUALITY_CHECK_PATH: ${{ parameters.qualityCheckPath }} From 6a8793a1f71fa594387d442448549369183a6fdf Mon Sep 17 00:00:00 2001 From: dkSteBTh Date: Thu, 13 Jun 2024 13:45:43 +0200 Subject: [PATCH 037/164] remove duplicate block --- .../templates/collect-log-files.yaml | 44 ------------------- 1 file changed, 44 deletions(-) diff --git a/deploy/pipelines/templates/collect-log-files.yaml b/deploy/pipelines/templates/collect-log-files.yaml index c536faade5..3544239fa6 100644 --- a/deploy/pipelines/templates/collect-log-files.yaml +++ b/deploy/pipelines/templates/collect-log-files.yaml @@ -83,50 +83,6 @@ steps: env: LOG_PATH: ${{ parameters.logPath }} - - script: | - #!/bin/bash - # Exit immediately if a command exits with a non-zero status. - # Treat unset variables as an error when substituting. - set -eu - - echo "Collecting log files from ${{ parameters.logPath }}" - - if [ -d ${LOG_PATH} ] && [ $(ls ${LOG_PATH}/*.zip | wc -l ) -gt 0 ]; then - echo "Found log files in ${LOG_PATH}" - - cd ${LOG_PATH} - ls -ltr - - git config --global user.email "${USER_EMAIL}" - git config --global user.name "${USER_NAME}" - - echo "Checking out ${SOURCE_BRANCH} branch..." - git checkout -q ${SOURCE_BRANCH} - echo "Pulling last changes..." - git pull - - echo "Adding new logs..." - git add --ignore-errors *.zip - if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then - echo "Committing changes..." - git commit -m "Adding new logs" - echo "Pushing changes..." - git push - else - echo "No changes to commit" - fi - else - echo No logs found in "${LOG_PATH}" - fi - displayName: Store log files in repository - enabled: true - env: - USER_EMAIL: $(Build.RequestedForEmail) - USER_NAME: $(Build.RequestedFor) - SOURCE_BRANCH: $(Build.SourceBranchName) - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - LOG_PATH: ${{ parameters.logPath }} - - script: | #!/bin/bash # Exit immediately if a command exits with a non-zero status. From 27dd64f95b62ac25e9906786e70049b388fbadb3 Mon Sep 17 00:00:00 2001 From: dkSteBTh Date: Thu, 13 Jun 2024 13:52:04 +0200 Subject: [PATCH 038/164] remove blank line --- deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml index 298fd977f1..7c3a21747c 100644 --- a/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml +++ b/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml @@ -30,7 +30,6 @@ tasks_from: "setup" - - hosts: "{{ sap_sid | upper }}_DB : {{ sap_sid | upper }}_SCS : {{ sap_sid | upper }}_ERS : From 2e29558851b3c7a74e1b5e40b7a94695b6057965 Mon Sep 17 00:00:00 2001 From: dkSteBTh Date: Thu, 13 Jun 2024 13:53:47 +0200 Subject: [PATCH 039/164] add mode to get_url downloaded file. 
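
Without an explicit mode, ansible.builtin.get_url leaves the permissions of a
newly created file to the remote umask, so repeated runs on differently
configured deployers could produce different permissions on QualityCheck.json.
A quick, illustrative check after a run (assuming GNU stat on the deployer):

    stat -c '%a %n' /opt/microsoft/quality_check/QualityCheck.json
    # expected: 755 /opt/microsoft/quality_check/QualityCheck.json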
--- .../roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml index bc1d4eb0a5..72f5c00248 100644 --- a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml @@ -46,6 +46,7 @@ url: "{{ azure_utility_repo }}/main/QualityCheck/QualityCheck.json" dest: "/opt/microsoft/quality_check/QualityCheck.json" owner: "{{ orchestration_ansible_user }}" + mode: 0755 timeout: 30 register: qc_json_result until: qc_json_result is succeeded or not qc_json_result.changed From a95b725e6b2916d9732a0eb4717ae65068ce5442 Mon Sep 17 00:00:00 2001 From: dkSteBTh Date: Thu, 13 Jun 2024 13:54:26 +0200 Subject: [PATCH 040/164] remove blank line from start of file. --- .../roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml index 15353e7813..ce6e23c1f9 100644 --- a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/vars/main.yaml @@ -1,4 +1,3 @@ - azure_utility_repo: https://raw.githubusercontent.com/Azure/SAP-on-Azure-Scripts-and-Utilities powershell_version: 7.3.12 From ae132eef4a8afe81bf826cf840d0bfa7d3eace7b Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Wed, 19 Jun 2024 19:29:17 -0700 Subject: [PATCH 041/164] Rename quality check to quality assurance (#600) * script from main branch * chore: Add "nat_gateway" variable to global variables in sap_namegenerator module * chore: Update bom-register.yaml to use the correct path for the Microsoft supplied BOM archive * chore: Add debug task to bom-register.yaml for Microsoft supplied BOM archive * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Remove unnecessary code for extra parameters in DB and SAP installation pipeline * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * Refactor az logout command in sap-workload-zone.yaml * Refactor SSH command in 1.17 Generic Pacemaker provision playbook * chore: Remove cacheable flag from 3.3 BoM Processing task * Refactor bom-register.yaml to remove debug task and include correct path for Microsoft supplied BOM archive * Add SAP on Azure quality checks feature to the 05-DB-and-SAP-installation.yaml pipeline. * remove duplicate block * remove blank line * add mode to get_url downloaded file. * remove blank line from start of file.
* Refactor sap-system-deployment.yaml to configure devops CLI extension * Refactor sap-system-deployment.yaml to configure devops CLI extension * chore: Update SDAF version to 3.11.0.3 * Refactor az logout command in sap-workload-zone.yaml * chore: move SAP on Azure quality checks after post configuration * chore: Update quality check paths to quality_assurance * chore: Update quality assurance file paths * chore: Refactor YAML files to improve code organization and readability * chore: Add cacheable flag to 3.3 BoM Processing task --------- Co-authored-by: dkSteBTh --- .../tasks/run_check.yaml | 4 ++-- .../tasks/setup.yaml | 2 +- .../3.3.1-bom-utility/tasks/bom-register.yaml | 4 ++++ .../pipelines/05-DB-and-SAP-installation.yaml | 22 +++++++++---------- .../templates/collect-log-files.yaml | 20 ++++++++--------- 5 files changed, 28 insertions(+), 24 deletions(-) diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml index c45640ee21..f72ae7982b 100644 --- a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml @@ -49,7 +49,7 @@ - "VM Database: {{ qc_vm_database }}" - "VM Role: {{ qc_vm_role }}" - "SSH Key path {{ _workspace_directory }}/sshkey" - - "Output Directory {{ _workspace_directory }}/quality_checks" + - "Output Directory {{ _workspace_directory }}/quality_assurance" - "SID: {{ qc_sid }}" - "High Availability: {{ qc_high_availability }}" verbosity: 2 @@ -76,7 +76,7 @@ -Hardwaretype VM ` -SID {{ qc_sid }} ` -HighAvailability {{ '$' ~ qc_high_availability }} ` - -OutputDirName {{ _workspace_directory }}/quality_checks + -OutputDirName {{ _workspace_directory }}/quality_assurance args: executable: "/usr/local/bin/pwsh" chdir: "/opt/microsoft/quality_check" diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml index 72f5c00248..5f745c8d8d 100644 --- a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml +++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/setup.yaml @@ -16,7 +16,7 @@ loop: - { path: "/opt/microsoft/powershell/v{{ powershell_version }}", owner: "root" } - { path: "/opt/microsoft/quality_check", owner: "{{ orchestration_ansible_user }}" } - - { path: "{{ _workspace_directory }}/quality_checks", owner: "{{ orchestration_ansible_user }}" } + - { path: "{{ _workspace_directory }}/quality_assurance", owner: "{{ orchestration_ansible_user }}" } - name: "SAP on Azure quality checks: - extract PowerShell binary" diff --git a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml index 017f8b9278..deeeab6308 100644 --- a/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml +++ b/deploy/ansible/roles-sap/3.3.1-bom-utility/tasks/bom-register.yaml @@ -260,6 +260,10 @@ # Step: 06 # Description: Validate that BoM was found # + +- name: "{{ task_prefix }} Show BOM object" + ansible.builtin.debug: + var: bom - name: "{{ task_prefix }} Validate that a BOM object is created" ansible.builtin.fail: msg: "Unable to find the Bill of materials file for {{ bom_name }}." 
diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 43d55dbb6c..8e8e4b86fe 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -77,13 +77,13 @@ parameters: type: boolean default: false - - name: sap_on_azure_quality_checks - displayName: SAP on Azure Quality Checks + - name: post_configuration_actions + displayName: Post Configuration Actions type: boolean default: false - - name: post_configuration_actions - displayName: Post Configuration Actions + - name: sap_on_azure_quality_checks + displayName: SAP on Azure Quality Checks type: boolean default: false @@ -530,11 +530,11 @@ stages: azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) USE_MSI: $(USE_MSI) - - ${{ if eq(parameters.sap_on_azure_quality_checks, true) }}: + - ${{ if eq(parameters.post_configuration_actions, true) }}: - template: templates\run-ansible.yaml parameters: - displayName: SAP on Azure quality checks - ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml + displayName: Post Configuration Actions + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml secretName: "$(Preparation.SSH_KEY_NAME)" passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" @@ -548,11 +548,11 @@ stages: azureTenantId: $(ARM_TENANT_ID) azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) USE_MSI: $(USE_MSI) - - ${{ if eq(parameters.post_configuration_actions, true) }}: + - ${{ if eq(parameters.sap_on_azure_quality_checks, true) }}: - template: templates\run-ansible.yaml parameters: - displayName: Post Configuration Actions - ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_08_00_00_post_configuration_actions.yaml + displayName: SAP on Azure quality checks + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_06_02_sap_on_azure_quality_checks.yaml secretName: "$(Preparation.SSH_KEY_NAME)" passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" @@ -606,5 +606,5 @@ stages: - template: templates\collect-log-files.yaml parameters: logPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/logs - qualityCheckPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/quality_checks + qualityAssuranceResultsPath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/quality_assurance collectQualityChecks: ${{ parameters.sap_on_azure_quality_checks }} diff --git a/deploy/pipelines/templates/collect-log-files.yaml b/deploy/pipelines/templates/collect-log-files.yaml index 3544239fa6..0155e60604 100644 --- a/deploy/pipelines/templates/collect-log-files.yaml +++ b/deploy/pipelines/templates/collect-log-files.yaml @@ -1,6 +1,6 @@ parameters: logPath: "" - qualityCheckPath: "" + qualityAssuranceResultsPath: "" collectQualityChecks: false steps: - script: | @@ -89,12 +89,12 @@ steps: # Treat unset variables as an error when substituting. 
set -eu - echo "Collecting quality check files from ${{ parameters.qualityCheckPath }}" + echo "Collecting quality assurance results files from ${{ parameters.qualityAssuranceResultsPath }}" - if [ -d ${QUALITY_CHECK_PATH} ] && [ $(ls ${QUALITY_CHECK_PATH}/*.html | wc -l ) -gt 0 ]; then - echo "Found new quality check files in ${QUALITY_CHECK_PATH}" + if [ -d ${QUALITY_ASSURANCE_RESULTS_PATH} ] && [ $(ls ${QUALITY_ASSURANCE_RESULTS_PATH}/*.html | wc -l ) -gt 0 ]; then + echo "Found new quality assurance results files in ${QUALITY_ASSURANCE_RESULTS_PATH}" - cd ${QUALITY_CHECK_PATH} + cd ${QUALITY_ASSURANCE_RESULTS_PATH} ls -ltr git config --global user.email "${USER_EMAIL}" @@ -105,20 +105,20 @@ steps: echo "Pulling last changes..." git pull - echo "Adding new quality check files..." + echo "Adding new quality assurance files..." git add --ignore-errors *.html if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then echo "Committing changes..." - git commit -m "Adding new quality check files" + git commit -m "Adding new quality assurance files" echo "Pushing changes..." git push else echo "No changes to commit" fi else - echo No quality check files found in "${QUALITY_CHECK_PATH}" + echo No quality assurance files found in "${QUALITY_ASSURANCE_RESULTS_PATH}" fi - displayName: Store quality check files in repository + displayName: Store quality assurance files in repository enabled: true condition: ${{ eq(parameters.collectQualityChecks, true) }} env: @@ -126,4 +126,4 @@ steps: USER_NAME: $(Build.RequestedFor) SOURCE_BRANCH: $(Build.SourceBranchName) SYSTEM_ACCESSTOKEN: $(System.AccessToken) - QUALITY_CHECK_PATH: ${{ parameters.qualityCheckPath }} + QUALITY_ASSURANCE_RESULTS_PATH: ${{ parameters.qualityAssuranceResultsPath }} From 1b46a2ee73fcfc27ff230983065c52f9d0b225eb Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 20 Jun 2024 20:55:18 +0530 Subject: [PATCH 042/164] chore: Update os-packages.yaml for redhat8.6 --- deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 154db3a4ec..1afc5215ae 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -256,7 +256,7 @@ packages: redhat8.1: redhat8.2: redhat8.4: - redhat8.6: + redhat8.6: - { tier: 'os', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'NetworkManager-cloud-setup', node_tier: 'all', state: 'absent' } redhat8.8: From 9684fde8301b0b26b3e570ac2ba923abe923b5dc Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Tue, 25 Jun 2024 21:57:08 +0530 Subject: [PATCH 043/164] chore: Create directories for SAP deployment automation --- deploy/ansible/playbook_01_os_base_config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/deploy/ansible/playbook_01_os_base_config.yaml b/deploy/ansible/playbook_01_os_base_config.yaml index 6f1d78cb60..c0c4e76705 100644 --- a/deploy/ansible/playbook_01_os_base_config.yaml +++ b/deploy/ansible/playbook_01_os_base_config.yaml @@ -91,6 +91,15 @@ tags: - always + - name: "OS configuration playbook: - Create directories" + become: true + ansible.builtin.file: + path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' + state: directory + mode: '0755' + tags: + - always + - name: "OS configuration playbook: - Set sudoers" ansible.builtin.include_role: name: 
roles-os/1.0-sudoers From fb36046b506fde0c4568e20f21dc10bbc8c74be5 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 26 Jun 2024 10:03:58 +0530 Subject: [PATCH 044/164] chore: Update authentication prompt for App Registration configuration --- deploy/scripts/New-SDAFDevopsProject.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 6a469ce711..ac4dbd63d7 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -772,7 +772,7 @@ if ($WebApp) { } if ($MSI_objectId -ne $null) { - $configureAuth = Read-Host "Configuring authentication for the App Registration?" + $configureAuth = Read-Host "Configuring authentication for the App Registration (y/n)?" if ($configureAuth -eq 'y') { az rest --method POST --uri "https://graph.microsoft.com/beta/applications/$APP_REGISTRATION_OBJECTID/federatedIdentityCredentials\" --body "{'name': 'ManagedIdentityFederation', 'issuer': 'https://login.microsoftonline.com/$ARM_TENANT_ID/v2.0', 'subject': '$MSI_objectId', 'audiences': [ 'api://AzureADTokenExchange' ]}" @@ -1119,4 +1119,4 @@ else { } -Write-Host "The script has completed" -ForegroundColor Green \ No newline at end of file +Write-Host "The script has completed" -ForegroundColor Green From 2882b0b833aba5eb48406b106802b392a08f7cbb Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 27 Jun 2024 11:54:38 +0530 Subject: [PATCH 045/164] chore: Update PostBuildCleanup task to version 4 in 01-deploy-control-plane.yaml --- deploy/pipelines/01-deploy-control-plane.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index 8b9fe4dbc1..eb09c169b4 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -71,7 +71,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 # Set Variables. 
- task: AzureCLI@2 continueOnError: false From d46a2b7dc221775498dee9234f5c46f0f5eccbdc Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 27 Jun 2024 12:07:18 +0530 Subject: [PATCH 046/164] chore: Update PostBuildCleanup task to version 4 for all stages in 01-deploy-control-plane.yaml --- deploy/pipelines/01-deploy-control-plane.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index eb09c169b4..f4628b0578 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -400,7 +400,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - template: templates\download.yaml parameters: getLatestFromBranch: true @@ -945,7 +945,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - task: DotNetCoreCLI@2 displayName: "Build the Configuration Web Application" inputs: From ff51fe50e34ac9914109e921be42830327e8b573 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 27 Jun 2024 12:15:05 +0530 Subject: [PATCH 047/164] chore: Update PostBuildCleanup task to version 4 in deploy pipelines --- deploy/pipelines/02-sap-workload-zone.yaml | 2 +- deploy/pipelines/03-sap-system-deployment.yaml | 2 +- deploy/pipelines/05-DB-and-SAP-installation.yaml | 2 +- deploy/pipelines/06-post-installation-tooling.yaml | 4 ++-- deploy/pipelines/12-remove-control-plane.yaml | 4 ++-- deploy/pipelines/21-deploy-web-app.yaml | 2 +- deploy/pipelines/22-sample-deployer-config.yaml | 2 +- deploy/pipelines/23-levelup-configuration.yaml | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 181f483b65..88f49d9177 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -112,7 +112,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - bash: | #!/bin/bash green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 7ab95f1bf1..8d11ad3054 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -44,7 +44,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - script: | #!/bin/bash echo "##vso[build.updatebuildnumber]Deploying the SAP System defined in $(sap_system_folder)" diff --git a/deploy/pipelines/05-DB-and-SAP-installation.yaml b/deploy/pipelines/05-DB-and-SAP-installation.yaml index 8e8e4b86fe..96fdd975e2 100644 --- a/deploy/pipelines/05-DB-and-SAP-installation.yaml +++ b/deploy/pipelines/05-DB-and-SAP-installation.yaml @@ -142,7 +142,7 @@ stages: - template: templates\download.yaml parameters: getLatestFromBranch: true - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - bash: | #!/bin/bash # Exit immediately if a command exits with a non-zero status. 
diff --git a/deploy/pipelines/06-post-installation-tooling.yaml b/deploy/pipelines/06-post-installation-tooling.yaml index 34eb9338c0..dbb4dcd1b7 100644 --- a/deploy/pipelines/06-post-installation-tooling.yaml +++ b/deploy/pipelines/06-post-installation-tooling.yaml @@ -76,7 +76,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true submodules: true @@ -408,7 +408,7 @@ stages: parameters_folder: $[ stageDependencies.Preparation_for_Ansible.Preparation_step.outputs['Preparation.FOLDER'] ] DEPLOYMENT_REPO_PATH: $[ stageDependencies.Preparation_for_Ansible.Preparation_step.outputs['Preparation.DEPLOYMENT_REPO_PATH'] ] steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - script: | #!/bin/bash echo "nothing to do here right now" diff --git a/deploy/pipelines/12-remove-control-plane.yaml b/deploy/pipelines/12-remove-control-plane.yaml index f2bbd87f43..747cd637cd 100644 --- a/deploy/pipelines/12-remove-control-plane.yaml +++ b/deploy/pipelines/12-remove-control-plane.yaml @@ -59,7 +59,7 @@ stages: clean: all steps: - template: templates\download.yaml - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - bash: | #!/bin/bash # Treat unset variables as an error when substituting. @@ -407,7 +407,7 @@ stages: - template: templates\download.yaml parameters: getLatestFromBranch: true - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - task: AzureCLI@2 continueOnError: false inputs: diff --git a/deploy/pipelines/21-deploy-web-app.yaml b/deploy/pipelines/21-deploy-web-app.yaml index c62a9028b0..46edd5d0f8 100644 --- a/deploy/pipelines/21-deploy-web-app.yaml +++ b/deploy/pipelines/21-deploy-web-app.yaml @@ -78,7 +78,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true - bash: | diff --git a/deploy/pipelines/22-sample-deployer-config.yaml b/deploy/pipelines/22-sample-deployer-config.yaml index e9ff87dd91..c7e8e7bcbc 100644 --- a/deploy/pipelines/22-sample-deployer-config.yaml +++ b/deploy/pipelines/22-sample-deployer-config.yaml @@ -90,7 +90,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true - task: PowerShell@2 diff --git a/deploy/pipelines/23-levelup-configuration.yaml b/deploy/pipelines/23-levelup-configuration.yaml index 60b0ae3065..7c8cd801e1 100644 --- a/deploy/pipelines/23-levelup-configuration.yaml +++ b/deploy/pipelines/23-levelup-configuration.yaml @@ -54,7 +54,7 @@ stages: workspace: clean: all steps: - - task: PostBuildCleanup@3 + - task: PostBuildCleanup@4 - checkout: self persistCredentials: true submodules: true From e13031de43a4fef8fdbb1a599b70c67c03ae1372 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 27 Jun 2024 13:07:43 +0530 Subject: [PATCH 048/164] chore: Update clusterPrep-RedHat.yml to avoid resource discovery during location constraints --- .../5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index 51a1b93169..552922cf83 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -147,7 +147,7 @@ - name: "Configure 
location constraints" ansible.builtin.shell: > pcs constraint location {{ item.group_name }} - avoids {{ item.node }} + avoids {{ item.node }} resource-discovery=never register: nfs_location_constraints failed_when: false ignore_errors: true From ced537bcdc8c02b9b876f0491f05cac1e129f08e Mon Sep 17 00:00:00 2001 From: Steffen Bo Thomsen Date: Tue, 2 Jul 2024 14:57:13 +0200 Subject: [PATCH 049/164] Do not fail on saptune solution verify (#602) Set failed_when to false, so that saptune does not fail on N/A parameters. --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index d9195cba62..34f6fddd94 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -179,3 +179,4 @@ - name: "2.10.3 sap-notes : - Run saptune solution verify" ansible.builtin.command: "saptune solution verify {{ saptune_solution_to_apply }}" changed_when: false + failed_when: false From e86dff14a149d8c866b2ce5b4570f2212959c062 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 3 Jul 2024 19:56:07 +0530 Subject: [PATCH 050/164] chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists --- deploy/scripts/New-SDAFDevopsProject.ps1 | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index ac4dbd63d7..23ccc625b8 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -89,14 +89,14 @@ az extension add --name azure-devops --only-show-errors if ($Control_plane_subscriptionID.Length -eq 0) { Write-Host "$Env:ControlPlaneSubscriptionID is not set!" 
-ForegroundColor Red $Title = "Choose the subscription for the Control Plane" - $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) - Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) + $subscriptions = $(az account list --query "[].{Name:name}" --output tsv | Sort-Object) + Show-Menu($subscriptions) $selection = Read-Host $Title - $selectionOffset = [convert]::ToInt32($selection, 10) + 1 + #$selectionOffset = [convert]::ToInt32($selection, 10) + 1 - $ControlPlaneSubscriptionName = $subscriptions[$selectionOffset] + $ControlPlaneSubscriptionName = $subscriptions[$selection] az account set --subscription $ControlPlaneSubscriptionName $Control_plane_subscriptionID = (az account show --query id -o tsv) @@ -711,22 +711,19 @@ if ($authenticationMethod -eq "Managed Identity") { else { $Title = "Choose the subscription that contains the Managed Identity" - $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) - Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) + $subscriptions = $(az account list --query "[].{Name:name}" --output tsv | Sort-Object) + Show-Menu($subscriptions) $selection = Read-Host $Title - $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - - $subscription = $subscriptions[$selectionOffset] + $subscription = $subscriptions[$selection] Write-Host "Using subscription:" $subscription $Title = "Choose the Managed Identity" - $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output table | Sort-Object) - Show-Menu($identities[2..($identities.Length - 1)]) + $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output tsv | Sort-Object) + Show-Menu($identities) $selection = Read-Host $Title - $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - $identity = $identities[$selectionOffset] + $identity = $identities[$selection] Write-Host "Using Managed Identity:" $identity $id = $(az identity list --query "[?name=='$identity'].id" --subscription $subscription --output tsv) From 726af9217c7cd06ebfe154b5f622ee878f5a5970 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Wed, 3 Jul 2024 20:59:28 +0530 Subject: [PATCH 051/164] Revert "chore: Update New-SDAFDevopsProject.ps1 to use tsv output format for subscription and identity lists" This reverts commit e86dff14a149d8c866b2ce5b4570f2212959c062. --- deploy/scripts/New-SDAFDevopsProject.ps1 | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 23ccc625b8..ac4dbd63d7 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -89,14 +89,14 @@ az extension add --name azure-devops --only-show-errors if ($Control_plane_subscriptionID.Length -eq 0) { Write-Host "$Env:ControlPlaneSubscriptionID is not set!" 
-ForegroundColor Red $Title = "Choose the subscription for the Control Plane" - $subscriptions = $(az account list --query "[].{Name:name}" --output tsv | Sort-Object) - Show-Menu($subscriptions) + $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) + Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) $selection = Read-Host $Title - #$selectionOffset = [convert]::ToInt32($selection, 10) + 1 + $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - $ControlPlaneSubscriptionName = $subscriptions[$selection] + $ControlPlaneSubscriptionName = $subscriptions[$selectionOffset] az account set --subscription $ControlPlaneSubscriptionName $Control_plane_subscriptionID = (az account show --query id -o tsv) @@ -711,19 +711,22 @@ if ($authenticationMethod -eq "Managed Identity") { else { $Title = "Choose the subscription that contains the Managed Identity" - $subscriptions = $(az account list --query "[].{Name:name}" --output tsv | Sort-Object) - Show-Menu($subscriptions) + $subscriptions = $(az account list --query "[].{Name:name}" -o table | Sort-Object) + Show-Menu($subscriptions[2..($subscriptions.Length - 1)]) $selection = Read-Host $Title - $subscription = $subscriptions[$selection] + $selectionOffset = [convert]::ToInt32($selection, 10) + 1 + + $subscription = $subscriptions[$selectionOffset] Write-Host "Using subscription:" $subscription $Title = "Choose the Managed Identity" - $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output tsv | Sort-Object) - Show-Menu($identities) + $identities = $(az identity list --query "[].{Name:name}" --subscription $subscription --output table | Sort-Object) + Show-Menu($identities[2..($identities.Length - 1)]) $selection = Read-Host $Title + $selectionOffset = [convert]::ToInt32($selection, 10) + 1 - $identity = $identities[$selection] + $identity = $identities[$selectionOffset] Write-Host "Using Managed Identity:" $identity $id = $(az identity list --query "[?name=='$identity'].id" --subscription $subscription --output tsv) From a1c0d657de7be397e19bf0297c4a9086dac918a1 Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Mon, 22 Jul 2024 11:30:42 +0530 Subject: [PATCH 052/164] chore: Update accelerated networking configuration in Terraform modules, as enable_accelerated_networking is deprecated; new parameter is accelerated_networking_enabled --- .../modules/sap_system/anydb_node/vm-anydb.tf | 4 ++-- .../modules/sap_system/anydb_node/vm-observer.tf | 2 +- .../terraform-units/modules/sap_system/app_tier/vm-app.tf | 4 ++-- .../terraform-units/modules/sap_system/app_tier/vm-scs.tf | 4 ++-- .../modules/sap_system/app_tier/vm-webdisp.tf | 4 ++-- .../modules/sap_system/common_infrastructure/vm-anchor.tf | 2 +- .../terraform-units/modules/sap_system/hdb_node/vm-hdb.tf | 6 +++--- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index f1f8d5e25f..8d9719394b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -18,7 +18,7 @@ resource "azurerm_network_interface" "anydb_db" { location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags dynamic "ip_configuration" { iterator = pub @@ -77,7 
+77,7 @@ resource "azurerm_network_interface" "anydb_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = true + accelerated_networking_enabled = true tags = var.tags ip_configuration { diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf index 084cf3e463..5c42c95ad1 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-observer.tf @@ -17,7 +17,7 @@ resource "azurerm_network_interface" "observer" { ) resource_group_name = var.resource_group[0].name location = var.resource_group[0].location - enable_accelerated_networking = false + accelerated_networking_enabled = false tags = var.tags ip_configuration { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 47041e1856..5bf88f7ab6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -17,7 +17,7 @@ resource "azurerm_network_interface" "app" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.app_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { @@ -76,7 +76,7 @@ resource "azurerm_network_interface" "app_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.app_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.app_sizing.compute.accelerated_networking tags = var.tags ip_configuration { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index 1e172ef4fb..e226b5fdef 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "scs" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.scs_sizing.compute.accelerated_networking tags = var.tags dynamic "ip_configuration" { @@ -79,7 +79,7 @@ resource "azurerm_network_interface" "scs_admin" { ) location = var.resource_group[0].location resource_group_name = var.resource_group[0].name - enable_accelerated_networking = local.scs_sizing.compute.accelerated_networking + accelerated_networking_enabled = local.scs_sizing.compute.accelerated_networking ip_configuration { name = "IPConfig1" diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 69f8ce36a0..85d6d7e8b8 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -16,7 +16,7 @@ resource "azurerm_network_interface" "web" { ) location = var.resource_group[0].location 
  resource_group_name           = var.resource_group[0].name
-  enable_accelerated_networking = local.web_sizing.compute.accelerated_networking
+  accelerated_networking_enabled = local.web_sizing.compute.accelerated_networking
   tags                          = var.tags

   dynamic "ip_configuration" {
@@ -84,7 +84,7 @@ resource "azurerm_network_interface" "web_admin" {
   )
   location                      = var.resource_group[0].location
   resource_group_name           = var.resource_group[0].name
-  enable_accelerated_networking = local.web_sizing.compute.accelerated_networking
+  accelerated_networking_enabled = local.web_sizing.compute.accelerated_networking

   ip_configuration {
     name      = "IPConfig1"
diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf
index 35ed5409d7..bd705a9a9a 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf
@@ -12,7 +12,7 @@ resource "azurerm_network_interface" "anchor" {
   )
   resource_group_name           = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].name : azurerm_resource_group.resource_group[0].name
   location                      = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].location : azurerm_resource_group.resource_group[0].location
-  enable_accelerated_networking = var.infrastructure.anchor_vms.accelerated_networking
+  accelerated_networking_enabled = var.infrastructure.anchor_vms.accelerated_networking

   ip_configuration {
     name      = "IPConfig1"
diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
index 7839b80b0b..977f383ee2 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf
@@ -35,7 +35,7 @@ resource "azurerm_network_interface" "nics_dbnodes_admin" {
   location                      = var.resource_group[0].location
   resource_group_name           = var.resource_group[0].name
-  enable_accelerated_networking = true
+  accelerated_networking_enabled = true
   tags                          = var.tags

   ip_configuration {
@@ -74,7 +74,7 @@ resource "azurerm_network_interface" "nics_dbnodes_db" {
   location                      = var.resource_group[0].location
   resource_group_name           = var.resource_group[0].name
-  enable_accelerated_networking = true
+  accelerated_networking_enabled = true
   tags                          = var.tags
   dynamic "ip_configuration" {
     iterator = pub
@@ -131,7 +131,7 @@ resource "azurerm_network_interface" "nics_dbnodes_storage" {
   location                      = var.resource_group[0].location
   resource_group_name           = var.resource_group[0].name
-  enable_accelerated_networking = true
+  accelerated_networking_enabled = true
   tags                          = var.tags

   ip_configuration {

From 2900638eea8ec092dc1ab592f71191d2711202df Mon Sep 17 00:00:00 2001
From: Steffen Bo Thomsen
Date: Tue, 23 Jul 2024 06:53:28 +0200
Subject: [PATCH 053/164] Ensure we are in the right context when getting
 access tokens and subsequently running the ps1 script, where we already have
 the trust set up for the SSH key. Not doing it this way leads to either
 needing to manually create an SSH session inside pwsh with POSH-SSH to
 ensure the known_hosts entry is updated, or having to update the quality
 check script upstream to allow the -Force flag for the SSH session. (#603)
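The pattern behind the fix: resolve the ARM access token and client id as plain strings on the deployer first, and only then hand them to pwsh, so the PowerShell session inherits the azureadm user's context and its existing known_hosts trust. A stripped-down sketch of that two-step flow, assuming the az CLI is already authenticated on the controller; the task and variable names are illustrative:

```yaml
- name: Resolve an ARM access token on the controller
  delegate_to: localhost
  no_log: true
  ansible.builtin.command: az account get-access-token --query accessToken --output tsv
  register: arm_access_token
  changed_when: false

- name: Reuse the token inside a PowerShell session
  delegate_to: localhost
  no_log: true
  ansible.builtin.shell:
    cmd: >-
      Connect-AzAccount -AccountId $Env:ARM_CLIENT_ID
      -AccessToken {{ arm_access_token.stdout }}
    executable: /usr/local/bin/pwsh
  changed_when: false
```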
---
 .../tasks/run_check.yaml | 51 ++++++++++++-------
 1 file changed, 33 insertions(+), 18 deletions(-)

diff --git a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml
index f72ae7982b..957f226e53 100644
--- a/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml
+++ b/deploy/ansible/roles-misc/0.9-sap-on-azure-quality-checks/tasks/run_check.yaml
@@ -54,34 +54,49 @@
         - "High Availability:        {{ qc_high_availability }}"
       verbosity: 2

+- name: "SAP on Azure quality checks: - get access token in the context of azureadm on deployer"
+  delegate_to: localhost
+  no_log: true
+  ansible.builtin.command: az account get-access-token --subscription {{ qc_subscription_id }} --query "accessToken"
+  failed_when: qc_access_token_result.stdout == ""
+  register: qc_access_token_result
+
+- name: "SAP on Azure quality checks: - retrieve client id in the context of azureadm on deployer"
+  delegate_to: localhost
+  no_log: true
+  ansible.builtin.command: echo $ARM_CLIENT_ID
+  failed_when: gz_arm_client_id_result.stdout == ""
+  register: gz_arm_client_id_result

 - name: "SAP on Azure quality checks: - Run quality check"
   ansible.builtin.shell:
     cmd: >-
-      Connect-AzAccount -AccountId $Env:ARM_CLIENT_ID `
-      -AccessToken (az account get-access-token --subscription {{ qc_subscription_id }} | ConvertFrom-Json).accessToken `
-      -Subscription {{ qc_subscription_id }}
+      Connect-AzAccount -AccountId {{ gz_arm_client_id_result.stdout }} `
+      -AccessToken {{ qc_access_token_result.stdout }} `
+      -Subscription {{ qc_subscription_id }}

-      ./QualityCheck.ps1 -LogonWithUserSSHKey `
-      -VMOperatingSystem {{ qc_vm_operating_system }} `
-      -VMDatabase {{ qc_vm_database }} `
-      -VMRole {{ qc_vm_role }} `
-      -AzVMResourceGroup {{ qc_az_vm_resource_group }} `
-      -AzVMName {{ qc_az_vm_name }} `
-      -VMHostname {{ qc_vm_hostname }} `
-      -VMUsername {{ qc_vm_username }} `
-      -VMConnectionPort 22 `
-      -SubscriptionId {{ qc_subscription_id }} `
-      -SSHKey {{ _workspace_directory }}/sshkey `
-      -Hardwaretype VM `
-      -SID {{ qc_sid }} `
-      -HighAvailability {{ '$' ~ qc_high_availability }} `
-      -OutputDirName {{ _workspace_directory }}/quality_assurance
+      ./QualityCheck.ps1 -LogonWithUserSSHKey `
+        -VMOperatingSystem {{ qc_vm_operating_system }} `
+        -VMDatabase {{ qc_vm_database }} `
+        -VMRole {{ qc_vm_role }} `
+        -AzVMResourceGroup {{ qc_az_vm_resource_group }} `
+        -AzVMName {{ qc_az_vm_name }} `
+        -VMHostname {{ qc_vm_hostname }} `
+        -VMUsername {{ qc_vm_username }} `
+        -VMConnectionPort 22 `
+        -SubscriptionId {{ qc_subscription_id }} `
+        -SSHKey {{ _workspace_directory }}/sshkey `
+        -Hardwaretype VM `
+        -SID {{ qc_sid }} `
+        -HighAvailability {{ '$' ~ qc_high_availability }} `
+        -OutputDirName {{ _workspace_directory }}/quality_assurance
   args:
     executable: "/usr/local/bin/pwsh"
     chdir: "/opt/microsoft/quality_check"
   no_log: true
   delegate_to: localhost
+  become_user: root
+  become: true
   register: quality_check_result

 - name: "SAP on Azure quality checks: - Debug quality check result"

From 6c6bf6fd1efc9efb8cf9b6ac44eb71283a921fab Mon Sep 17 00:00:00 2001
From: Jaskirat Singh <108129510+jaskisin@users.noreply.github.com>
Date: Wed, 24 Jul 2024 14:22:58 +0530
Subject: [PATCH 054/164] Fix for catching AHCO_INA_SERVICE delivery unit
 import failure (#605)

Co-authored-by: jasksingh
---
 deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml | 5 +++++
 1 file changed, 5 insertions(+)
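The change below is a small ordering trick inside a `block`/`rescue` pair: the rescue first checks the captured stderr for a known signature and raises a specific error code, and only falls through to the generic failure otherwise. A stripped-down sketch of the idea — the task names, installer path and error codes are illustrative, not taken from the repository:

```yaml
- name: Run an installer and classify its failures
  block:
    - name: Run the installer
      ansible.builtin.command: /tmp/installer.sh
      register: installer_result
  rescue:
    # Rescue tasks run top to bottom; put the most specific match first.
    - name: Fail with a specific message for a known error signature
      ansible.builtin.fail:
        msg: "INSTALL:0026: installer failed while importing a delivery unit."
      when: installer_result.stderr is search("Cannot import delivery unit")

    - name: Fail with the generic message otherwise
      ansible.builtin.fail:
        msg: "INSTALL:0022: installer failed."
```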
diff --git a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml
index 57b28315da..b772bba222 100644
--- a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml
+++ b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml
@@ -120,6 +120,11 @@
         failed_when: hana_installation.rc > 0
       when: hana_installation.rc == 1
   rescue:
+    - name: "Fail if HANA installation failed while importing the delivery unit AHCO_INA_SERVICE"
+      ansible.builtin.fail:
+        msg: "INSTALL:0026:Execute hdblcm failed at delivery unit AHCO_INA_SERVICE."
+      when: hana_installation.stderr is search(".*Import of delivery units failed.*Cannot import delivery unit.*AHCO_INA_SERVICE.tgz.*")
+
     - name: "Fail if HANA installation failed on second attempt."
       ansible.builtin.fail:
         msg: "INSTALL:0022:Execute hdblcm failed."

From 16070a8319163e6c883b0a3dd695036519476985 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Thu, 1 Aug 2024 20:46:28 +0300
Subject: [PATCH 055/164] Web App Component updates

---
 Webapp/SDAF/SDAFWebApp.csproj | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj
index 7231158be1..ed10178397 100644
--- a/Webapp/SDAF/SDAFWebApp.csproj
+++ b/Webapp/SDAF/SDAFWebApp.csproj
@@ -15,24 +15,24 @@
 [13 paired -/+ PackageReference version updates; the XML element bodies were lost in extraction]

From d4a095ee0e231b6708925f4a6f3d65f96b7e7911 Mon Sep 17 00:00:00 2001
From: Harm Jan Stam
Date: Mon, 5 Aug 2024 17:32:31 +0200
Subject: [PATCH 056/164] Several (bug)fixes for RHEL deployments and
 deployments in general (#604)

* Add fast_stop=no to pacemaker Filesystem resources (a sketch of the
  resulting resource definition follows this list)

Pacemaker isn't respecting the stop timeout on filesystem resources due to
the default setting fast_stop=yes. Without this setting fencing will occur:
if SAP (A)SCS / ERS isn't stopped in time, its processes are terminated and
restarted by sapstartsrv, and the node is fenced because the filesystem
can't be unmounted.

https://www.suse.com/support/kb/doc/?id=000020860
https://access.redhat.com/solutions/4801371

* Distribute systemd services between SCS / ERS nodes and stop services

Both (A)SCS and ERS systemd services should be present on the SCS and ERS
nodes, otherwise pacemaker only handles SCS on the SCS node and ERS on the
ERS node with the systemd integration.

* Add resource clear for move constraints on the (A)SCS resource group
* Bugfix: manage folders on local disks only after mounting the local disk
* sid_private_key isn't required
* Add sdu_secret prefix/suffix to manage custom Key Vault secret naming
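For readers who haven't met the attribute: with the Filesystem resource agent's default `fast_stop=yes`, the agent assumes an unused filesystem and stops immediately rather than honouring the configured stop timeout, which is what triggers the fencing chain described above. A hedged sketch of the resulting resource definition on RHEL — the SID, NFS path and timeout values are placeholders, not values from this repository:

```yaml
- name: Create the (A)SCS filesystem resource with fast_stop disabled
  ansible.builtin.shell: >
    pcs resource create fs_X00_ASCS Filesystem \
    device='10.0.0.4:/sapmnt/X00/ASCS00' \
    directory='/usr/sap/X00/ASCS00' fstype='nfs' \
    fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \
    op start interval=0 timeout=60 \
    op stop interval=0 timeout=120
  register: fs_resource
  changed_when: fs_resource.rc == 0
```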
---
 .../2.2-sapPermissions/tasks/main.yaml        |   1 +
 .../2.3-sap-exports/tasks/main.yaml           | 174 ++++--------------
 .../2.6-sap-mounts/tasks/main.yaml            | 152 ++++++---------
 .../tasks/5.6.4.0-cluster-RedHat.yml          |   4 +-
 .../tasks/5.6.4.0-cluster-Suse.yml            |   4 +-
 .../tasks/5.6.4.2-sap-resources-RedHat.yml    |  25 +--
 .../tasks/5.6.7-config-systemd-sap-start.yml  | 124 +++++++------
 .../sap_namegenerator/variables_global.tf     |   2 +
 .../key_vault_sap_system.tf                   |  10 +-
 .../common_infrastructure/variables_local.tf  |   2 +-
 10 files changed, 171 insertions(+), 327 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml
index c14832f0b9..59c144d58d 100644
--- a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml
@@ -16,6 +16,7 @@
     - { node_tier: 'hana', path: '/hana', mode: '0755', owner: 'root', group: 'root', state: 'directory' }
     - { node_tier: 'pas', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' }
     - { node_tier: 'app', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' }
+    - { node_tier: 'scs', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' }
   when:
     - item.node_tier == "all" or item.node_tier == node_tier
     - not users_created.stat.exists
diff --git a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml
index e92e1f5dc1..977d8c82ce 100644
--- a/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml
+++ b/deploy/ansible/roles-sap-os/2.3-sap-exports/tasks/main.yaml
@@ -1,72 +1,10 @@
 ---
-
-- name: "Exports: Create SAP Directories - {{ target_media_location }}"
-  ansible.builtin.file:
-    path: "{{ target_media_location }}"
-    state: directory
-    mode: 0755
+- name: "2.3 Exports: - Create SAP install export"
   when:
     - node_tier == 'scs'
-    - MULTI_SIDS is undefined
     - usr_sap_install_mountpoint is undefined
-
-- name: "Exports: Create SAP Directories - saptrans"
-  ansible.builtin.file:
-    path: "/usr/sap/trans"
-    state: directory
-    mode: 0755
-  when:
-    - node_tier == 'scs'
-    - MULTI_SIDS is undefined
-    - sap_trans is undefined
-
-- name: "Exports: Create SAP Directories - saptrans"
-  ansible.builtin.file:
-    path: "/sapmnt/{{ sap_sid | upper }}"
-    state: directory
-    mode: 0755
-  when:
-    - node_tier == 'scs'
-    - MULTI_SIDS is undefined
-    - sap_mnt is undefined
-
-
-- name: "Exports: Create SAP Directories for MSIDs"
-  ansible.builtin.file:
-    path: "{{ item.path }}"
-    state: directory
-    mode: 0755
-  loop:
-    - { path: '/sapmnt' }
-  when:
-    - node_tier == 'scs'
-    - MULTI_SIDS is defined
-
-# Create Folders under sapmnt whilst using Local disk for Multi-SID installation.
-- name: Create Filesystems under sapmnt block: - - name: Create Filesystems for multi-sid installation - ansible.builtin.file: - path: /sapmnt/{{ item.sid }} - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - loop: "{{ MULTI_SIDS }}" - -- name: "2.3 Exports: - Create SAP Directories (install)" - block: - - - name: "2.3 Exports: - Create SAP Directories (install)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '{{ target_media_location }}' } - - - name: "2.3 Exports: - NFS Server Config on Suse (install)" + - name: "2.3 Exports: - NFS Server Config on Suse (install)" ansible.builtin.lineinfile: path: "{{ item.path }}" regexp: "{{ item.regexp }}" @@ -76,7 +14,6 @@ mode: 0644 loop: - { path: '/etc/exports', regexp: '^{{ target_media_location }}', line: '{{ target_media_location }} *(rw,sync,no_wdelay,no_root_squash)' } - # - { tier: 'preparation', path: '/etc/sysconfig/nfs', regexp: '^NFS3_SERVER_SUPPORT=', line: 'NFS3_SERVER_SUPPORT="no"' } - { path: '/etc/sysconfig/nfs', regexp: '^NFS3_SERVER_SUPPORT=', line: 'NFS3_SERVER_SUPPORT="yes"' } - { path: '/etc/sysconfig/nfs', regexp: '^NFS4_SUPPORT=', line: 'NFS4_SUPPORT="yes"' } when: @@ -98,29 +35,19 @@ when: - distribution_id in ["redhat8", "redhat9"] +- name: "2.3 Exports: - Create SAP sapmnt export" when: - node_tier == 'scs' - - usr_sap_install_mountpoint is undefined - -- name: "2.3 Exports: - Create SAP Directories (sapmnt)" + - sap_mnt is undefined block: - - - name: "2.3 Exports: - Create SAP Directories (sapmnt)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/sapmnt/{{ sap_sid | upper }}' } - - - name: "2.3 Exports: - NFS Server Config on Suse (sapmnt)" + - name: "2.3 Exports: - NFS Server Config on Suse (sapmnt)" ansible.builtin.lineinfile: - path: "{{ item.path }}" - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - owner: root - group: root - mode: 0644 + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + owner: root + group: root + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/sysconfig/nfs', regexp: '^# RPCNFSDARGS=', line: 'RPCNFSDARGS="-N 2 -N 3 -U"' } @@ -130,12 +57,12 @@ - name: "2.3 Exports: - NFS Server Config on : {{ ansible_os_family | lower ~ ansible_distribution_major_version }} (sapmnt)" ansible.builtin.lineinfile: - path: "{{ item.path }}" - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - owner: root - group: root - mode: 0644 + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + owner: root + group: root + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/nfs.conf', regexp: '^# vers3=', line: ' vers3=y' } @@ -144,21 +71,11 @@ when: - distribution_id == "redhat8" +- name: "2.3 Exports: - Create SAP trans export" when: - node_tier == 'scs' - - sap_mnt is undefined - -- name: "2.3 Exports: - Create SAP Directories (saptrans)" + - sap_trans is undefined block: - - - name: "2.3 Exports: - Exports: Create SAP Directories (saptrans)" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/usr/sap/trans' } - - name: "Exports: NFS Server Config on : {{ ansible_distribution | lower ~ 
ansible_distribution_major_version }}" ansible.builtin.lineinfile: path: "{{ item.path }}" @@ -166,7 +83,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/sysconfig/nfs', regexp: '^# RPCNFSDARGS=', line: 'RPCNFSDARGS="-N 2 -N 3 -U"' } @@ -181,7 +98,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { path: '/etc/nfs.conf', regexp: '^# vers3=', line: ' vers3=y' } @@ -190,34 +107,6 @@ when: - distribution_id == "redhat8" - when: - - node_tier == 'scs' - - sap_trans is undefined - -- name: "Exports: Create SAP Directories for MSIDs" - ansible.builtin.file: - path: "{{ item.path }}" - state: directory - mode: 0755 - loop: - - { path: '/sapmnt' } - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - -# Create Folders under sapmnt whilst using Local disk for Multi-SID installation. -- name: Create Filesystems under sapmnt - block: - - name: Create Filesystems for multi-sid installation - ansible.builtin.file: - path: /sapmnt/{{ item.sid }} - state: directory - mode: 0755 - when: - - node_tier == 'scs' - - MULTI_SIDS is defined - loop: "{{ MULTI_SIDS }}" - - name: "Exports: NFS Server Config on Oracle Linux 8" ansible.builtin.lineinfile: path: "{{ item.path }}" @@ -225,7 +114,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { tier: 'preparation', path: '/etc/exports', regexp: '^/sapmnt/{{ sap_sid | upper }}', line: '/sapmnt/{{ sap_sid | upper }} *(rw,sync,no_wdelay,no_root_squash)' } - { tier: 'preparation', path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } @@ -246,7 +135,7 @@ line: "{{ item.line }}" owner: root group: root - mode: 0644 + mode: '0644' loop: - { tier: 'preparation', path: '/etc/exports', regexp: '^/usr/sap/trans', line: '/usr/sap/trans *(rw,sync,no_wdelay,no_root_squash)' } - { tier: 'preparation', path: '/etc/exports', regexp: '^{{ target_media_location }}', line: '{{ target_media_location }} *(rw,sync,no_wdelay,no_root_squash)' } @@ -266,7 +155,7 @@ line: "/sapmnt/{{ item.sid | upper }} *(rw,sync,no_wdelay,no_root_squash)" owner: root group: root - mode: 0644 + mode: '0644' loop: "{{ MULTI_SIDS }}" when: - (ansible_distribution | lower ~ ansible_distribution_major_version) == "oraclelinux8" @@ -280,25 +169,28 @@ - custom_exports is defined - name: "2.3 Exports: - Local NFS" + when: + - node_tier == 'scs' + - sap_trans is undefined or usr_sap_install_mountpoint is undefined or sap_mnt is undefined block: - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: 'nfsserver' + nfs_service: nfsserver when: "'SUSE' == ansible_os_family | upper" - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: "nfs-server" + nfs_service: nfs-server when: "'redhat8' == distribution_id or 'redhat9' == distribution_id" - name: "2.3 Exports: - Set the NFS Service name oracle {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: "nfs-server" + nfs_service: nfs-server when: "'oraclelinux8' == distribution_id" - name: "2.3 Exports: - Set the NFS Service name {{ distribution_id }}" ansible.builtin.set_fact: - nfs_service: 'nfs' + nfs_service: nfs when: "'redhat7' == 
distribution_id" - name: "2.3 Exports: - NFS Ensure the NFS service is started" @@ -311,7 +203,3 @@ ansible.builtin.systemd: name: "{{ nfs_service }}" state: restarted - - when: - - node_tier == 'scs' - - (sap_trans is undefined) or (usr_sap_install_mountpoint is undefined) or (sap_mnt is undefined) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml index 097f3c0609..1a57a9dbfe 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/main.yaml @@ -30,7 +30,6 @@ map(attribute='type') | sort | unique | list | length | int }}" - - name: "2.6 SAP Mounts: - choose the shared disk" ansible.builtin.set_fact: sharedpath: "{% if shareddisk == '1' %}/dev/vg_hana_shared/lv_hana_shared\ @@ -59,7 +58,6 @@ - "Shared path: {{ sharedpath }}" # Mount Filesystems - - name: "2.6 SAP Mounts: - Check if the 'sap' disk exists" ansible.builtin.set_fact: sap_disk_exists: "{{ disks | selectattr('host', 'defined') | @@ -70,9 +68,9 @@ - name: "2.6 SAP Mounts: - Mount local sap file systems" ansible.posix.mount: - src: '/dev/vg_sap/lv_usrsap' - path: '/usr/sap' - fstype: 'xfs' + src: /dev/vg_sap/lv_usrsap + path: /usr/sap + fstype: xfs opts: defaults state: mounted when: @@ -82,9 +80,9 @@ - name: "2.6 SAP Mounts: - Mount local kdump file path to save vmcore" ansible.posix.mount: - src: "/dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump" - path: '/usr/crash' - fstype: 'xfs' + src: /dev/vg_{{ node_tier | lower }}_kdump/lv_{{ node_tier | lower }}_kdump + path: /usr/crash + fstype: xfs opts: defaults state: mounted when: @@ -95,8 +93,8 @@ - name: "2.6 SAP Mounts: - Mount local file systems (shared)" ansible.posix.mount: src: "{{ sharedpath }}" - path: '/hana/shared' - fstype: 'xfs' + path: /hana/shared + fstype: xfs opts: defaults state: mounted when: @@ -105,9 +103,9 @@ - name: "2.6 SAP Mounts: - Mount local file systems (backup)" ansible.posix.mount: - src: '/dev/vg_hana_backup/lv_hana_backup' + src: /dev/vg_hana_backup/lv_hana_backup path: '{{ hana_backup_path }}' - fstype: 'xfs' + fstype: xfs opts: defaults state: mounted when: @@ -125,9 +123,9 @@ - name: "2.6 SAP Mounts: - Mount local file systems (hana data)" ansible.posix.mount: - src: '/dev/vg_hana_data/lv_hana_data' - path: '/hana/data' - fstype: 'xfs' + src: /dev/vg_hana_data/lv_hana_data + path: /hana/data + fstype: xfs opts: defaults state: mounted when: @@ -136,39 +134,28 @@ - name: "2.6 SAP Mounts: - Mount local file systems (hana log)" ansible.posix.mount: - src: '/dev/vg_hana_log/lv_hana_log' - path: '/hana/log' - fstype: 'xfs' + src: /dev/vg_hana_log/lv_hana_log + path: /hana/log + fstype: xfs opts: defaults state: mounted when: - node_tier == 'hana' - hana_log_mountpoint is undefined -- name: "Exports: Create SAP Trans MSIDs" +- name: "2.6 SAP Mounts: Create SAP Trans" ansible.builtin.file: - path: '/usr/sap/trans' + path: /usr/sap/trans state: directory - mode: 0755 + mode: '0755' when: - node_tier == 'scs' - - MULTI_SIDS is defined - - sap_trans is undefined - -- name: "Exports: Create SAP Trans on PAS and APP Servers" - ansible.builtin.file: - path: '/usr/sap/trans' - state: directory - mode: 0755 - when: - - node_tier in ['pas','app'] - sap_trans is undefined - # Mount SAP TransFilesystems - name: Mount Filesystems block block: - - name: Mount SAP Transport Filesystems when not using external NFS (all app tier) + - name: "2.6 SAP Mounts: Mount SAP Transport Filesystems when not 
using external NFS (all app tier)" ansible.posix.mount: src: "{{ item.src }}" path: "{{ item.path }}" @@ -181,25 +168,7 @@ - tier == 'sapos' - node_tier in ['pas', 'app'] - sap_trans is undefined - - nfs_server != ansible_hostname - rescue: - - name: Re-mount Filesystems when not using external NFS (app & pas) - ansible.builtin.debug: - msg: "Trying to remount sap transport " - - name: Re-mount Filesystems when not using external NFS (app & pas) - ansible.posix.mount: - src: "{{ item.src }}" - path: "{{ item.path }}" - fstype: "{{ item.type }}" - opts: defaults - state: remounted - loop: - - { type: 'nfs4', src: '{{ nfs_server }}:/usr/sap/trans', path: '/usr/sap/trans' } - when: - - tier == 'sapos' - - node_tier in ['pas', 'app'] - - sap_trans is undefined - - nfs_server != ansible_hostname + - nfs_server != ansible_hostname - name: "2.6 SAP Mounts: - Debug" ansible.builtin.debug: @@ -207,9 +176,9 @@ - name: "2.6 SAP Mounts: - Mount local install file system on SCS (when not using AFS)" ansible.posix.mount: - src: '/dev/vg_sap/lv_usrsapinstall' - path: '{{ target_media_location }}' - fstype: 'xfs' + src: /dev/vg_sap/lv_usrsapinstall + path: "{{ target_media_location }}" + fstype: xfs opts: defaults state: mounted when: @@ -225,15 +194,15 @@ ansible.builtin.file: path: "{{ tmp_directory }}" state: directory - mode: 0775 + mode: '0775' when: not tmp_dir.stat.isdir # Mount Filesystems - name: "2.6 SAP Mounts: - Mount local sapmnt on (scs) {{ ansible_hostname }}" ansible.posix.mount: - src: '/dev/vg_sap/lv_sapmnt' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'xfs' + src: /dev/vg_sap/lv_sapmnt + path: /sapmnt/{{ sap_sid | upper }} + fstype: xfs opts: defaults state: mounted when: @@ -244,15 +213,12 @@ - "'scs' in supported_tiers" - name: "2.6 SAP Mounts: - Create SAP Directories (sapmnt)" - become: true - become_user: root ansible.builtin.file: - owner: '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}' + owner: "{% if platform == 'SYBASE' %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}" group: sapsys - mode: 0755 - path: "/sapmnt/{{ sap_sid | upper }}" + mode: '0755' + path: /sapmnt/{{ sap_sid | upper }} state: directory - recurse: true when: - node_tier not in ['oracle-asm', 'hana'] @@ -260,9 +226,9 @@ block: - name: "2.6 SAP Mounts: - Mount sapmnt file system when not using external NFS (all app tier)" ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}" + path: /sapmnt/{{ sap_sid | upper }} + fstype: nfs4 opts: defaults state: mounted when: @@ -270,16 +236,16 @@ - node_tier in ['pas', 'app', 'ers', 'oracle', 'db2', 'sybase'] - sap_mnt is undefined - MULTI_SIDS is undefined - - nfs_server != ansible_hostname + - nfs_server != ansible_hostname rescue: - name: "2.6 SAP Mounts: - Re-mount File systems when not using external NFS (app & pas)" ansible.builtin.debug: - msg: "Trying to remount sap_mnt" + msg: Trying to remount sap_mnt - name: "2.6 SAP Mounts: - Mount sapmnt file system when not using external NFS (all app tier)" ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}' - path: '/sapmnt/{{ sap_sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ sap_sid | upper }}" + path: /sapmnt/{{ sap_sid | upper }} + fstype: nfs4 opts: defaults state: remounted when: @@ -291,9 +257,9 @@ - name: "2.6 SAP Mounts: - Mount Install folder when not using AFS" ansible.posix.mount: 
- src: '{{ usr_sap_install_mount_point }}' - path: '{{ target_media_location }}' - fstype: 'nfs4' + src: "{{ usr_sap_install_mount_point }}" + path: "{{ target_media_location }}" + fstype: nfs4 opts: defaults state: mounted when: @@ -320,7 +286,7 @@ - name: "2.6 SAP Mounts: - Create file systems under sapmnt for oracle shared home installation" ansible.builtin.file: path: /sapmnt/{{ item.sid }} - owner: '{{ item.sidadm_uid }}' + owner: "{{ item.sidadm_uid }}" group: sapsys state: directory mode: '0644' @@ -332,12 +298,10 @@ - sap_mnt is undefined - name: "2.6 SAP Mounts: - Mount SAP File systems sapmnt for oracle shared home installation" - become: true - become_user: root ansible.posix.mount: - src: '{{ nfs_server }}:/sapmnt/{{ item.sid | upper }}' - path: '/sapmnt/{{ item.sid | upper }}' - fstype: 'nfs4' + src: "{{ nfs_server }}:/sapmnt/{{ item.sid | upper }}" + path: /sapmnt/{{ item.sid | upper }} + fstype: nfs4 opts: defaults state: mounted vars: @@ -366,7 +330,6 @@ - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - NFS_provider == 'AFS' - # Import this task only if the sap_mnt is defined, i.e. ANF is used - name: "2.6 SAP Mounts: - Import ANF tasks" ansible.builtin.import_tasks: 2.6.1-anf-mounts.yaml @@ -386,30 +349,24 @@ # - db_scale_out # - sap_mnt is defined or sap_trans is defined or usr_sap_install_mountpoint is defined - # Import this task only if the tier is ora. - name: "2.6 SAP Mounts: - Import Oracle tasks" ansible.builtin.import_tasks: "2.6.2-oracle-mounts.yaml" - when: - - node_tier == "oracle" + when: node_tier == "oracle" # Import this task only if the tier is ora for oracle-asm. - name: "2.6 SAP Mounts: - Import Oracle ASM pre-requisite tasks" ansible.builtin.import_tasks: "2.6.3-oracle-asm-prereq.yaml" - when: - - node_tier == "oracle-asm" + when: node_tier == "oracle-asm" # Import this task only if the tier is ora for oracle-asm. - name: "2.6 SAP Mounts: - Import Oracle ASM tasks" ansible.builtin.import_tasks: "2.6.3-oracle-asm-mounts.yaml" - when: - - node_tier == "oracle-asm" - # - tier == "ora" + when: node_tier == "oracle-asm" - name: "2.6 SAP Mounts: - Import Oracle observer tasks" ansible.builtin.import_tasks: "2.6.3-oracle-observer.yaml" - when: - - node_tier == "observer" + when: node_tier == "observer" - name: "2.6 SAP Mounts: - Import Oracle shared home tasks" ansible.builtin.import_tasks: "2.6.3-oracle-multi-sid.yaml" @@ -420,14 +377,12 @@ # Import this task only if the node_tier is db2. - name: "2.6 SAP Mounts: - Import DB2 tasks" ansible.builtin.import_tasks: "2.6.4-db2-mounts.yaml" - when: - - node_tier == "db2" + when: node_tier == "db2" # Import this task only if the node_tier is ase. 
- name: "2.6 SAP Mounts: - Import SYBASE tasks" ansible.builtin.import_tasks: "2.6.6-sybase-mounts.yaml" - when: - - node_tier == "sybase" + when: node_tier == "sybase" # Update : Deprecated as the scale out anf mount code functionality is now integrated into 2.6.1 and 2.6.8 @@ -454,10 +409,7 @@ when: - node_tier in ['oracle','oracle-asm','observer'] - - name: "2.6 SAP Mounts: - Set permissions" - become: true - become_user: root when: node_tier == "hana" block: - name: "2.6 SAP Mounts: - Set permissions on hana folders" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml index a0570ca3e2..6fc8ada131 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml @@ -35,7 +35,7 @@ ansible.builtin.shell: > pcs resource create fs_{{ sap_sid | upper }}_{{ instance_type | upper }} Filesystem \ device='{{ ascs_filesystem_device }}' \ - directory='{{ profile_directory }}' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ + directory='{{ profile_directory }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ @@ -178,7 +178,7 @@ ansible.builtin.shell: > pcs resource create fs_{{ sap_sid | upper }}_ERS Filesystem \ device='{{ ers_filesystem_device }}' \ - directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' force_unmount=safe options='sec=sys,vers=4.1' \ + directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml index 2a783a3597..74de0225cd 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml @@ -24,7 +24,7 @@ ansible.builtin.shell: > crm configure primitive fs_{{ sap_sid | upper }}_{{ instance_type | upper }} Filesystem \ device='{{ ascs_filesystem_device }}' \ - directory='{{ profile_directory }}' fstype='nfs' options='sec=sys,vers=4.1' \ + directory='{{ profile_directory }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} @@ -169,7 +169,7 @@ ansible.builtin.shell: > crm configure primitive fs_{{ sap_sid | upper }}_ERS Filesystem \ device='{{ ers_filesystem_device }}' \ - directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' options='sec=sys,vers=4.1' \ + directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} diff --git 
a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml index abe0919488..38bc5e99b9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-RedHat.yml @@ -148,17 +148,6 @@ - is_rhel_82_or_newer is defined - is_rhel_82_or_newer | default(false) - # - name: "5.6 SCSERS - validate that the drop-in file is active" - # when: - # - is_rhel_82_or_newer is defined - # - is_rhel_82_or_newer - # ansible.builtin.shell: >- - # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - # register: dropinfile_validation - # changed_when: false - # failed_when: dropinfile_validation.rc > 0 - - # /*---------------------------------------------------------------------------8 # | | # | Systemd-Based SAP Startup Framework - END | @@ -171,9 +160,6 @@ # | These are common tasks | # +------------------------------------+---------------------------------------*| -# - name: "5.6 SCSERS - RHEL - Enable Maintenance mode for the cluster" -# ansible.builtin.shell: pcs property set maintenance-mode=true - - name: "5.6 SCSERS - RHEL - Reboot and wait 5 minutes" ansible.builtin.debug: msg: "Reboot and wait 5 minutes" @@ -185,12 +171,19 @@ - name: "5.6 SCSERS - RHEL - Set the Cluster out of maintenance mode" ansible.builtin.shell: pcs property set maintenance-mode=false + run_once: true - name: "5.6 SCSERS - RHEL - Wait for 120 seconds for the cluster to stabilize" ansible.builtin.wait_for: timeout: 120 register: wait_for_connection_results +# SCS node has been put on standby and resources have moved. The resource move constraints need to be cleared +# Warning: Following resources have been moved and their move constraints are still in place: 'g-SID_ASCS' +- name: "5.6 SCSERS - RHEL - Clear move constraints" + ansible.builtin.shell: pcs resource clear g-{{ sap_sid | upper }}_{{ instance_type | upper }} + when: inventory_hostname == primary_instance_name + - name: "5.6 SCSERS - RHEL ensure SAPInstance resources are started" ansible.builtin.shell: | set -o pipefail @@ -202,7 +195,6 @@ run_once: true failed_when: false - - name: "5.6 SCSERS - RHEL - SCS cluster group validation" ansible.builtin.include_tasks: file: "5.6.6-validate.yml" @@ -211,8 +203,7 @@ become_user: root tags: - "5.6.6-validate" - when: - - inventory_hostname == primary_instance_name + when: inventory_hostname == primary_instance_name - name: "5.6 SCSERS: Set Resources Flag" ansible.builtin.file: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml index 8d88753f42..0df7b494db 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.7-config-systemd-sap-start.yml @@ -21,7 +21,7 @@ var: systemd_service_file_path verbosity: 2 -- name: "5.6 SCSERS - Set fact for the systemd services existance" +- name: "5.6 SCSERS - Set facts for the systemd services and files" ansible.builtin.set_fact: systemd_service_names: "{{ systemd_service_file_path.results @@ -29,6 +29,12 @@ | map(attribute='stat.path') | regex_replace('/etc/systemd/system/', '') }}" + scs_systemd_files: + - { file: "SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service", directory: 
/etc/systemd/system } + - { file: "10-SAP{{ sap_sid | upper }}-{{ scs_instance_number }}.rules", directory: /etc/polkit-1/rules.d } + ers_systemd_files: + - { file: "SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service", directory: /etc/systemd/system } + - { file: "10-SAP{{ sap_sid | upper }}-{{ ers_instance_number }}.rules", directory: /etc/polkit-1/rules.d } - name: "5.6 SCSERS - Show fact for the systemd services existance" ansible.builtin.debug: @@ -40,77 +46,83 @@ - systemd_service_names is defined - systemd_service_names | length > 0 block: - # - name: "5.6 SCSERS - Disable the services if they exist" - # ansible.builtin.systemd: - # name: "{{ service_name }}" - # enabled: false - # failed_when: false - # loop: "{{ systemd_service_names }}" - # loop_control: - # loop_var: service_name - - name: "5.6 SCSERS - Disable and Stop the services if they exist" - become: true - become_user: root - ansible.builtin.systemd: - name: "{{ service_name }}" - enabled: false - state: "stopped" - failed_when: false - loop: "{{ systemd_service_names }}" - loop_control: - loop_var: service_name + - name: "5.6 SCSERS - Fetch systemd files from (A)SCS node" + when: node_tier == 'scs' + ansible.builtin.fetch: + src: "{{ item.directory }}/{{ item.file }}" + dest: /tmp/{{ sap_sid }}/ + flat: true + loop: "{{ scs_systemd_files }}" + + - name: "5.6 SCSERS - Fetch systemd files from ERS node" + when: node_tier == 'ers' + ansible.builtin.fetch: + src: "{{ item.directory }}/{{ item.file }}" + dest: /tmp/{{ sap_sid }}/ + flat: true + loop: "{{ ers_systemd_files }}" - - name: "5.6 SCSERS Pacemaker - (systemd) Create drop-in file" - become: true - become_user: root - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true + - name: "5.6 SCSERS - Copy ERS systemd files on (A)SCS node" + when: node_tier == 'scs' + ansible.builtin.copy: + src: /tmp/{{ sap_sid }}/{{ item.file }} + dest: "{{ item.directory }}" + mode: '0644' owner: root - group: root + group: sapinst + loop: "{{ ers_systemd_files }}" + + - name: "5.6 SCSERS - Copy (A)SCS systemd files on ERS node" + when: node_tier == 'ers' + ansible.builtin.copy: + src: /tmp/{{ sap_sid }}/{{ item.file }} + dest: "{{ item.directory }}" mode: '0644' - line: "[Service]" + owner: root + group: sapinst + loop: "{{ scs_systemd_files }}" + + - name: "5.6 SCSERS Pacemaker - Create systemd service override directory" + ansible.builtin.file: + path: "{{ override_dir }}" + state: directory + owner: root + group: root + mode: '0755' loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf" + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d loop_control: - loop_var: dropfile + loop_var: override_dir - - name: "5.6 SCSERS Pacemaker - (systemd) Update drop-in file" - become: true - become_user: root - ansible.builtin.lineinfile: - path: '{{ dropfile }}' - create: true - backup: true + - name: "5.6 SCSERS Pacemaker - Create systemd HA override files" + ansible.builtin.copy: + content: |- + [Service] + Restart=no + dest: "{{ dropfile }}" owner: root group: root mode: '0644' - insertafter: '^[Service]$' - line: "Restart=no" loop: - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf" - - "/etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number 
}}.service.d/HA.conf" + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service.d/HA.conf + - /etc/systemd/system/SAP{{ sap_sid | upper }}_{{ ers_instance_number }}.service.d/HA.conf loop_control: loop_var: dropfile - register: dropinfile - - name: "5.6 SCSERS - systemd reload" + - name: "5.6 SCSERS - Disable and Stop services" ansible.builtin.systemd: + name: "{{ service_name }}" daemon_reload: true - when: - - dropinfile.changed - - # - name: "5.6 SCSERS - validate that the drop-in file is active" - # when: - # ansible.builtin.shell: >- - # systemd-delta | grep 'SAP{{ sap_sid | upper }}_{{ scs_instance_number }}.service' - # register: dropinfile_validation - # changed_when: false - # failed_when: dropinfile_validation.rc > 0 - + enabled: false + state: stopped + failed_when: false + loop: + - SAP{{ sap_sid | upper }}_{{ scs_instance_number }} + - SAP{{ sap_sid | upper }}_{{ ers_instance_number }} + loop_control: + loop_var: service_name # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf index c1c54fda18..d113ad9d07 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/variables_global.tf @@ -352,6 +352,7 @@ variable "resource_prefixes" { "scs_fs_rule" = "" "scs_scs_rule" = "" "sdu_rg" = "" + "sdu_secret" = "" "tfstate" = "" "transport_volume" = "" "vm" = "" @@ -485,6 +486,7 @@ variable "resource_suffixes" { "scs_fs_rule" = "scsFs-rule" "scs_scs_rule" = "scsScs-rule" "sdu_rg" = "" + "sdu_secret" = "" "tfstate" = "tfstate" "transport_volume" = "transport" "usrsap" = "usrsap" diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf index c1dcaeeb6f..4aa763ad93 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/key_vault_sap_system.tf @@ -103,12 +103,11 @@ resource "random_password" "password" { override_special = "_%@" } - // Store the logon username in KV when authentication type is password resource "azurerm_key_vault_secret" "auth_username" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-username", local.prefix) + name = format("%s-username", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_auth_username key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id tags = var.tags @@ -118,7 +117,7 @@ resource "azurerm_key_vault_secret" "auth_username" { resource "azurerm_key_vault_secret" "auth_password" { provider = azurerm.main count = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0 - name = format("%s-password", local.prefix) + name = format("%s-password", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), "")) value = local.sid_auth_password key_vault_id = length(local.user_key_vault_id) > 0 ? 
data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id
   tags         = var.tags
@@ -134,12 +133,11 @@ resource "tls_private_key" "sdu" {
   rsa_bits  = 2048
 }

-
 // By default the SSH keys are stored in landscape key vault. By defining the authentication block the SDU keyvault
 resource "azurerm_key_vault_secret" "sdu_private_key" {
   provider     = azurerm.main
   count        = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0
-  name         = format("%s-sshkey", local.prefix)
+  name         = format("%s-sshkey", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), ""))
   value        = local.sid_private_key
   key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id
   tags         = var.tags
@@ -148,7 +146,7 @@ resource "azurerm_key_vault_secret" "sdu_public_key" {
   provider     = azurerm.main
   count        = local.enable_sid_deployment && local.use_local_credentials ? 1 : 0
-  name         = format("%s-sshkey-pub", local.prefix)
+  name         = format("%s-sshkey-pub", try(coalesce(var.naming.resource_prefixes.sdu_secret, local.prefix), ""))
   value        = local.sid_public_key
   key_vault_id = length(local.user_key_vault_id) > 0 ? data.azurerm_key_vault.sid_keyvault_user[0].id : azurerm_key_vault.sid_keyvault_user[0].id
   tags         = var.tags
diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf
index a21dd0aaea..35a5447222 100644
--- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf
+++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_local.tf
@@ -468,7 +468,7 @@ locals {
   sid_private_key = local.use_local_credentials ? (
                       try(
                         file(var.authentication.path_to_private_key),
-                        tls_private_key.sdu[0].private_key_pem
+                        try(tls_private_key.sdu[0].private_key_pem, "")
                      )) : (
                      ""
                    )
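The naming change above leans on a small Terraform idiom that recurs throughout these modules: `coalesce()` skips null and empty-string arguments, so an empty override falls back to the default prefix, while the outer `try()` turns a missing override (or all-empty input) into a harmless `""` instead of a plan error. A minimal self-contained sketch of the behaviour, with illustrative names:

```hcl
variable "name_override" {
  type    = string
  default = ""   # empty string means "use the default prefix"
}

locals {
  default_prefix = "DEV-WEEU-SAP01-X00"

  # coalesce() returns the first argument that is neither null nor "";
  # try() returns "" if coalesce() errors because every argument is empty.
  secret_prefix = try(coalesce(var.name_override, local.default_prefix), "")

  ssh_key_secret_name = format("%s-sshkey", local.secret_prefix)
}

output "secret_name" {
  value = local.ssh_key_secret_name
}
```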
From c226cf5e1a29f795616381ce37eecdfb8de2cb20 Mon Sep 17 00:00:00 2001
From: Kimmo Forss
Date: Tue, 6 Aug 2024 12:40:09 +0300
Subject: [PATCH 057/164] Update Web App to support NAT Gateway

---
 Webapp/SDAF/Models/CustomValidators.cs        | 17 ++++
 Webapp/SDAF/Models/LandscapeModel.cs          | 27 +++++-
 .../ParameterDetails/LandscapeDetails.json    | 96 +++++++++++++++++++
 .../ParameterDetails/LandscapeTemplate.txt    | 27 ++++++
 4 files changed, 166 insertions(+), 1 deletion(-)

diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs
index 3b6da854f1..bab3b8854c 100644
--- a/Webapp/SDAF/Models/CustomValidators.cs
+++ b/Webapp/SDAF/Models/CustomValidators.cs
@@ -242,6 +242,23 @@ public override bool IsValid(object value)
       }
     }

+    public class NATIdValidator : ValidationAttribute
+    {
+      public override bool IsValid(object value)
+      {
+        string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Network\/natGateways\/[a-zA-Z0-9-_]+$";
+        return RegexValidation(value, pattern);
+      }
+    }
+
+    public class PIPIdValidator : ValidationAttribute
+    {
+      public override bool IsValid(object value)
+      {
+        string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Network\/publicIPAddresses\/[a-zA-Z0-9-_]+$";
+        return RegexValidation(value, pattern);
+      }
+    }

     public class ScaleSetIdValidator : ValidationAttribute
     {
diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs
index a26f3c8e94..fe1f241b8b 100644
--- a/Webapp/SDAF/Models/LandscapeModel.cs
+++ b/Webapp/SDAF/Models/LandscapeModel.cs
@@ -1,4 +1,5 @@
 using AutomationForm.Models;
+using Microsoft.Azure.Pipelines.WebApi;
 using System.ComponentModel;
 using System.ComponentModel.DataAnnotations;
 using static AutomationForm.Models.CustomValidators;
@@ -430,8 +431,32 @@ public bool IsValid()

     public string ams_instance_name { get; set; }

-    [AMSIdValidator(ErrorMessage = "Invalid User Assigned id")]
+    [AMSIdValidator(ErrorMessage = "Invalid Workspace id")]
     public string ams_laws_arm_id { get; set; }
+
+    /*---------------------------------------------------------------------------8
+    |                                                                             |
+    |                          NAT Gateway information                            |
+    |                                                                             |
+    +------------------------------------4--------------------------------------*/
+
+    public bool? deploy_nat_gateway { get; set; } = false;
+
+    public string nat_gateway_name { get; set; }
+
+
+    [NATIdValidator(ErrorMessage = "Invalid NAT Gateway id")]
+    public string nat_gateway_arm_id { get; set; }
+
+    public string[] nat_gateway_public_ip_zones { get; set; }
+
+    [PIPIdValidator(ErrorMessage = "Invalid Public IP id")]
+    public string nat_gateway_public_ip_arm_id { get; set; }
+
+    public int?
nat_gateway_idle_timeout_in_minutes { get; set; } + + public Tag[] nat_gateway_public_ip_tags { get; set; } + } } diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 52c4e85ac1..603d27afd3 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -1566,5 +1566,101 @@ "Display": 2 } ] + }, + { + "Section": "NAT Gateway", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#dns-support", + "Parameters": [ + { + "Name": "deploy_nat_gateway", + "Required": false, + "Description": "Defines if a NAT gateway will be created.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_name", + "Required": false, + "Description": "The name of the NAT Gateway", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_arm_id", + "Required": false, + "Description": "Defines the Azure resource id for the NAT gateway", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_zones", + "Required": false, + "Description": "Defines the zones for the NAT Gateway public IP", + "Type": "list", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "1", + "Value": "1" + }, + { + "Text": "2", + "Value": "2" + }, + { + "Text": "3", + "Value": "3" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_arm_id", + "Required": false, + "Description": "Azure resource id for the NAT Gateway public IP", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "management_dns_subscription_id", + "Required": false, + "Description": "Subscription for the DNS zone, if different from the management subscription", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "nat_gateway_idle_timeout_in_minutes", + "Required": false, + "Description": "The idle timeout in minutes for the NAT Gateway", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "nat_gateway_public_ip_tags", + "Required": false, + "Description": "Defines a list of tags for the NAT Gateway public IP", + "Type": "tag", + "Options": [], + "Overrules": "", + "Display": 2 + } + ] } + ] diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 67ddd31369..2d2c740c2c 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -601,3 +601,30 @@ $$ams_instance_name$$ # ams_laws_arm_id if provided, Azure resource id for the Log analytics workspace in AMS $$ams_laws_arm_id$$ + +#######################################4#######################################8 +# # +# NAT Gateway variables # +# # +#######################################4#######################################8 + +# If true, a NAT gateway will be created +$$deploy_nat_gateway$$ + +# If provided, the name of the NAT Gateway +$$nat_gateway_name$$ + +# If provided, the Azure resource id for the NAT Gateway +$$nat_gateway_arm_id$$ + +# If provided, the zones for the NAT Gateway public IP +$$nat_gateway_public_ip_zones$$ + +# If provided, Azure resource id for the NAT Gateway public IP +$$nat_gateway_public_ip_arm_id$$ + +# The idle timeout in minutes for the NAT Gateway +$$nat_gateway_idle_timeout_in_minutes$$ + +# If 
provided, the tags for the NAT Gateway public IP
$$nat_gateway_public_ip_tags$$

From be1849a111ee7119b9c7bab82512aded5d966467 Mon Sep 17 00:00:00 2001
From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
Date: Thu, 8 Aug 2024 10:32:29 -0700
Subject: [PATCH 058/164] Hotfix release after testing with new RHEL image
 (#611)

* update: add fixes encountered during RHEL 9.4 testing
* chore: Update Red Hat and SLES package versions for Red Hat 9.4
* update: add network rules to deployer diagnostic storage account
* chore: Update sap-vm-resources.tmpl to include additional server information
* chore: Add ${SID}_virtual_machines.json to git if it exists
* chore: Update sap_system module to use database_server_vm_resource_ids for database server VMs
* chore: Update sap_system module to include empty lists for SCS, application, and webdisp server VMs
* chore: Update sap_system module to use comma-separated database server VM resource IDs
* chore: Update sap_system module to include application server VM resource IDs
* chore: Refactor cluster_group_location task in ACSS registration role
* Refactor ACSS registration role to use 'ansible.builtin.command' instead of 'ansible.builtin.shell' for executing Azure CLI commands
* Refactor use_spn variable to default to false for all SAP deployment modules
* Refactor systemd process limit configuration for pacemaker cluster
---
 .../0.5-ACSS-registration/tasks/main.yaml     | 145 +++++++++++++---
 .../tasks/1.17.2.0-cluster-RedHat.yml         |  48 +++---
 .../tasks/1.17.2.0-cluster-Suse.yml           |  13 ++
 .../tasks/5.5.4.0-clusterPrep-RedHat.yml      |  28 ++--
 .../tasks/5.6.1-set_runtime_facts.yml         |   4 +-
 deploy/pipelines/01-deploy-control-plane.yaml |   1 +
 .../pipelines/03-sap-system-deployment.yaml   |   5 +
 .../bootstrap/sap_library/tfvar_variables.tf  |   2 +-
 .../run/sap_deployer/tfvar_variables.tf       |   2 +-
 .../run/sap_landscape/tfvar_variables.tf      |   2 +-
 .../run/sap_library/tfvar_variables.tf        |   2 +-
 deploy/terraform/run/sap_system/module.tf     |   1 +
 .../run/sap_system/tfvar_variables.tf         |   2 +-
 .../modules/sap_deployer/infrastructure.tf    |   6 +
 .../modules/sap_landscape/storage_accounts.tf |   6 +-
 .../modules/sap_library/storage_accounts.tf   |   2 +
 .../sap_system/app_tier/infrastructure.tf     |   4 +-
 .../sap_system/output_files/inventory.tf      |  11 ++
 .../output_files/sap-vm-resources.tmpl        |   4 +
 .../output_files/variables_global.tf          |   1 +
 20 files changed, 217 insertions(+), 72 deletions(-)
 create mode 100644 deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl

diff --git a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml
index 77e386572e..293a6c7b40 100644
--- a/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml
+++ b/deploy/ansible/roles-misc/0.5-ACSS-registration/tasks/main.yaml
@@ -4,10 +4,6 @@
 - name: "0.5.1 acss registration: - Set Python version {{ distribution_id }}"
   ansible.builtin.set_fact:
     python_version: "python3"
-
-- name: "0.0 Validations: - Set Python version {{ distribution_id }}"
-  ansible.builtin.set_fact:
-    python_version: "python2"
   when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12']

 - name: "0.5.1 acss registration: - Determine if SCS is running on {{ ansible_hostname }}"
@@ -75,19 +71,6 @@ crm_resource --resource g-{{ sap_sid | upper }}_{{ instance_type | upper }} --locate | cut -d ':' -f 2| cut -d " " -f 2
     when:
       - scs_high_availability
-      - ansible_os_family | upper == 'SUSE'
     register: cluster_group_location
     failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on']
    tags:
       - skip_ansible_lint
-
-  - name: "0.5.1 acss registration: - Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running"
-    ansible.builtin.shell: >-
-      set -o pipefail;
-      pcs constraint location show resources g-{{ sap_sid | upper }}_{{ instance_type | upper }} | grep "Node" | awk '{print $2}'
-    when:
-      - scs_high_availability
-      - ansible_os_family | upper == 'REDHAT'
-    register: cluster_group_location
-    failed_when: cluster_group_location.stdout != hostvars[ansible_hostname]['scs_running_on']
-    tags:
-      - skip_ansible_lint
@@ -107,17 +90,133 @@
 - name: "0.5.1 acss registration: - Set variables from Azure IMDS"
   delegate_facts: true
   delegate_to: localhost
+  ansible.builtin.set_fact:
+    acss_scs_instance_metadata: "{{ azure_metadata }}"
+  when:
+    - hostvars[ansible_hostname]['scs_running_on'] is defined
+    - ansible_hostname == hostvars[ansible_hostname]['scs_running_on']
+
+- name: "0.5.1 acss registration: - Register SAP System to
ACSS" + delegate_to: localhost + when: + - hostvars[ansible_hostname]['scs_running_on'] is defined + block: + - name: "0.5.1 acss registration: - Get Azure metadata from the VM where scs_running_on is defined" + ansible.builtin.set_fact: + az_instance_metadata: "{{ hostvars.localhost.acss_scs_instance_metadata }}" + + - name: "0.5.1 acss registration: - Print metadata" + ansible.builtin.debug: + var: az_instance_metadata + verbosity: 2 + + - name: "0.5.1 acss registration: - Set variables from Azure IMDS" + ansible.builtin.set_fact: + acss_resource_id: "{{ az_instance_metadata.json.compute.resourceId }}" + acss_subscription_id: "{{ az_instance_metadata.json.compute.subscriptionId }}" + acss_resource_group: "{{ az_instance_metadata.json.compute.resourceGroupName }}" + acss_location: "{{ az_instance_metadata.json.compute.location }}" + acss_sid: "{{ sap_sid | upper }}" + acss_instance_type: "{{ instance_type }}" + + - name: "0.5.1 acss registration: - Install [ACSS] cli extension" + ansible.builtin.shell: >- + az extension add --name workloads --yes || exit 1 + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - perform az login" ansible.builtin.command: >- - "az extension add --name workloads --yes || exit 1" + az login --identity --allow-no-subscriptions --output none + no_log: true + changed_when: false + + - name: "0.5.1 acss registration: - Get Access Token" + ansible.builtin.shell: >- + az account get-access-token --resource https://management.azure.com \ + --query accessToken -o tsv + register: acss_access_token + changed_when: false + no_log: true tags: - skip_ansible_lint - - name: "Create [ACSS] virtual instance" - ansible.builtin.command: "az workloads sap-virtual-instance create --sap-virtual-instance-name {{ acss_sid }} --resource-group {{ acss_resource_group }} --location {{ acss_location }} --environment {{ acss_environment }} --sap-product {{ acss_sap_product }} --configuration {{ acss_configuration }}" - when: - - ansible_hostname == primary_instance_name - - cluster_group_location.stdout != ansible_hostname + - name: "0.5.1 acss registration: - Generate a guid for the ACSS instance" + ansible.builtin.command: uuidgen + register: acss_guid + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - Check if we have [ACSS] virtual instance write" + ansible.builtin.shell: >- + az provider show --namespace Microsoft.Workloads \ + --query "resourceTypes[?resourceType=='sapVirtualInstances'].permissions[?contains(@.actions, 'Microsoft.Workloads/sapVirtualInstances/write')]" + register: acss_virtual_instance_write + changed_when: false + + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance parameters" + ansible.builtin.debug: + msg: + - "acss_resource_id: {{ acss_resource_id }}" + - "acss_subscription_id: {{ acss_subscription_id }}" + - "acss_resource_group: {{ acss_resource_group }}" + - "acss_location: {{ acss_location }}" + - "acss_sid: {{ acss_sid }}" + - "acss_instance_type: {{ acss_instance_type }}" + - "acss_environment: {{ acss_environment }}" + - "acss_sap_product: {{ acss_sap_product }}" + - "acss_guid: {{ acss_guid.stdout }}" + - "acss_vm_id: {{ hostvars[ansible_hostname]['scs_running_on'] }}" + - "acss_write_auth: {{ acss_virtual_instance_write.stdout }}" + verbosity: 2 tags: - skip_ansible_lint + - name: "0.5.1 acss registration: - Create [ACSS] virtual instance" + ansible.builtin.uri: + url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group 
}}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" + method: PUT + body_format: json + body: | + { + "properties": { + "environment": "{{ acss_environment }}", + "sapProduct": "{{ acss_sap_product }}", + "configuration": { + "configurationType": "Discovery", + "centralServerVmId": "{{ acss_resource_id }}" + } + }, + "location": "{{ acss_location }}" + } + # status_code: [200, 201] + headers: + Authorization: "Bearer {{ acss_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" + register: create_vis_response + failed_when: create_vis_response.json.properties.provisioningState != 'Accepted' and create_vis_response.json.properties.provisioningState != 'Succeeded' + no_log: false + + - name: "0.5.1 acss registration: - Debug [ACSS] virtual instance creation response" + ansible.builtin.debug: + msg: "{{ create_vis_response }}" + tags: + - skip_ansible_lint + + - name: "0.5.1 acss registration: - Check the registered [ACSS] virtual instance" + ansible.builtin.uri: + url: "https://management.azure.com/subscriptions/{{ acss_subscription_id }}/resourceGroups/{{ acss_resource_group }}/providers/Microsoft.Workloads/sapVirtualInstances/{{ acss_sid }}?api-version=2023-04-01" + method: GET + # status_code: [200, 201] + headers: + Authorization: "Bearer {{ acss_access_token.stdout }}" + x-ms-rpaas-new-resource: "true" + x-ms-client-request-id: "SDAF-{{ acss_guid.stdout }}" + register: get_vis_response + until: get_vis_response.json.properties.provisioningState == 'Succeeded' + retries: 10 + delay: 60 + no_log: true + ... diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 077aaab435..0d229f9d65 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -167,18 +167,18 @@ - name: "1.17 Generic Pacemaker - Ensure the STONTIH device is configured" ansible.builtin.shell: > - pcs stonith create rsc_st_azure fence_azure_arm - login="{{ fencing_spn_client_id }}" - passwd="{{ fencing_spn_client_pwd }}" - resourceGroup="{{ resource_group_name }}" - tenantId="{{ fencing_spn_tenant_id }}" - subscriptionId="{{ fencing_spn_subscription_id }}" - power_timeout=240 - pcmk_reboot_timeout=900 - pcmk_monitor_timeout=120 - pcmk_monitor_retries=4 - pcmk_action_limit=3 - pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + pcs stonith create rsc_st_azure fence_azure_arm \ + username="{{ fencing_spn_client_id }}" \ + password="{{ fencing_spn_client_pwd }}" \ + resourceGroup="{{ resource_group_name }}" \ + tenantId="{{ fencing_spn_tenant_id }}" \ + subscriptionId="{{ fencing_spn_subscription_id }}" \ + power_timeout=240 \ + pcmk_reboot_timeout=900 \ + pcmk_monitor_timeout=120 \ + pcmk_monitor_retries=4 \ + pcmk_action_limit=3 \ + pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" \ {%if not is_pcmk_ver_gt_204%}"pcmk_delay_max=15"{%endif%} when: - ansible_distribution_major_version in ["8", "9"] @@ -186,16 +186,16 @@ - name: "1.17 Generic Pacemaker - Ensure the STONTIH device is configured (MSI)" 
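+  # With msi=true, fence_azure_arm authenticates using the VM's managed identity,
+  # so the service principal login/passwd parameters used above are not needed.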
ansible.builtin.shell: > - pcs stonith create rsc_st_azure fence_azure_arm - msi=true - resourceGroup="{{ resource_group_name }}" - subscriptionId="{{ fencing_spn_subscription_id }}" - power_timeout=240 - pcmk_reboot_timeout=900 - pcmk_monitor_timeout=120 - pcmk_monitor_retries=4 - pcmk_action_limit=3 - pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + pcs stonith create rsc_st_azure fence_azure_arm \ + msi=true \ + resourceGroup="{{ resource_group_name }}" \ + subscriptionId="{{ fencing_spn_subscription_id }}" \ + power_timeout=240 \ + pcmk_reboot_timeout=900 \ + pcmk_monitor_timeout=120 \ + pcmk_monitor_retries=4 \ + pcmk_action_limit=3 \ + pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" \ {%if not is_pcmk_ver_gt_204%}"pcmk_delay_max=15"{%endif%} when: - ansible_distribution_major_version in ["8", "9"] @@ -388,10 +388,10 @@ - "{{ secondary_instance_name }}" - name: "1.17 Generic Pacemaker - Configure the resources in Pacemaker" - ansible.builtin.command: pcs resource create health-azure-events ocf:heartbeat:azure-events-az op monitor interval=10s + ansible.builtin.command: pcs resource create health-azure-events ocf:heartbeat:azure-events-az op monitor interval=10s timeout=240s op start timeout=10s start-delay=90s - name: "1.17 Generic Pacemaker - Ensure clone resource azure-events is configured" - ansible.builtin.command: pcs resource clone health-azure-events allow-unhealthy-nodes=true + ansible.builtin.command: pcs resource clone health-azure-events allow-unhealthy-nodes=true failure-timeout=120s - name: "1.17 Generic Pacemaker - Ensure maintenance mode is disabled" ansible.builtin.command: pcs property set maintenance-mode=false diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 62500369ee..2f5051702a 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -14,6 +14,19 @@ regexp: "^#?\\s*DefaultTasksMax=" line: "DefaultTasksMax=4096" register: raise_process_limit + when: (ansible_facts.packages['systemd'][0].version | float) < 234 + +# Create a drop in file for systemd.conf to raise the process limit in the directory +# /etc/systemd/system.conf.d and update the value of DefaultTasksMax to 4096 +- name: "1.17 Generic Pacemaker - Ensure Process limit is raised" + ansible.builtin.copy: + dest: /etc/systemd/system.conf.d/99-pacemaker.conf + content: | + [Manager] + DefaultTasksMax=4096 + mode: '0644' + register: raise_process_limit + when: (ansible_facts.packages['systemd'][0].version | float) > 233 # eth0 is the "db" NIC - name: "1.17 Generic Pacemaker - Ensure clustering can manage Virtual IPs on the Database Interface" diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index 552922cf83..e6f9471e0c 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -147,7 +147,7 @@ - name: "Configure location 
constraints" ansible.builtin.shell: > pcs constraint location {{ item.group_name }} - avoids {{ item.node }} resource-discovery=never + avoids {{ item.node }} register: nfs_location_constraints failed_when: false ignore_errors: true @@ -158,19 +158,19 @@ loop_var: item when: is_nfs_secondary_configured - # - name: "Configure location constraints" - # ansible.builtin.shell: > - # pcs constraint location {{ item.group_name }} - # rule score=-INFINITY resource-discovery=never \#uname eq {{ item.node }} - # register: nfs_location_constraints - # failed_when: false - # ignore_errors: true - # loop: - # - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_1', node: '{{ secondary_instance_name }}' } - # - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_2', node: '{{ primary_instance_name }}' } - # loop_control: - # loop_var: item - # when: is_nfs_secondary_configured + - name: "Configure location constraints" + ansible.builtin.shell: > + pcs constraint location {{ item.group_name }} + rule score=-INFINITY resource-discovery=never \#uname eq {{ item.node }} + register: nfs_location_constraints + failed_when: false + ignore_errors: true + loop: + - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_1', node: '{{ secondary_instance_name }}' } + - { group_name: 'g_hana_{{ db_sid | upper }}_NFS_2', node: '{{ primary_instance_name }}' } + loop_control: + loop_var: item + when: is_nfs_secondary_configured - name: "Check if location constraints did not error on {{ primary_instance_name }}" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml index f17e29eda1..918e60f17f 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.1-set_runtime_facts.yml @@ -79,7 +79,7 @@ clus_fs_mon_timeout: >- {%- set _timeoutvalue = 40 -%} {%- if (NFS_provider == "ANF") -%} - {%- if NFS_version != "NFSv3" -%} + {%- if NFS_version == "NFSv3" -%} {%- set _timeoutvalue = 40 -%} {%- elif NFS_version == "NFSv4.1" -%} {%- set _timeoutvalue = 105 -%} @@ -96,7 +96,7 @@ clus_sap_mon_timeout: >- {%- set _timeoutvalue = 60 -%} {%- if (NFS_provider == "ANF") -%} - {%- if NFS_version != "NFSv3" -%} + {%- if NFS_version == "NFSv3" -%} {%- set _timeoutvalue = 60 -%} {%- elif NFS_version == "NFSv4.1" -%} {%- set _timeoutvalue = 105 -%} diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index f4628b0578..f8ce940d34 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -251,6 +251,7 @@ stages: --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ --subscription $ARM_SUBSCRIPTION_ID --auto-approve --ado --only_deployer --msi else + export ARM_CLIENT_ID="$CP_ARM_CLIENT_ID" export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 8d11ad3054..719fd60763 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -382,6 +382,11 @@ stages: added=1 fi + if [ -f ${SID}_virtual_machines.json ]; then + git add ${SID}_virtual_machines.json + added=1 + fi + 
if [ 1 == $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 6653d9f325..d792b636aa 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -39,7 +39,7 @@ variable "place_delete_lock_on_resources" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } #######################################4#######################################8 diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 83ac018ba2..cab0027079 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -562,7 +562,7 @@ variable "add_system_assigned_identity" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 62f59a5901..016b76254f 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -404,7 +404,7 @@ variable "automation_path_to_private_key" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } variable "user_assigned_identity_id" { diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index b2785da311..ff1509c3c9 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -45,7 +45,7 @@ variable "short_named_endpoints_nics" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } #######################################4#######################################8 diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index c6b7aac883..e4e6b743ef 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -396,6 +396,7 @@ module "output_files" { scs_instance_number = var.scs_instance_number scs_server_loadbalancer_ip = module.app_tier.scs_server_loadbalancer_ip scs_server_ips = module.app_tier.scs_server_ips + scs_server_vm_resource_ids = module.app_tier.scs_vm_ids scs_server_secondary_ips = module.app_tier.scs_server_secondary_ips scs_vm_names = module.app_tier.scs_vm_names use_local_credentials = module.common_infrastructure.use_local_credentials diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 151cfff01f..3e3577a903 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -351,7 +351,7 @@ variable "automation_path_to_private_key" { variable "use_spn" { description = "Log in using a service principal when performing the deployment" - default = true + default = false } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf 
b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index cf2699a187..b40a56d9f4 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -88,6 +88,12 @@ resource "azurerm_storage_account" "deployer" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false shared_access_key_enabled = var.deployer.shared_access_key_enabled + network_rules { + default_action = "Deny" + virtual_network_subnet_ids = [azurerm_subnet.subnet_mgmt[0].id] + } + cross_tenant_replication_enabled = false + depends_on = [ azurerm_subnet.subnet_mgmt ] } data "azurerm_storage_account" "deployer" { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index d1c2296a9e..6e0dc17b94 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -28,7 +28,7 @@ resource "azurerm_storage_account" "storage_bootdiag" { enable_https_traffic_only = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - + cross_tenant_replication_enabled = false tags = var.tags } @@ -145,7 +145,7 @@ resource "azurerm_storage_account" "witness_storage" { enable_https_traffic_only = true min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled tags = var.tags @@ -293,6 +293,7 @@ resource "azurerm_storage_account" "transport" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled network_rules { @@ -511,6 +512,7 @@ resource "azurerm_storage_account" "install" { allow_nested_items_to_be_public = false enable_https_traffic_only = false min_tls_version = "TLS1_2" + cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled tags = var.tags network_rules { diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 4b36ff4f80..f24d54e4b2 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -31,6 +31,7 @@ resource "azurerm_storage_account" "storage_tfstate" { enable_https_traffic_only = true + cross_tenant_replication_enabled = false shared_access_key_enabled = var.storage_account_sapbits.shared_access_key_enabled blob_properties { @@ -304,6 +305,7 @@ resource "azurerm_storage_account" "storage_sapbits" { allow_nested_items_to_be_public = false + cross_tenant_replication_enabled = false public_network_access_enabled = var.storage_account_sapbits.public_network_access_enabled routing { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index 7ca4ec7ba4..b29f6228db 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -198,7 +198,7 @@ resource "azurerm_lb_rule" "scs" { backend_address_pool_ids = [azurerm_lb_backend_address_pool.scs[0].id] 
probe_id = azurerm_lb_probe.scs[0].id enable_floating_ip = true - enable_tcp_reset = true + enable_tcp_reset = false idle_timeout_in_minutes = var.idle_timeout_scs_ers } @@ -230,7 +230,7 @@ resource "azurerm_lb_rule" "ers" { backend_address_pool_ids = [azurerm_lb_backend_address_pool.scs[0].id] probe_id = azurerm_lb_probe.scs[1].id enable_floating_ip = true - enable_tcp_reset = true + enable_tcp_reset = false idle_timeout_in_minutes = var.idle_timeout_scs_ers } diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index ecadfbcd64..e4feea4c7e 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -346,3 +346,14 @@ resource "local_file" "sap_inventory_for_wiki_md" { file_permission = "0660" directory_permission = "0770" } + + +resource "local_file" "sap_vms_resource_id" { + content = templatefile(format("%s/sap-vm-resources.tmpl", path.module), { + scs_server_vms = length(var.scs_server_vm_resource_ids) > 0 ? element(var.scs_server_vm_resource_ids, 0) : "" + } + ) + filename = format("%s/%s_virtual_machines.json", path.cwd, var.sap_sid) + file_permission = "0660" + directory_permission = "0770" +} diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl new file mode 100644 index 0000000000..0f4d8ac37b --- /dev/null +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-vm-resources.tmpl @@ -0,0 +1,4 @@ +{ + "configurationType": "Discovery", + "centralServerVmId": "${scs_server_vms}" +} \ No newline at end of file diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 8ad31ffff5..8ba41d10e4 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -172,6 +172,7 @@ variable "scs_server_count" { } variable "scs_server_ips" { description = "List of IP addresses for the SCS Servers" } variable "scs_server_secondary_ips" { description = "List of secondary IP addresses for the SCS Servers" } +variable "scs_server_vm_resource_ids" { description = "List of Virtual Machine resource IDs for the SCS servers" } variable "scs_vm_names" { description = "List of VM names for the SCS Servers" } variable "shared_home" { description = "If defined provides shared-home support" } variable "sid_keyvault_user_id" { description = "Defines the names for the resources" } From 3b91c6a56f535881d9ae1e409a3ecde8266db77e Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Fri, 9 Aug 2024 01:58:01 -0700 Subject: [PATCH 059/164] Update os-packages.yaml (#613) --- deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 1afc5215ae..2e4041684f 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -434,6 +434,9 @@ packages: - { tier: 'ha', package: 
'python3-azure-identity', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' } sles_sap15.5: # - { tier: 'os', package: 'sle-module-public-cloud', state: 'present' } - { tier: 'os', package: 'python3-xml', node_tier: 'all', state: 'present' } @@ -449,7 +452,9 @@ packages: - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'absent' } - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'absent' } - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'absent' } - + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' } # Adding packages for Oracle linux 8.4 to start with, copied the list from RHEL. # Adding additional Oracle linux packages as per SAP Note 2069760 - Oracle Linux 7.x SAP Installation and Upgrade. Need to add the groupinstall command. oraclelinux8: From 4fc0598bf7b7bfeb48d54a30674b16b34801db91 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Mon, 12 Aug 2024 09:27:05 -0700 Subject: [PATCH 060/164] chore: Refactor Azure Fencing Agent creation in 1.17 Generic Pacemaker role (#614) --- .../tasks/1.17.2.0-cluster-Suse.yml | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 2f5051702a..892791ec8e 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -173,25 +173,25 @@ - name: "1.17 Generic Pacemaker - Enable Stonith" ansible.builtin.shell: | - crm configure property stonith-enabled=true + crm configure property stonith-enabled=true \ crm configure property concurrent-fencing=true register: crm_configure_result failed_when: crm_configure_result.rc > 1 - name: "1.17 Generic Pacemaker - Create Azure Fencing Agent" ansible.builtin.shell: > - crm configure primitive rsc_st_azure stonith:fence_azure_arm params - subscriptionId="{{ fencing_spn_subscription_id }}" - resourceGroup="{{ resource_group_name }}" - tenantId="{{ fencing_spn_tenant_id }}" - login="{{ fencing_spn_client_id }}" - passwd="{{ fencing_spn_client_pwd }}" - pcmk_monitor_retries=4 - pcmk_action_limit=3 - power_timeout=240 - pcmk_reboot_timeout=900 - pcmk_delay_max=15 - pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + crm configure primitive rsc_st_azure stonith:fence_azure_arm params \ + subscriptionId="{{ fencing_spn_subscription_id }}" \ + resourceGroup="{{ resource_group_name }}" \ + tenantId="{{ fencing_spn_tenant_id }}" \ + login="{{ fencing_spn_client_id }}" \ + passwd="{{ 
fencing_spn_client_pwd }}" \ + pcmk_monitor_retries=4 \ + pcmk_action_limit=3 \ + power_timeout=240 \ + pcmk_reboot_timeout=900 \ + pcmk_delay_max=15 \ + pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" \ op monitor interval=3600 timeout=120 when: - not use_msi_for_clusters or distribution_full_id in ["sles_sap12.4"] @@ -199,16 +199,16 @@ - name: "1.17 Generic Pacemaker - Create Azure Fencing Agent (MSI)" ansible.builtin.shell: > - crm configure primitive rsc_st_azure stonith:fence_azure_arm params - subscriptionId="{{ fencing_spn_subscription_id }}" - resourceGroup="{{ resource_group_name }}" - msi=true - pcmk_monitor_retries=4 - pcmk_action_limit=3 - power_timeout=240 - pcmk_reboot_timeout=900 - pcmk_delay_max=15 - pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" + crm configure primitive rsc_st_azure stonith:fence_azure_arm params \ + subscriptionId="{{ fencing_spn_subscription_id }}" \ + resourceGroup="{{ resource_group_name }}" \ + msi=true \ + pcmk_monitor_retries=4 \ + pcmk_action_limit=3 \ + power_timeout=240 \ + pcmk_reboot_timeout=900 \ + pcmk_delay_max=15 \ + pcmk_host_map="{{ primary_instance_name }}:{{ hostvars[primary_instance_name]['primary_vm_name'] }};{{ secondary_instance_name }}:{{ hostvars[secondary_instance_name]['secondary_vm_name'] }}" \ op monitor interval=3600 timeout=120 failed_when: crm_configure_result.rc > 1 when: From 16667b47df9efead8e150f1738d31dfcbe57177d Mon Sep 17 00:00:00 2001 From: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Date: Tue, 13 Aug 2024 12:36:49 +0200 Subject: [PATCH 061/164] Add SAP-CAL Integration for non-HA Installation (#608) * Add AVG support for Scale out scenarios (#577) * Add data and log volumes * Refactor AVG logic * Fix proximity_placement_group_id calculation in avg.tf * Refactor for_each condition in avg.tf * Refactor for_each condition in avg.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor volume creation logic in variables_local.tf * Refactor zone calculation logic in variables_local.tf * Refactor proximity_placement_group_id calculation in avg.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in vm-app.tf * Add dependency on azurerm_virtual_machine_data_disk_attachment.scs in infrastructure.tf * Refactor package update condition in 1.4.3-update-packages-RedHat.yaml --------- Co-authored-by: Kimmo Forss * Update subnet_cidr_storage in sap-parameters.tmpl * Update hosts jinja for client subnet * Update SAP-specific configuration playbook for HANA database scale-out scenario * Version update * Simplify Web App Identity management * Update Azure package versions in SDAFWebApp.csproj * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script * Add SLES 15.3, 15.4, and 15.5 repositories * Update Web Application authentication configuration script and simplify Web App Identity management * Refactor Web App Identity management and update authentication configuration script * Update Web Application authentication configuration script * Update Web Application authentication configuration script and simplify Web App Identity 
management * Commented out SSH trust relationship checks in 1.17.2-provision.yml * Revert "Commented out SSH trust relationship checks in 1.17.2-provision.yml" This reverts commit 09cd30de6003a891b5c8c31b4c96b495b676aa9b. * ACSS updates * Oracle simplification * Add AutoUpdate.Enabled configuration in 1.1-swap role and enable package cache update in 1.4-packages role * Update deployment type configuration in OS and SAP specific playbooks * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Update WAAgent package and restart service in 1.1-swap role * Updated key_vault_sap_landscape.tf * Revert "Updated key_vault_sap_landscape.tf" * Update WAAgent package and restart service in 1.1-swap role * Add SAP CAL Integration * Update AutoUpdate.Enabled configuration in 1.1-swap role and add Extensions.WaitForCloudInit configuration * Revert "Add SAP CAL Integration" This reverts commit adae6662ba478d9f1d4d0de7f5c175e4f5da739b. * Update WAAgent package and restart service in 1.4-packages role * Update waagent configuration check in 1.4-packages role * Update waagent configuration check and systemd service reload in 1.4-packages role * Update AutoUpdate.Enabled configuration and add Extensions.WaitForCloudInit configuration in 1.1-swap role * Update waagent configuration check and systemd service reload in 1.1-swap role * Update waagent configuration check and systemd service reload in 1.1-swap role * Update database_high_availability condition in playbook_04_00_01_db_ha.yaml * Add the ability to block app registration * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update web_instance_number and add web_sid variable in sap_system/transform.tf * Fix validation error message for web dispatcher sid in variables_global.tf * Remove chkconfig package from os-packages.yaml * Update systemd service file path in 5.6.7-config-systemd-sap-start.yml * Update OS version check for RHEL 8.2 and SLES 15 in 5.6.1-set_runtime_facts.yml * Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml * Update Oracle ASM backup process and fix file permissions * Fix file path in 1.4.0-packages-RedHat-prep.yaml * Update OS version check for RHEL 9.0 or newer in 1.4.0-packages-RedHat-prep.yaml * Update file path and preserve file permissions in 1.4.0-packages-RedHat-prep.yaml * Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml * Fix action values in playbook_04_00_01_db_ha.yaml and roles-db/4.1.3-ora-dg/tasks/main.yaml * Update wait time for StartService in 5.6 SCS/ERS Validation * Update Terraform version to 1.8.0 in deployment scripts and tfvar_variables.tf files * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Fix missing else statement in deploy control plane pipeline * Update virtual machine extension reference in vm.tf * Update virtual machine extension version to 1.0 in vm.tf * Fix missing else 
statement in deploy control plane pipeline * Update network interface and virtual machine counts in vm-observer.tf * Update database high availability configuration * Update use_spn property to false in LandscapeModel and SystemModel * Update Terraform and Ansible versions to 1.7.5 in deployment scripts and variables * Update Display value in SystemDetails.json * Fix validation condition in variables_global.tf * Add ORACLE Post Processing: Reboot after Enabling HugePages task * Fix typo in Oracle Data Guard - Observer: Change UID for Oracle user task * install passlib * Add patch_mode support * Update deployment playbook to set single_server fact based on host count * Update patch_mode configuration in Terraform files * Update file permissions in SAP deployment playbook * Update deployment playbooks to set single_server fact consistently * Fix waagent configuration in swap role * Fix indentation in swap role tasks/main.yaml * Fix cluster group move command in 5.6 SCS/ERS Validation playbook * Fix condition in 1.17-generic-pacemaker playbook to exclude node_tier 'hana' * Fix commented out corosync configuration in 1.17-generic-pacemaker playbook * Create the SID subfolder * Update verbosity level in 5.6.7-config-systemd-sap-start.yml * Add passlib * Simplify Python logic * Update app_bom_id variable in 5.3-app-install/tasks/main.yaml * Update passlib installation in Ansible playbooks * Update reboot timeout and post-reboot delay in 5.6.4.2-sap-resources-Suse.yml * Update swap role and package tasks * Fix condition in 1.17-generic-pacemaker playbook to exclude node_tier 'hana' * Fix failed_when condition in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * enable corosync and pacemaker on Suse * change from command to shell * Update verbosity level for debug message in 5.6.4.0-cluster-Suse.yml * Refactor command to shell in 5.6-scsers-pacemaker tasks * Refactor command to shell in 5.6-scsers-pacemaker tasks * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks * Refactor path in ora-dg-observer-setup.yaml to include sap_sid variable * Refactor cluster initialization commands in 5.6-scsers-pacemaker tasks and add SAP component installation check * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook * Refactor cluster initialization commands in 1.17-generic-pacemaker playbook and 5.6-scsers-pacemaker tasks * add missing quotes * Fix disk space validation in playbook_00_validate_parameters.yaml * Refactor SAP resource flag setting in Ansible playbooks * Refactor SAP component installation check in 5.6-scsers-pacemaker tasks * Refactor SAP resources installed message in 5.6-scsers-pacemaker tasks * Refactor SCS/ERS validation tasks in 5.6-scsers-pacemaker playbook * Refactor SAP resource flag setting in Ansible playbooks * Refactor ORACLE: Find MOPatch tasks in 4.1.0-ora-install playbook * support for pools with auto qos * support for pools with auto qos * support for pools with auto qos * provide a way to override the oracle user * Update Web Application Configuration documentation * Fix default value for SAP_installed in 5.6-scsers-pacemaker tasks * Fix default value for SAP_installed in 5.6-scsers-pacemaker tasks * Fix shell command in 5.6-scsers-pacemaker pre_checks.yml * Passwordless Web App * Passwordless * Update variable group 
creation in New-SDAFDevopsProject.ps1 script * Fix client_id reference in app_service.tf * Update packages * Update Web Application Configuration to use resource group scope for role assignments * Update Web Application Configuration documentation * Fix target_nodes value in 2.6.1-anf-mounts.yaml * Web App updates * Update enable_db_lb_deployment logic in variables_local.tf * Bump up the dotnet version * Remove PAT * Remove PAT * Fix TF_VAR_agent_pat assignment in deploy control plane pipeline * Fix PAT assignment in deploy control plane pipeline * Update TF_VAR_agent_pool assignment in deploy control plane pipeline * Add MSI registration * Fix typo * Update versionLabel to v3.11.0.2 in New-SDAFDevopsProject.ps1 * Fix typo in New-SDAFDevopsProject.ps1 + add PAT back for Control Plane * Update ANF mount paths in 2.6.1-anf-mounts.yaml * Fix PostBuildCleanup task in deploy control plane pipeline * Update PostBuildCleanup task to version 4 in deploy control plane pipeline * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update DEPLOYER folder and file validations in deploy control plane pipeline * Update deploy control plane pipeline with environment and location information * Update deploy control plane pipeline with Deployer TFvars variable * Update deploy control plane pipeline with Library TFvars variable * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update installer.sh to display parameter file and current directory * Update deploy control plane pipeline with Library and Deployer TFvars variables * Update SAP_AUTOMATION_REPO_PATH assignment in deploy control plane pipeline * Update PostBuildCleanup task to version 3 in deploy control plane pipeline * Update dotnet-sdk installation in configure_deployer.sh.tmpl * Update deploy control plane pipeline with TF_VAR_agent_pat variable * Update deploy control plane pipeline with Azure CLI version display * Update deploy control plane pipeline with Workload TFvars variable * Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to remove the 'recurse' option in the ansible.builtin.file task * Update deploy/ansible/roles-db/4.1.3-ora-dg/tasks/ora-dg-setup-secondary.yaml to fix failed_when condition in rman restore tasks * chore: Update app_service.tf to add WHICH_ENV variable * Update app_service.tf to allow specific app registrations * chore: Update NuGet.Packaging dependency to version 6.9.1 * chore: Update app_service.tf to remove unused app setting and add WHICH_ENV variable * chore: Update deploy control plane pipeline with removal of AZURE_DEVOPS_EXT_PAT environment variable * chore: Update AFS Mount task to exclude 'app' node tier * chore: Update hosts.j2 template to exclude virtual hosts for non-high availability scenarios * chore: Update New-SDAFDevopsProject.ps1 to improve App Registration creation process * Change the ID to add * chore: Update New-SDAFDevopsProject.ps1 to improve App Registration creation process * Add SAP-CAL Integration * Linting * chore: Update deploy control plane pipeline with necessary environment variables * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use idToken for ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use idToken for 
ARM_CLIENT_SECRET * chore: Update deploy control plane pipeline to use System.AccessToken for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to remove unused agent pool check * chore: Remove unused agent pool check in deploy control plane pipeline * chore: Update deploy control plane pipeline to use $(PAT) for AZURE_DEVOPS_EXT_PAT * changes to ERS group * chore: Update deploy control plane pipeline to improve error handling and logging * chore: Update deploy control plane pipeline to enable Azure AD authentication * chore: Update deploy control plane pipeline to extract deployer_random_id from environment file * chore: Improve error handling and logging in deploy control plane pipeline * chore: Update deploy control plane pipeline to extract deployer_random_id from environment file * chore: Update deploy control plane pipeline to create variable group variables for key vault, terraform remote storage subscription, and deployer random ID seed * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to fix typo in ARM_USE_AZUREAD variable * chore: Update deploy control plane pipeline to use $(PAT) instead of $(System.AccessToken) for AZURE_DEVOPS_EXT_PAT * chore: Update deploy control plane pipeline to improve error handling and logging * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update deploy control plane pipeline to remove unnecessary Azure login * chore: Update bootstrap flag to false in sap_library module * chore: Update storage account network rules for tfstate and sapbits * chore: Update dotnet-sdk installation to version 8.0 * chore: Update dotnet-sdk installation to latest version * chore: Update HttpClient usage in RestHelper.cs and Azure SDK versions in SDAFWebApp.csproj * chore: Update random_id_b64 format in output.tf files * chore: Update RestHelper.cs to accept a type parameter in the constructor * chore: Ignore changes to app_settings in azurerm_windows_web_app resource * chore: Update random_id_b64 format in output.tf files * chore: Update RestHelper.cs to use HttpClient instead of HttpClientGH * chore: Add Build Service user to Build Administrators group * Add the ability to authenticate using PAT * chore: Update RestHelper.cs to use HttpClient instead of HttpClientGH * Update on devops login * chore: Update New-SDAFDevopsProject.ps1 to use tsv output for project creation * chore: Refactor RestHelper.cs to use HttpClient and support PAT authentication * Change module name * update: SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. This commit updates the configuration of SAP ASCS/SCS/ERS start resources for SUSE - ENSA1 and ENSA2. * chore: Update SAP Directories creation in ansible playbook This commit updates the ansible playbook to create SAP Directories. It modifies the tasks to create the directories "/usr/sap/trans" and "/sapmnt/{{ sap_sid | upper }}". These changes improve the handling of SAP Transport Filesystems in the deployment process. * feat: Add additional destination port ranges for NSG rules This commit updates the NSG rules in the `sap_landscape` module to include additional destination port ranges. 
The destination port ranges for the `nsr_controlplane_app`, `nsr_controlplane_web`, `nsr_controlplane_storage`, `nsr_controlplane_db`, and `nsr_controlplane_admin` rules have been expanded to include ports 2049 and 111. * Update error message * Update SAP ASCS/SCS/ERS start resources configuration for SUSE - ENSA1 and ENSA2 when using simple mount. * Add the MSI to the project * Added debug statement to playbook_sapcal_integration.yaml * Added debug statement to playbook_sapcal_integration.yaml * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 839170ef4c76cc1b50a020e4ca3d5d3b1b20b932. * Revert "Added debug statement to playbook_sapcal_integration.yaml" This reverts commit 5170d0b0eaa69964306c16541568bf5325403345. * Skip all BOM related tasks if enable_sap_cal is true * Updated the variable name for consistency * Ensured tasks run with appropriate privileges * Store SAP-CAL API response/file in the repository * Lint code and set default values * Use a secure tempfile --------- Co-authored-by: Kimmo Forss Co-authored-by: Kimmo Forss Co-authored-by: devanshjain Co-authored-by: hdamecharla Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> --- deploy/ansible/action_plugins/public_api.py | 1783 +++++++++++++++++ .../playbook_00_validate_parameters.yaml | 10 + .../ansible/playbook_sapcal_integration.yaml | 122 ++ .../roles-misc/0.2-kv-secrets/tasks/main.yaml | 2 +- .../2.10-sap-notes/tasks/2.10.3.yaml | 1 + .../tasks/2.6-set_runtime_facts.yaml | 1 + .../tasks/5.6.4.2-sap-resources-Suse.yml | 1 - .../6.0.0-sapcal-install/defaults/main.yml | 11 + .../6.0.0-sapcal-install/tasks/main.yml | 76 + .../6.0.0-sapcal-install/vars/main.yml | 14 + deploy/ansible/vars/ansible-input-api.yaml | 5 + deploy/pipelines/07-sap-cal-installation.yaml | 404 ++++ .../templates/collect-calapi-file.yaml | 37 + .../07-sap-cal-installation-variables.yaml | 25 + deploy/scripts/New-SDAFDevopsProject.ps1 | 12 + deploy/terraform/run/sap_system/module.tf | 7 + .../run/sap_system/tfvar_variables.tf | 18 + .../sap_system/output_files/inventory.tf | 3 + .../output_files/sap-parameters.tmpl | 14 + .../output_files/variables_global.tf | 13 + 20 files changed, 2557 insertions(+), 2 deletions(-) create mode 100644 deploy/ansible/action_plugins/public_api.py create mode 100644 deploy/ansible/playbook_sapcal_integration.yaml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/tasks/main.yml create mode 100644 deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml create mode 100644 deploy/pipelines/07-sap-cal-installation.yaml create mode 100644 deploy/pipelines/templates/collect-calapi-file.yaml create mode 100644 deploy/pipelines/variables/07-sap-cal-installation-variables.yaml diff --git a/deploy/ansible/action_plugins/public_api.py b/deploy/ansible/action_plugins/public_api.py new file mode 100644 index 0000000000..30d572c8ce --- /dev/null +++ b/deploy/ansible/action_plugins/public_api.py @@ -0,0 +1,1783 @@ +#!/usr/bin/env python3.9 +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import json +import os +import tempfile +import requests +from cryptography.fernet import Fernet +from azure.common.exceptions import AuthenticationError +from msrest.exceptions import ClientRequestError +from azure.common.credentials import ServicePrincipalCredentials +from azure.keyvault import KeyVaultClient +from ansible.errors import 
AnsibleConnectionFailure, AnsibleActionFail +from ansible.utils.display import Display +from ansible.module_utils.urls import Request, ConnectionError +from six.moves.urllib.error import HTTPError, URLError +from ansible.plugins.action import ActionBase + +method_spec_product = dict( + method=dict(type="str", required=True), + calKeyvaultId=dict(type="str", required=True, no_log=True), + clientId=dict(type="str", no_log=True), + clientSecret=dict(type="str", no_log=True), +) +method_spec_progress = dict( + method=dict(type="str", required=True), + calKeyvaultId=dict(type="str", required=True, no_log=True), + clientId=dict(type="str", no_log=True), + clientSecret=dict(type="str", no_log=True), + systemId=dict(type="str", required=True), + outputDirectoryPath=dict(type="str", no_log=True), + outputFile=dict(type="str", no_log=True), +) +method_spec_deployment = dict( + method=dict(type="str", required=True), + outputDirectoryPath=dict(type="str", no_log=True), + outputFile=dict(type="str", no_log=True), + calKeyvaultId=dict(type="str", required=True, no_log=True), + clientId=dict(type="str", no_log=True), + clientSecret=dict(type="str", no_log=True), + tenantId=dict(type="str", no_log=True), + accountId=dict(type="str", required=True, no_log=True), + productId=dict(type="str", required=True, no_log=True), + cloudProvider=dict(type="str", required=True), + planTemplateId=dict(type="str", required=True, no_log=True), + planTemplateName=dict(type="str", required=True, no_log=True), + region=dict(type="str", default="eastus2"), + availabilityScenario=dict( + type="str", + choices=["non-ha", "hana-system-replication", "clustering"], + default="clustering", + ), + infrastructureParameterSet=dict( + type="dict", + required=True, + options=dict( + operatingSystem=dict( + type="str", default="SUSE/sles-sap-15-sp3/gen1/2022.11.09" + ), + privateDnsZone=dict(type="str", required=True), + reversePrivateDnsZone=dict(type="str", required=True, no_log=True), + transitNetwork=dict(type="str", required=True, no_log=True), + workloadNetwork=dict(type="str", required=True, no_log=True), + sharedServicesNetwork=dict(type="str", required=True, no_log=True), + sharedServicesSubnet=dict(type="str", required=True, no_log=True), + workloadNetworkHanaSubnet=dict(type="str", required=True, no_log=True), + workloadNetworkAsSubnet=dict(type="str", required=True, no_log=True), + technicalCommunicationUser=dict(type="str", required=True, no_log=True), + techUserPassword=dict(type="str", required=True, no_log=True), + maintenancePlannerTransaction=dict(type="str", required=True, no_log=True), + hanaVmSize=dict(type="str", required=False, default="Standard_E20ds_v5"), + centralServicesVmSize=dict( + type="str", required=False, default="Standard_D4ds_v5" + ), + enqueueReplicationServerVmSize=dict( + type="str", required=False, default="Standard_D4ds_v5" + ), + applicationServerVmSize=dict( + type="str", required=False, default="Standard_E4ds_v5" + ), + numberOfApplicationServers=dict(type="int", required=False, default="0"), + webDispatcherVmSize=dict( + type="str", required=False, default="Standard_D2s_v5" + ), + ), + ), + installationParameterSets=dict( + type="dict", + required=True, + apply_defaults=True, + options=dict( + clientId=dict( + type="str", + required_if=[("availabilityScenario", "==", "clustering")], + no_log=True, + ), + clientSecret=dict( + type="str", + required_if=[("availabilityScenario", "==", "clustering")], + no_log=True, + ), + hanaDeployment=dict( + type="dict", + apply_defaults=True, + 
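+                # Every option in the hanaDeployment block below has a default, so
+                # with apply_defaults=True the caller can omit the block entirely.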
options=dict(
+                    DBSID=dict(type="str", default="HDB"),
+                    DBSIDAdminUserId=dict(type="str", default="1050"),
+                    instanceNumber=dict(type="str", default="00"),
+                    frontendHostname=dict(type="str", default="vhdbdb"),
+                    primaryHanaPhysicalHostname=dict(type="str", default="phdbdbpr"),
+                    primaryHanaVirtualHostname=dict(type="str", default="vhdbdbpr"),
+                    secondaryHanaPhysicalHostname=dict(type="str", default="phdbdbsr"),
+                    secondaryHanaVirtualHostname=dict(type="str", default="vhdbdbsr"),
+                ),
+            ),
+            s4hanaDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    SID=dict(type="str", default="S4H"),
+                    SAPSysAdminUserId=dict(type="str", default="1079"),
+                    SAPSysAdminGroupId=dict(type="str", default="79"),
+                    sapGuiDefaultLanguage=dict(type="str", default="en"),
+                    SAPSystemAdditionalLanguages=dict(type="str", default=""),
+                    numberOfDialogWorkProcesses=dict(type="int", default="10"),
+                    numberOfBatchWorkProcesses=dict(type="int", default="7"),
+                ),
+            ),
+            centralServicesDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    ABAPMessageServerPort=dict(type="str", default="3600"),
+                    physicalHostname=dict(type="str", default="ps4hcs"),
+                    virtualHostname=dict(type="str", default="vs4hcs"),
+                ),
+            ),
+            enqueueReplicationServerDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="10"),
+                    physicalHostname=dict(type="str", default="ps4hers"),
+                    virtualHostname=dict(type="str", default="vs4hers"),
+                ),
+            ),
+            primaryApplicationServerDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    physicalHostname=dict(type="str", default="ps4hpas"),
+                    virtualHostname=dict(type="str", default="vs4hpas"),
+                ),
+            ),
+            additionalApplicationServersDeployment=dict(
+                type="list",
+                elements="dict",
+                apply_defaults=True,
+                options=dict(
+                    instanceNumber=dict(type="str", default="00"),
+                    physicalHostname=dict(type="str", default="ps4haas1"),
+                    virtualHostname=dict(type="str", default="vs4haas1"),
+                ),
+            ),
+            webDispatcherDeployment=dict(
+                type="dict",
+                apply_defaults=True,
+                options=dict(
+                    installationType=dict(
+                        type="str",
+                        choices=["Standalone", "Embedded", "None", "External"],
+                        default="None",
+                    ),
+                    primaryInstanceNumber=dict(type="str", default="00"),
+                    primaryPhysicalHostname=dict(type="str", default="ps4hwdpr"),
+                    primaryVirtualHostname=dict(type="str", default="vs4hwdpr"),
+                    secondaryInstanceNumber=dict(type="str", default="00"),
+                    secondaryPhysicalHostname=dict(type="str", default="ps4hwdsr"),
+                    secondaryVirtualHostname=dict(type="str", default="vs4hwdsr"),
+                    userIdOfSIDAdmin=dict(type="str", default="1080"),
+                    virtualHostname=dict(type="str", default="vs4hwdext"),
+                    fioriHostname=dict(type="str", default="vs4hwdext"),
+                    fioriHostPort=dict(type="int", default="44300"),
+                    productiveClientNumber=dict(type="str", default="500"),
+                ),
+            ),
+        ),
+    ),
+)
+method_spec_provisioning = dict(
+    method=dict(type="str", required=True),
+    outputDirectoryPath=dict(type="str", no_log=True),
+    outputFile=dict(type="str", no_log=True),
+    productId=dict(type="str", required=True, no_log=True),
+    planTemplateId=dict(type="str", no_log=True, default="default"),
+    availabilityScenario=dict(
+        type="str",
+        choices=["non-ha", "hana-system-replication", "clustering"],
+        default="clustering",
+    ),
+    calKeyvaultId=dict(type="str", required=True, no_log=True),
+    clientId=dict(type="str", no_log=True),
+    clientSecret=dict(type="str",
no_log=True), + tenantId=dict(type="str", no_log=True), + infrastructureParameterSet=dict( + type="dict", + required=True, + required_one_of=[ + ["domainName", "privateDnsZone"], + ["techUserPassword", "techUserPasswordReference"], + ], + mutually_exclusive=[ + ["domainName", "privateDnsZone"], + ["techUserPassword", "techUserPasswordReference"], + ], + options=dict( + privateDnsZone=dict(type="str", no_log=True), + domainName=dict(type="str", no_log=True), + secretStoreId=dict(type="str", required=True, no_log=True), + deploymentServerSubnet=dict(type="str", no_log=True), + executionEngineSubnet=dict(type="str", no_log=True), + technicalCommunicationUser=dict(type="str", required=True, no_log=True), + techUserPassword=dict(type="str", no_log=True, default=""), + techUserPasswordReference=dict(type="str", no_log=True), + remoteOsUser=dict(type="str", required=True, no_log=True), + deploymentServerResourceGroup=dict(type="str", required=False, no_log=True), + sshPublicKeySecretName=dict(type="str", required=True, no_log=True), + sshPrivateKeySecretName=dict(type="str", required=True, no_log=True), + parameters=dict(type="str", no_log=True), + ), + ), + installationParameterSets=dict( + type="dict", + required=True, + apply_defaults=True, + options=dict( + hanaDeployment=dict( + type="dict", + apply_defaults=True, + options=dict( + primaryVmResourceId=dict(type="str", required=True), + secondaryVmResourceId=dict(type="str", default=""), + loadBalancerResourceId=dict(type="str", default=""), + frontEndIp=dict(type="str", default=""), + DBSID=dict(type="str", default="HDB"), + DBSIDAdminUserId=dict(type="str", default="1050"), + instanceNumber=dict(type="str", default="00"), + frontendHostname=dict(type="str", default=""), + primaryPhysicalHostname=dict(type="str", default=""), + primaryVirtualHostname=dict(type="str", default=""), + secondaryPhysicalHostname=dict(type="str", default=""), + secondaryVirtualHostname=dict(type="str", default=""), + ), + ), + s4hanaDeployment=dict( + type="dict", + apply_defaults=True, + options=dict( + SID=dict(type="str", default="S4H"), + SAPSysAdminUserId=dict(type="str", default="1079"), + SAPSysAdminGroupId=dict(type="str", default="79"), + sapGuiDefaultLanguage=dict(type="str", default="en"), + SAPSystemAdditionalLanguages=dict(type="str", default=""), + numberOfDialogWorkProcesses=dict(type="str", default="10"), + numberOfBatchWorkProcesses=dict(type="str", default="7"), + ), + ), + centralServicesDeployment=dict( + type="dict", + apply_defaults=True, + options=dict( + vmResourceId=dict(type="str", required=True), + loadBalancerResourceId=dict(type="str", default=""), + frontEndIp=dict(type="str", default=""), + instanceNumber=dict(type="str", default="00"), + ABAPMessageServerPort=dict(type="str", default=""), + physicalHostname=dict(type="str", default=""), + virtualHostname=dict(type="str", default=""), + loadBalancerHostname=dict( + type="str", + required_if=[("availabilityScenario", "==", "clustering")], + ), + ), + ), + enqueueReplicationServerDeployment=dict( + type="dict", + apply_defaults=True, + options=dict( + vmResourceId=dict(type="str", default=""), + frontEndIp=dict(type="str", default=""), + instanceNumber=dict(type="str", default="10"), + physicalHostname=dict(type="str", default=""), + virtualHostname=dict(type="str", default=""), + loadBalancerHostname=dict(type="str"), + ), + ), + applicationServersDeployment=dict( + type="list", + elements="dict", + apply_defaults=True, + options=dict( + vmResourceId=dict(type="str", default=""), + 
instanceNumber=dict(type="str", default="00"), + physicalHostname=dict(type="str", default=""), + virtualHostname=dict(type="str", default=""), + ), + ), + fioriConfiguration=dict( + type="dict", + apply_defaults=True, + options=dict( + fioriHostname=dict(type="str", default=""), + fioriHostPort=dict(type="str", default="44300"), + productiveClientNumber=dict(type="str", default="500"), + ossUser=dict(type="str", default=""), + ossUserPassword=dict(type="str", default=""), + ossUserPasswordReference=dict(type="str", default=""), + ), + ), + webDispatcherDeployment=dict( + type="dict", + apply_defaults=True, + options=dict( + installationType=dict( + type="str", + choices=["Standalone", "Embedded", "None", "External"], + default="None", + ), + virtualHostname=dict(type="str", default=""), + primaryVmResourceId=dict(type="str", default=""), + primaryInstanceNumber=dict(type="str", default="00"), + primaryPhysicalHostname=dict(type="str", default=""), + primaryVirtualHostname=dict(type="str", default=""), + userIdOfSIDAdmin=dict(type="str", default="1080"), + secondaryVmResourceId=dict(type="str", default=""), + loadBalancerResourceId=dict(type="str", default=""), + frontEndIp=dict(type="str", default=""), + secondaryInstanceNumber=dict(type="str", default="00"), + secondaryPhysicalHostname=dict(type="str", default=""), + secondaryVirtualHostname=dict(type="str", default=""), + ), + ), + ), + ), +) + +required_together = [["clientId", "clientSecret", "tenantId"]] + +# Generate a key for encryption/decryption +FERNET_KEY = os.environ.get("FERNET_KEY", Fernet.generate_key().decode()) +fernet = Fernet(FERNET_KEY.encode()) + + +class SAPsystem: + def __init__(self, params): + self.input_params = params + method = params.get("method") + scenario = params.get("availabilityScenario") + self.infrastructureParameterSet = params.get("infrastructureParameterSet") + self.installationParameterSets = params.get("installationParameterSets") + webdisp_type = self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType") + if method == "deployment": + self.props = self.get_nonha_deployment_params() + if scenario == "hana-system-replication": + self.props.get("installationParameterSets").update( + self.get_ha_deployment_params() + ) + elif scenario == "clustering": + self.props.get("installationParameterSets").update( + self.get_ha_deployment_params() + ) + self.props.get("installationParameterSets").update( + self.get_cluster_deployment_params() + ) + if webdisp_type != "None": + if webdisp_type == "Standalone": + self.props["installationParameterSets"]["webDispatcherDeployment"][ + "parameters" + ] += self.get_webdisp_deployment_standalone_params().get( + "parameters" + ) + self.props["installationParameterSets"]["webDispatcherDeployment"][ + "parameters" + ] += self.get_webdisp_deployment_params().get("parameters") + if scenario != "non-ha": + self.props["installationParameterSets"][ + "webDispatcherDeployment" + ]["parameters"] += self.get_webdisp_ha_deployment_params().get( + "parameters" + ) + else: + self.props["installationParameterSets"]["webDispatcherDeployment"][ + "parameters" + ] += self.get_webdisp_deployment_params().get("parameters") + elif method == "software_provisioning": + self.props = self.get_nonha_provisioning_params() + if scenario == "hana-system-replication": + self.props.get("deploymentParameterSets").update( + self.get_ha_provisioning_params() + ) + elif scenario == "clustering": + self.props.get("deploymentParameterSets").update(
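+ # Clustering reuses the same HA parameter set as hana-system-replication; + # its load balancer and front-end IP entries also cover the cluster case. +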
self.get_ha_provisioning_params() + ) + + def clean_parameters(self, parameters): + # Filter out parameter dictionaries with value == "" or missing 'value' key + return [param for param in parameters if param.get("value") not in [None, ""]] + + def clean_structure(self, structure): + # Apply cleaning to the structure recursively + if isinstance(structure, dict): + cleaned_structure = {} + for k, v in structure.items(): + if k == "parameters" and isinstance(v, list): + cleaned_structure[k] = self.clean_parameters(v) + else: + cleaned_structure[k] = self.clean_structure(v) + return cleaned_structure + elif isinstance(structure, list): + return [self.clean_structure(item) for item in structure if item != ""] + else: + return structure + + def get_props(self): + return self.clean_structure(self.props) + + def get_nonha_deployment_params(self): + return { + "accountId": self.input_params.get("accountId"), + "productId": self.input_params.get("productId"), + "planTemplateId": self.input_params.get("planTemplateId"), + "planTemplateName": self.input_params.get("planTemplateName"), + "region": self.input_params.get("region"), + "cloudProvider": self.input_params.get("cloudProvider"), + "availabilityScenario": self.input_params.get("availabilityScenario"), + "infrastructureParameterSet": { + "operatingSystem": self.infrastructureParameterSet.get( + "operatingSystem" + ), + "privateDnsZone": self.infrastructureParameterSet.get("privateDnsZone"), + "reversePrivateDnsZone": self.infrastructureParameterSet.get( + "reversePrivateDnsZone" + ), + "transitNetwork": self.infrastructureParameterSet.get("transitNetwork"), + "workloadNetwork": self.infrastructureParameterSet.get( + "workloadNetwork" + ), + "sharedServicesNetwork": self.infrastructureParameterSet.get( + "sharedServicesNetwork" + ), + "sharedServicesSubnet": self.infrastructureParameterSet.get( + "sharedServicesSubnet" + ), + "workloadNetworkHanaSubnet": self.infrastructureParameterSet.get( + "workloadNetworkHanaSubnet" + ), + "workloadNetworkAsSubnet": self.infrastructureParameterSet.get( + "workloadNetworkAsSubnet" + ), + "hanaVmSize": self.infrastructureParameterSet.get("hanaVmSize"), + "centralServicesVmSize": self.infrastructureParameterSet.get( + "centralServicesVmSize" + ), + "enqueueReplicationServerVmSize": self.infrastructureParameterSet.get( + "enqueueReplicationServerVmSize" + ), + "applicationServerVmSize": self.infrastructureParameterSet.get( + "applicationServerVmSize" + ), + "numberOfApplicationServers": self.infrastructureParameterSet.get( + "numberOfApplicationServers" + ), + "webDispatcherVmSize": self.infrastructureParameterSet.get( + "webDispatcherVmSize" + ), + }, + "installationParameterSets": { + "downloadBinaries": { + "name": "Download Binaries", + "parameters": [ + { + "name": "technicalCommunicationUser", + "value": self.infrastructureParameterSet.get( + "technicalCommunicationUser" + ), + }, + { + "name": "techUserPassword", + "value": self.infrastructureParameterSet.get( + "techUserPassword" + ), + }, + { + "name": "maintenancePlannerTransaction", + "value": self.infrastructureParameterSet.get( + "maintenancePlannerTransaction" + ), + }, + ], + }, + "hanaDeployment": { + "name": "HANA Deployment", + "parameters": [ + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": 
self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaPhysicalHostname"), + }, + { + "name": "primaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaVirtualHostname"), + }, + ], + }, + "s4hanaDeployment": { + "name": "S/4HANA Deployment", + "parameters": [ + { + "name": "SID", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SID"), + }, + { + "name": "SAPSysAdminUserId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminUserId"), + }, + { + "name": "SAPSysAdminGroupId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminGroupId"), + }, + { + "name": "sapGuiDefaultLanguage", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("sapGuiDefaultLanguage"), + }, + { + "name": "SAPSystemAdditionalLanguages", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSystemAdditionalLanguages"), + }, + { + "name": "numberOfDialogWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfDialogWorkProcesses"), + }, + { + "name": "numberOfBatchWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfBatchWorkProcesses"), + }, + ], + }, + "centralServicesDeployment": { + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + "primaryApplicationServerDeployment": { + "name": "Primary Application Server Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "primaryApplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + "additionalApplicationServersDeployment": self.installationParameterSets.get( + "additionalApplicationServersDeployment" + ), + "webDispatcherDeployment": { + "name": "SAP Web Dispatcher and Fiori Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + } + ], + }, + }, + } + + def get_nonha_provisioning_params(self): + params = { + "productId": self.input_params.get("productId"), + "planTemplateId": self.input_params.get("planTemplateId"), + "availabilityScenario": self.input_params.get("availabilityScenario"), + "adaptiveDeployment": "false", + "dryRun": "false", + "infrastructureParameterSet": { + ### privateDnsZone or domainName is added ### + 
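### Exactly one of the two is accepted (they are mutually exclusive in the + ### argument spec); whichever is supplied is appended further below. +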
"deploymentServerSubnet": self.infrastructureParameterSet.get( + "deploymentServerSubnet" + ), + "executionEngineSubnet": self.infrastructureParameterSet.get( + "executionEngineSubnet" + ), + "osUser": self.infrastructureParameterSet.get("remoteOsUser"), + "secretStoreId": self.infrastructureParameterSet.get("secretStoreId"), + "sshPublicKeySecretName": self.infrastructureParameterSet.get( + "sshPublicKeySecretName" + ), + "sshPrivateKeySecretName": self.infrastructureParameterSet.get( + "sshPrivateKeySecretName" + ), + "deploymentServerResourceGroup": self.infrastructureParameterSet.get( + "deploymentServerResourceGroup" + ), + "parameters": [], + }, + "deploymentParameterSets": { + "downloadUser": { + "name": "Download User", + "parameters": [ + { + "name": "technicalCommunicationUser", + "value": self.infrastructureParameterSet.get( + "technicalCommunicationUser" + ), + }, + { + "name": "techUserPassword", + "value": self.infrastructureParameterSet.get( + "techUserPassword" + ), + }, + ], + }, + "hanaDeployment": { + "name": "HANA Deployment", + "parameters": [ + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVirtualHostname"), + }, + ], + }, + "s4hanaDeployment": { + "name": "S/4HANA Deployment", + "parameters": [ + { + "name": "SID", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SID"), + }, + { + "name": "SAPSysAdminUserId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminUserId"), + }, + { + "name": "SAPSysAdminGroupId", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSysAdminGroupId"), + }, + { + "name": "sapGuiDefaultLanguage", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("sapGuiDefaultLanguage"), + }, + { + "name": "SAPSystemAdditionalLanguages", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("SAPSystemAdditionalLanguages"), + }, + { + "name": "numberOfDialogWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfDialogWorkProcesses"), + }, + { + "name": "numberOfBatchWorkProcesses", + "value": self.installationParameterSets.get( + "s4hanaDeployment" + ).get("numberOfBatchWorkProcesses"), + }, + ], + }, + "centralServicesDeployment": { + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("vmResourceId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": 
"physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + "fioriConfiguration": { + "name": "SAP Fiori Configuration", + "parameters": [ + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("productiveClientNumber"), + }, + { + "name": "ossUser", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("ossUser"), + }, + { + "name": "ossUserPassword", + "value": self.installationParameterSets.get( + "fioriConfiguration" + ).get("ossUserPassword"), + }, + ], + }, + "webDispatcherDeployment": { + "name": "SAP Web Dispatcher Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + }, + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + ], + }, + }, + } + + self.transform_application_servers() + params["deploymentParameterSets"]["applicationServersDeployment"] = ( + self.installationParameterSets.get("applicationServersDeployment") + ) + + # Check if privateDnsZone is provided, and add it to infrastructure parameters if true + if self.infrastructureParameterSet.get("privateDnsZone") is not None: + params["infrastructureParameterSet"]["privateDnsZone"] = ( + self.infrastructureParameterSet.get("privateDnsZone") + ) + + # Check if domainName is provided, and add it to infrastructure parameters if true + if self.infrastructureParameterSet.get("domainName") is not None: + params["infrastructureParameterSet"]["domainName"] = ( + self.infrastructureParameterSet.get("domainName") + ) + + if self.infrastructureParameterSet.get("techUserPasswordReference") is not None: + new_parameter = { + "name": "passwordReference", + "value": self.infrastructureParameterSet.get( + "techUserPasswordReference" + ), + } + params["deploymentParameterSets"]["downloadUser"]["parameters"].append( + new_parameter + ) + if ( + self.installationParameterSets.get("fioriConfiguration").get( + "ossUserPasswordReference" + ) + is not None + ): + new_parameter = { + "name": "ossUserPasswordReference", + "value": self.installationParameterSets.get("fioriConfiguration").get( + "ossUserPasswordReference" + ), + } + 
params["deploymentParameterSets"]["fioriConfiguration"][ + "parameters" + ].append(new_parameter) + return params + + def get_ha_deployment_params(self): + return dict( + hanaDeployment={ + "name": "HANA Deployment", + "parameters": [ + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "frontendHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontendHostname"), + }, + { + "name": "primaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaPhysicalHostname"), + }, + { + "name": "primaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryHanaVirtualHostname"), + }, + { + "name": "secondaryHanaPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryHanaPhysicalHostname"), + }, + { + "name": "secondaryHanaVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryHanaVirtualHostname"), + }, + ], + }, + enqueueReplicationServerDeployment={ + "name": "Enqueue Replication Server Deployment", + "parameters": [ + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + ) + + def get_cluster_deployment_params(self): + return dict( + clustering={ + "name": "Service Principal for High Availability Cluster", + "parameters": [ + { + "name": "clientId", + "value": self.installationParameterSets.get("clientId"), + }, + { + "name": "clientSecret", + "value": self.installationParameterSets.get("clientSecret"), + }, + ], + } + ) + + def get_webdisp_deployment_standalone_params(self): + return dict( + parameters=( + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + ) + ) + + def get_webdisp_ha_deployment_params(self): + return dict( + parameters=( + { + "name": "secondaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryInstanceNumber"), + }, + { + "name": 
"secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVirtualHostname"), + }, + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("productiveClientNumber"), + }, + ) + ) + + def get_webdisp_deployment_params(self): + return dict( + parameters=( + { + "name": "fioriHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostname"), + }, + { + "name": "fioriHostPort", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("fioriHostPort"), + }, + { + "name": "productiveClientNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("productiveClientNumber"), + }, + ) + ) + + def get_ha_provisioning_params(self): + params = dict( + hanaDeployment={ + "name": "HANA Deployment", + "parameters": [ + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "DBSID", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSID"), + }, + { + "name": "DBSIDAdminUserId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("DBSIDAdminUserId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("instanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "secondaryVmResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryVmResourceId"), + }, + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontEndIp"), + }, + { + "name": "frontendHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("frontendHostname"), + }, + { + "name": "secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "hanaDeployment" + ).get("secondaryVirtualHostname"), + }, + ], + }, + centralServicesDeployment={ + "name": "ABAP SAP Central Services Deployment", + "parameters": [ + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "loadBalancerHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("loadBalancerHostname"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + 
"centralServicesDeployment" + ).get("frontEndIp"), + }, + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("vmResourceId"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("instanceNumber"), + }, + { + "name": "ABAPMessageServerPort", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("ABAPMessageServerPort"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "centralServicesDeployment" + ).get("virtualHostname"), + }, + ], + }, + enqueueReplicationServerDeployment={ + "name": "Enqueue Replication Server Deployment", + "parameters": [ + { + "name": "vmResourceId", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("vmResourceId"), + }, + { + "name": "loadBalancerHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("loadBalancerHostname"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("frontEndIp"), + }, + { + "name": "instanceNumber", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("instanceNumber"), + }, + { + "name": "physicalHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("physicalHostname"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "enqueueReplicationServerDeployment" + ).get("virtualHostname"), + }, + ], + }, + webDispatcherDeployment={ + "name": "SAP Web Dispatcher and Fiori Configuration", + "parameters": [ + { + "name": "installationType", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("installationType"), + }, + { + "name": "primaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVmResourceId"), + }, + { + "name": "virtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("virtualHostname"), + }, + { + "name": "primaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryInstanceNumber"), + }, + { + "name": "primaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryPhysicalHostname"), + }, + { + "name": "primaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("primaryVirtualHostname"), + }, + { + "name": "userIdOfSIDAdmin", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("userIdOfSIDAdmin"), + }, + { + "name": "secondaryVmResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVmResourceId"), + }, + { + "name": "loadBalancerResourceId", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("loadBalancerResourceId"), + }, + { + "name": "frontEndIp", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("frontEndIp"), + }, + { + "name": "secondaryInstanceNumber", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryInstanceNumber"), + }, + { 
+ "name": "secondaryPhysicalHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryPhysicalHostname"), + }, + { + "name": "secondaryVirtualHostname", + "value": self.installationParameterSets.get( + "webDispatcherDeployment" + ).get("secondaryVirtualHostname"), + }, + ], + }, + ) + return params + + def transform_application_servers(self): + application_servers = self.installationParameterSets.get( + "applicationServersDeployment", [] + ) + + transformed_application_servers = [] + for index, server in enumerate(application_servers, start=1): + name = f"Application Server {index} Deployment" + parameters = [ + {"name": "vmResourceId", "value": server.get("vmResourceId", "")}, + {"name": "instanceNumber", "value": server.get("instanceNumber", "")}, + { + "name": "physicalHostname", + "value": server.get("physicalHostname", ""), + }, + {"name": "virtualHostname", "value": server.get("virtualHostname", "")}, + ] + + transformed_application_servers.append( + {"name": name, "parameters": parameters} + ) + + self.installationParameterSets["applicationServersDeployment"] = ( + transformed_application_servers + ) + + +class Connection: + def __init__(self, address, outputDir, outputFile): + self._address = address.rstrip("/") + self._headers = {} + self._client = Request() + self.logLocation = f"{outputDir}/{outputFile}" + + def _request(self, method, path, payload=None): + headers = self._headers.copy() + data = None + if payload: + data = json.dumps(payload) + headers["Content-Type"] = "application/json" + + url = self._address + path + r_data = {} # Initialize r_data to avoid referencing an uninitialized variable + try: + r = self._client.open(method, url, data=data, headers=headers, timeout=60) + r_status = r.getcode() + r_headers = dict(r.headers) + data = r.read().decode("utf-8") + r_data = json.loads(data) if data else {} + except HTTPError as e: + r_status = e.code + r_headers = dict(e.headers) + try: + r_data = e.read().decode("utf-8") + + except UnicodeDecodeError: + raise AnsibleConnectionFailure(f"HTTPError {r_status}: {r_headers}") + raise AnsibleConnectionFailure( + f"HTTPError {r_status}: {r_headers} Response {r_data}" + ) + finally: + if isinstance(r_data, str): + r_data = json.loads(r_data) + file_data = r_data.copy() + with open(self.logLocation, "w") as f: + if file_data.get("access_token"): + file_data.pop("access_token") + json.dump(file_data, f, sort_keys=True, indent=4) + return r_status, r_headers, r_data + + def get(self, path): + return self._request("GET", path) + + def post(self, path, payload=None): + return self._request("POST", path, payload) + + def delete(self, path): + return self._request("DELETE", path) + + def get_full_path(self, file_name): + absolute_path = os.path.dirname(__file__) + relative_path = file_name + full_path = os.path.join(absolute_path, relative_path) + return full_path + + def login(self, oauthServerUrl, apiEndpoint): + self._address = oauthServerUrl + self._client.client_cert, cert_temp_file = self.create_temp_file_from_encrypted( + self.get_full_path("cert_file.pem") + ) + self._client.client_key, key_temp_file = self.create_temp_file_from_encrypted( + self.get_full_path("key_file") + ) + status, headers, data = self.post("") + try: + if status in [200, 201, 204, 206]: + token = data.get("access_token") + self._address = apiEndpoint + if token is not None: + self._headers["Authorization"] = "Bearer " + token + else: + raise AnsibleActionFail( + "Unable to fetch CAL token. 
Exit code %s" % status + ) + finally: + # Clean up temporary files + if self.get_full_path("cert_file.pem"): + os.remove(self.get_full_path("cert_file.pem")) + if self.get_full_path("key_file"): + os.remove(self.get_full_path("key_file")) + self._client.client_cert = None + self._client.client_key = None + + def create_temp_file_from_encrypted(self, encrypted_file_path): + with open(encrypted_file_path, "rb") as file: + encrypted_data = file.read() + decrypted_data = fernet.decrypt(encrypted_data).decode() + + fd, temp_file_path = tempfile.mkstemp() + with os.fdopen(fd, "w") as tmp: + tmp.write(decrypted_data) + + return temp_file_path, temp_file_path + + def decrypt_file(self, file_path): + with open(file_path, "rb") as file: + encrypted_data = file.read() + decrypted_data = fernet.decrypt(encrypted_data).decode() + with open(file_path, "w") as file: + file.write(decrypted_data) + + +class AzureKeyVaultManager: + def __init__(self, vault_url, client_id=None, secret=None, tenant=None): + self.vault_url = vault_url + self.client_id = client_id + self.secret = secret + self.tenant = tenant + self.token = None + self.token_acquired = False + self.get_token() + + def get_token(self): + display = Display() + token_params = { + "api-version": "2018-02-01", + "resource": "https://vault.azure.net", + } + token_headers = {"Metadata": "true"} + try: + token_res = requests.get( + "http://169.254.169.254/metadata/identity/oauth2/token", + params=token_params, + headers=token_headers, + ) + token = token_res.json().get("access_token") + if token is not None: + self.token_acquired = True + self.token = token + else: + display.v("No token was available.") + except requests.exceptions.RequestException: + display.v( + "Try using service principal if provided. Unable to fetch MSI token. " + ) + self.token_acquired = False + + def get_secrets(self, secrets): + ret = [] + if self.vault_url is None: + raise AnsibleActionFail("Failed to get a valid vault URL.") + if self.token_acquired: + secret_params = {"api-version": "2016-10-01"} + secret_headers = {"Authorization": "Bearer " + self.token} + for secret in secrets: + try: + secret_res = requests.get( + self.vault_url + "/secrets/" + secret, + params=secret_params, + headers=secret_headers, + ) + ret.append(secret_res.json()["value"]) + except requests.exceptions.RequestException: + raise AnsibleActionFail( + "Failed to fetch secret: " + secret + " via MSI endpoint." + ) + except KeyError: + raise AnsibleActionFail("Failed to fetch secret " + secret + ".") + return ret + else: + return self.get_secret_non_msi(secrets) + + def get_secret_non_msi(self, secrets): + try: + credentials = ServicePrincipalCredentials( + client_id=self.client_id, secret=self.secret, tenant=self.tenant + ) + client = KeyVaultClient(credentials) + except AuthenticationError: + raise AnsibleActionFail( + "Invalid credentials for the subscription provided." 
+ ) + + ret = [] + for secret in secrets: + try: + secret_val = client.get_secret(self.vault_url, secret, "").value + ret.append(secret_val) + except ClientRequestError: + raise AnsibleActionFail("Error occurred in the request") + return ret + + def create_certificates_files(self, client_cert, client_key): + script_dir = os.path.dirname(os.path.abspath(__file__)) + cert_file_path = os.path.join(script_dir, "cert_file.pem") + key_file_path = os.path.join(script_dir, "key_file") + # Encrypt and save the certificates + self.encrypt_and_save(client_cert, cert_file_path) + self.encrypt_and_save(client_key, key_file_path) + + def encrypt_and_save(self, data, file_path): + encrypted_data = fernet.encrypt(data.encode()) + with open(file_path, "wb") as file: + file.write(encrypted_data) + + +class ActionModule(ActionBase): + def __init__(self, *args, **kwargs): + super(ActionModule, self).__init__(*args, **kwargs) + self._supports_check_mode = False + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + # Get parameters from task arguments + method = self._task.args.get("method") + output_directory = self._task.args.get("outputDirectoryPath", "/tmp") + output_file = self._task.args.get("outputFile", "output.txt") + azure_arg_mapping = { + "calKeyvaultId": "vault_url", + "clientId": "client_id", + "clientSecret": "secret", + "tenantId": "tenant", + } + + # Extract relevant arguments and map them to AzureKeyVaultManager constructor argument names + azure_args = { + azure_arg_mapping[key]: value + for key, value in self._task.args.items() + if key in azure_arg_mapping + } + + # Retrieve secrets from Azure Key Vault + azure_mngr = AzureKeyVaultManager(**azure_args) + api_secrets = azure_mngr.get_secrets( + ["apiEndpoint", "clientCertificate", "clientPrivateKey", "oauthServerUrl"] + ) + + apiEndPoint, clientCertificate, clientPrivateKey, oathUrl = api_secrets + + # Create certificate files + azure_mngr.create_certificates_files(clientCertificate, clientPrivateKey) + + conn = Connection("", output_directory, output_file) + + if method == "get_product": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_product, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + status, _, data = conn.get("/solutions/v1/products") + result.update(status=status, response=str(data)) + elif method == "get_progress": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_progress, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + system_id = new_module_args.get("systemId") + status, _, data = conn.get( + "/workloads/v1/systems/" + system_id + "/provisioningProgress" + ) + result.update(status=status, response=str(data)) + elif method == "deployment": + validation_result, new_module_args = self.validate_argument_spec( + method_spec_deployment, required_together=required_together + ) + conn.login(oathUrl, apiEndPoint) + status, _, data = conn.get("/solutions/v1/products") + + if data is not None: + products_dict = {p["productId"]: p for p in data.get("products")} + product = products_dict.get(new_module_args.get("productId")) + azure_providers = [ + item + for item in (product.get("availableProviders") if product else []) + if "Microsoft Azure" in item["name"] + ] + product_constraints = azure_providers[0] if azure_providers else {} + if not product: + raise AnsibleActionFail( + "Product not found.
Choose from the available products' list %s" + % products_dict + ) + + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "operatingSystem" + ).update({"choices": product_constraints.get("availableOperatingSystems")}) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "hanaVmSize" + ).update({"choices": product_constraints.get("availableHanaVmSizes")}) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "centralServicesVmSize" + ).update( + {"choices": product_constraints.get("availableCentralServicesVmSizes")} + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "enqueueReplicationServerVmSize" + ).update( + { + "choices": product_constraints.get( + "availableEnqueueReplicationServerVmSizes" + ) + } + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "applicationServerVmSize" + ).update( + { + "choices": product_constraints.get( + "availableApplicationServerVmSizes" + ) + } + ) + method_spec_deployment.get("infrastructureParameterSet").get("options").get( + "webDispatcherVmSize" + ).update( + {"choices": product_constraints.get("availableWebDispatcherVmSizes")} + ) + + validation_result, new_module_args = self.validate_argument_spec( + method_spec_deployment + ) + system = SAPsystem(new_module_args) + system_request = system.get_props() + status, _, data = conn.post( + "/workloads/v1/systems/provisioning", payload=system_request + ) + result.update(status=status, response=str(data)) + elif method == "software_provisioning": + conn.login(oathUrl, apiEndPoint) + validation_result, new_module_args = self.validate_argument_spec( + method_spec_provisioning, required_together=required_together + ) + system = SAPsystem(new_module_args) + system_request = system.get_props() + status, _, data = conn.post( + "/workloads/v1/systems/softwareProvisioning", payload=system_request + ) + result.update( + status=status, response=str(data) + ) # Write response to output file + + result["changed"] = True + + return result diff --git a/deploy/ansible/playbook_00_validate_parameters.yaml b/deploy/ansible/playbook_00_validate_parameters.yaml index c1cfdb0980..ea7593d589 100644 --- a/deploy/ansible/playbook_00_validate_parameters.yaml +++ b/deploy/ansible/playbook_00_validate_parameters.yaml @@ -424,6 +424,16 @@ tags: - 0.0-agent-diskspace + - name: "0.0 Validations - Check SAP CAL variables are present and not empty" + when: enable_sap_cal is defined and enable_sap_cal + ansible.builtin.assert: + that: + - calapi_kv is defined + - calapi_kv | type_debug != 'NoneType' + - calapi_kv | trim | length > 1 + fail_msg: "Please provide the SAP CAL API key vault name in calapi_kv parameter" + tags: + - always # /*---------------------------------------------------------------------------8 # | | diff --git a/deploy/ansible/playbook_sapcal_integration.yaml b/deploy/ansible/playbook_sapcal_integration.yaml new file mode 100644 index 0000000000..0fdb2923a3 --- /dev/null +++ b/deploy/ansible/playbook_sapcal_integration.yaml @@ -0,0 +1,122 @@ +--- + +- name: "SAP CAL Integration" + hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP" + become: true + gather_facts: true + vars_files: vars/ansible-input-api.yaml + tasks: + - name: 6.0.0-sapcal-install - Retrieve Resourced Data + become: true + when: + - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" + - enable_sap_cal is defined 
and enable_sap_cal + block: + - name: "Retrieve Resource Group Name and ResourceID" + ansible.builtin.uri: + url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 + use_proxy: false + headers: + Metadata: true + register: azure_metadata + + - name: "Set ResourceID for SCS" + ansible.builtin.set_fact: + subscription_id: "{{ azure_metadata.json.compute.subscriptionId }}" + resource_group_name: "{{ azure_metadata.json.compute.resourceGroupName }}" + scs_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + scs_physical_hostname: "{{ ansible_hostname }}" + scs_virtual_hostname: "{{ virtual_host }}" + when: + - "'scs' in supported_tiers" + - not scs_high_availability + + - name: "Set ResourceID for DB" + ansible.builtin.set_fact: + db_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + db_physical_hostname: "{{ ansible_hostname }}" + db_virtual_hostname: "{{ virtual_host }}" + when: + - "'hana' in supported_tiers" + - not db_high_availability + + - name: "Set ResourceID for PAS" + ansible.builtin.set_fact: + pas_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + pas_physical_hostname: "{{ ansible_hostname }}" + pas_virtual_hostname: "{{ virtual_host }}" + when: + - "'pas' in supported_tiers" + + - name: "Set ResourceID for APP" + ansible.builtin.set_fact: + app_resource_id: "{{ azure_metadata.json.compute.resourceId }}" + app_physical_hostname: "{{ ansible_hostname }}" + app_virtual_hostname: "{{ virtual_host }}" + when: + - "'app' in supported_tiers" + +- name: "Provision a new SAP environment" + hosts: localhost + connection: local + gather_facts: true + vars_files: vars/ansible-input-api.yaml + tasks: + + - name: "Check if Enable SAP CAL is true" + ansible.builtin.assert: + that: + - enable_sap_cal is defined + - enable_sap_cal | bool + fail_msg: "Please set enable_sap_cal to true in the sap-parameters.yaml file to enable SAP CAL integration" + + - name: Run the keyvault role + ansible.builtin.include_role: + name: roles-misc/0.2-kv-secrets + vars: + operation: sapcal + tags: + - kv-secrets + +# Once the Ansible Module is updated, this task will be moved to OS configuration playbook + - name: "SAP-CAL Integration: - Ensure azure-keyvault is installed" + become: true + when: enable_sap_cal is defined and enable_sap_cal + block: + - name: "SAP-CAL Integration: - Ensure azure-keyvault is installed" + ansible.builtin.pip: + name: + - azure-keyvault==1.1.0 + - azure-keyvault-secrets + state: present + tags: + - always + + - name: "Set facts from other hosts" + ansible.builtin.set_fact: + "{{ item.key }}": "{{ hostvars[groups[sap_sid | upper + '_' + item.value][0]][item.key] }}" + loop: + - { key: 'subscription_id', value: 'SCS' } + - { key: 'resource_group_name', value: 'SCS' } + - { key: 'scs_resource_id', value: 'SCS' } + - { key: 'scs_physical_hostname', value: 'SCS' } + - { key: 'scs_virtual_hostname', value: 'SCS' } + - { key: 'db_resource_id', value: 'DB' } + - { key: 'db_physical_hostname', value: 'DB' } + - { key: 'db_virtual_hostname', value: 'DB' } + - { key: 'pas_resource_id', value: 'PAS' } + - { key: 'pas_physical_hostname', value: 'PAS' } + - { key: 'pas_virtual_hostname', value: 'PAS' } + - { key: 'app_resource_id', value: 'APP' } + - { key: 'app_physical_hostname', value: 'APP' } + - { key: 'app_virtual_hostname', value: 'APP' } + + - name: 6.0.0-sapcal-install - CALL SAP CAL API + when: enable_sap_cal is defined and enable_sap_cal + block: + - name: Import the 6.0.0-sapcal-install role + ansible.builtin.import_role: + name: 
"roles-sap/6.0.0-sapcal-install" diff --git a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml index 85940d854d..fd3334d8a3 100644 --- a/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml +++ b/deploy/ansible/roles-misc/0.2-kv-secrets/tasks/main.yaml @@ -40,7 +40,7 @@ - name: "0.2 Key Vault: - Import S User tasks" ansible.builtin.import_tasks: "s_user.yaml" when: - - operation == "SoftwareAcquisition" + - operation == "SoftwareAcquisition" or operation == "sapcal" # -------------------------------------+---------------------------------------8 diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index 34f6fddd94..09eb25eeb7 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -4,6 +4,7 @@ when: - bom is not defined - not is_run_with_infraCreate_only + - enable_sap_cal is not defined or not enable_sap_cal ansible.builtin.include_role: name: roles-sap/3.3.1-bom-utility tasks_from: bom-register diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml index 38fecf2c36..78c3097c94 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6-set_runtime_facts.yaml @@ -12,6 +12,7 @@ when: - bom is not defined - not is_run_with_infraCreate_only + - enable_sap_cal is not defined or not enable_sap_cal # default to ASCS instance when BOM is not defined or instance type in BOM is not defined - name: "2.6 SCS HA Install: Default instance type" diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml index 0a8fd87603..3cd80bbc63 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.2-sap-resources-Suse.yml @@ -26,7 +26,6 @@ # - name: "5.6 SCSERS - SUSE - ENSA1 - Set the cluster on maintenance mode" # ansible.builtin.shell: crm configure property maintenance-mode=true - - name: "5.6 SCSERS - SUSE - ENSA1 - Configure SAP ASCS/SCS resources" ansible.builtin.shell: > crm configure primitive rsc_sap_{{ sap_sid }}_{{ instance_type | upper }}{{ scs_instance_number }} SAPInstance \ diff --git a/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml b/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml new file mode 100644 index 0000000000..35b4d2f16c --- /dev/null +++ b/deploy/ansible/roles-sap/6.0.0-sapcal-install/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# defaults file for 6.0.0-sapcal-install + +db_sid_admin_user_id: "1050" +sap_sysadmin_user_id: "1079" +sap_sysadmin_group_id: "79" +sap_gui_default_language: "en" +sap_additional_languages: "" +number_of_dialog_work_processes: "10" +number_of_batch_work_processes: "7" +abap_message_server_port: "3600" diff --git a/deploy/ansible/roles-sap/6.0.0-sapcal-install/tasks/main.yml b/deploy/ansible/roles-sap/6.0.0-sapcal-install/tasks/main.yml new file mode 100644 index 0000000000..eb6606c04d --- /dev/null +++ b/deploy/ansible/roles-sap/6.0.0-sapcal-install/tasks/main.yml @@ -0,0 +1,76 @@ +--- +# tasks file for 6.0.0-sapcal-install + + +- name: "Retrieve SAP-CAL Product Id" + 
ansible.builtin.set_fact: + product_id: "{{ sap_cal_product | selectattr('name', 'equalto', sap_cal_product_name) | map(attribute='id') | first }}" + +# ------------------------------------- +- name: "Print SAP-CAL Parameters" + ansible.builtin.debug: + msg: + - "SAP SID : {{ sap_sid | upper }}" + - "DB SID : {{ db_sid | upper }}" + - "SAP-CAL Product Name : {{ sap_cal_product_name }}" + - "SAP-CAL Product Id : {{ product_id }}" + - "Domain Name : {{ sap_fqdn }}" + verbosity: 2 +# ------------------------------------ + +- name: Call provisioning API endpoint + public_api: + method: "software_provisioning" + calKeyvaultId: "https://{{ calapi_kv }}.vault.azure.net/" + outputDirectoryPath: "{{ _workspace_directory }}" + clientId: "" + clientSecret: "" + tenantId: "" + outputFile: "sapcal_provisioning.json" + productId: "{{ product_id }}" + availabilityScenario: "non-ha" + infrastructureParameterSet: + domainName: "{{ sap_fqdn }}" + remoteOsUser: "{{ orchestration_ansible_user }}" + secretStoreId: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ secret_prefix }}-INFRASTRUCTURE/providers/Microsoft.KeyVault/vaults/{{ kv_name }}" + sshPublicKeySecretName: "{{ secret_prefix }}-sid-sshkey-pub" + sshPrivateKeySecretName: "{{ secret_prefix }}-sid-sshkey" + deploymentServerResourceGroup: "{{ resource_group_name }}-SAPCAL-DS" + technicalCommunicationUser: "{{ s_user }}" + techUserPassword: "{{ s_password }}" + installationParameterSets: + hanaDeployment: + primaryVmResourceId: "{{ db_resource_id }}" + DBSID: "{{ db_sid | upper }}" + DBSIDAdminUserId: "{{ db_sid_admin_user_id }}" + instanceNumber: "{{ db_instance_number }}" + primaryPhysicalHostname: "{{ db_physical_hostname }}" + primaryVirtualHostname: "{{ db_virtual_hostname }}" + s4hanaDeployment: + SID: "{{ sap_sid | upper }}" + SAPSysAdminUserId: "{{ sap_sysadmin_user_id }}" + SAPSysAdminGroupId: "{{ sap_sysadmin_group_id }}" + sapGuiDefaultLanguage: "{{ sap_gui_default_language }}" + SAPSystemAdditionalLanguages: "{{ sap_additional_languages }}" + numberOfDialogWorkProcesses: "{{ number_of_dialog_work_processes }}" + numberOfBatchWorkProcesses: "{{ number_of_batch_work_processes }}" + centralServicesDeployment: + vmResourceId: "{{ scs_resource_id}}" + instanceNumber: "{{ scs_instance_number }}" + ABAPMessageServerPort: "{{ abap_message_server_port }}" + physicalHostname: "{{ scs_physical_hostname }}" + virtualHostname: "{{ scs_virtual_hostname }}" + applicationServersDeployment: + - vmResourceId: "{{ pas_resource_id }}" + instanceNumber: "{{ pas_instance_number }}" + physicalHostname: "{{ pas_physical_hostname }}" + virtualHostname: "{{ pas_virtual_hostname }}" + - vmResourceId: "{{ app_resource_id }}" + instanceNumber: "{{ app_instance_number }}" + physicalHostname: "{{ app_physical_hostname }}" + virtualHostname: "{{ app_virtual_hostname }}" + register: sapcal_provisioning + +- name: "Print SAP-CAL provisioning response" + ansible.builtin.debug: + var: sapcal_provisioning diff --git a/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml b/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml new file mode 100644 index 0000000000..5057996055 --- /dev/null +++ b/deploy/ansible/roles-sap/6.0.0-sapcal-install/vars/main.yml @@ -0,0 +1,14 @@ +--- +# vars file for 6.0.0-sapcal-install + +sap_cal_product: + - { name: "S/4HANA_2023-Initial_Shipment_Stack", id: "88f59e31-d776-45ea-811c-1da6577e4d25" } + - { name: "S/4HANA_2022-Initial_Shipment_Stack", id: "3b1dc287-c865-4f79-b9ed-d5ec2dc755e9" } + - { name: 
"S/4HANA_2021-Initial_Shipment_Stack", id: "108febf9-5e7b-4e47-a64d-231b6c4c821d" } + - { name: "S/4HANA_2022-FPS_01_022023", id: "1294f31c-2697-443c-bacc-117d5924fcb2" } + - { name: "S/4HANA_2022-FPS_02_052023", id: "c86d7a56-4130-4459-8060-ffad1a1118ce" } + - { name: "S/4HANA_2021-FPS_02_052022", id: "4d5f19a7-d3cb-4d47-9f44-0a9e133b11de" } + - { name: "S/4HANA_2021-FPS_01_022022", id: "1c796928-0617-490b-a87d-478568a49628" } + - { name: "S/4HANA_2021-04_052023", id: "29403c63-6504-4919-b5dd-319d7a99804e" } + - { name: "S/4HANA_2021-03_112022", id: "6921f2f8-169b-45bb-9e0b-d89b4abee1f3" } + - { name: "S/4HANA 2020-04_052022", id: "615c5c18-5226-4dcb-b0ab-19d0141baf9b" } diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 605738678b..229d13744d 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -254,5 +254,10 @@ enable_os_monitoring: false enable_ha_monitoring: false # ------------------- End - Azure Monitor for SAP (AMS) variables --------------8 +# ------------------- Begin - SAP CAL Integration variables --------------------8 +enable_sap_cal: false +calapi_kv: "" +sap_cal_product_name: "" +# ------------------- End - SAP CAL Integration variables ----------------------8 python_version: "python3" diff --git a/deploy/pipelines/07-sap-cal-installation.yaml b/deploy/pipelines/07-sap-cal-installation.yaml new file mode 100644 index 0000000000..c528e44771 --- /dev/null +++ b/deploy/pipelines/07-sap-cal-installation.yaml @@ -0,0 +1,404 @@ +--- +# /*---------------------------------------------------------------------------8 +# | | +# | This pipeline performs the software installation | +# | and must run on a self hosted deployment agent | +# | due to long run time. | +# | | +# +------------------------------------4--------------------------------------*/ + +parameters: + - name: sap_system_configuration_name + displayName: "SAP System configuration name, use the following syntax: ENV-LOCA-VNET-SID" + type: string + default: DEV-WEEU-SAP01-X00 + + - name: environment + displayName: Workload Environment (DEV, QUA, PRD, ...) 
+ type: string + default: DEV + + - name: sap_cal_product_name + displayName: SAP CAL Product Name + type: string + + - name: extra_params + displayName: Extra Parameters + type: string + default: "" + + - name: base_os_configuration + displayName: Core Operating System Configuration + type: boolean + default: true + + - name: sap_os_configuration + displayName: SAP Operating System Configuration + type: boolean + default: true + + - name: sapcal_integration + displayName: SAP CAL Integration + type: boolean + default: false + +# 20220929 MKD - ACSS Registration + - name: acss_registration + displayName: Register System in ACSS + type: boolean + default: true + + - name: acss_environment + displayName: ACSS Prod/NonProd + type: string + values: + - NonProd + - Prod + + - name: acss_sap_product + displayName: System Type + type: string + values: + - S4HANA + - ECC + - Other + # 20220929 MKD - ACSS Registration + + - name: sap_automation_repo_path + displayName: The local path on the agent where the sap_automation repo can be found + type: string + + - name: config_repo_path + displayName: The local path on the agent where the config repo can be found + type: string + +stages: + - stage: Preparation_for_Ansible + condition: and(not(failed()), not(canceled())) + variables: + - template: variables/07-sap-cal-installation-variables.yaml + parameters: + environment: ${{ parameters.environment }} + displayName: OS Configuration and SAP Installation + jobs: + - job: Installation_step + displayName: OS Configuration and SAP Installation + timeoutInMinutes: 0 + workspace: + clean: all + steps: + - template: templates\download.yaml + parameters: + getLatestFromBranch: true + - task: PostBuildCleanup@3 + - bash: | + #!/bin/bash + # Exit immediately if a command exits with a non-zero status. + set -e + + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" + if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then + echo -e "$green --- Install dos2unix ---$reset" + sudo apt-get -qq install dos2unix + fi + echo -e "$green--- Convert config file to UX format ---$reset" + echo -e "$green--- Update .sap_deployment_automation/config as DEPLOYMENT_REPO_PATH can change on devops agent ---$reset" + export HOME=${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path) + cd $HOME + + echo -e "$green--- Configure devops CLI extension ---$reset" + az config set extension.use_dynamic_install=yes_without_prompt --output none + + az extension add --name azure-devops --output none + + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + + echo -e "$green--- Validations ---$reset" + ENVIRONMENT=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $1}' | xargs) ; echo Environment $ENVIRONMENT + LOCATION=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $2}' | xargs) ; echo Location $LOCATION + NETWORK=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $3}' | xargs) ; echo Virtual network logical name $NETWORK + SID=$(echo ${SAP_SYSTEM_CONFIGURATION_NAME} | awk -F'-' '{print $4}' | xargs) ; echo SID $SID + + environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION$NETWORK ; echo configuration_file $environment_file_name + params_file=$HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml ; echo sap_parameters_file $params_file + + if [ "azure pipelines" = "$(this_agent)" ]; then + echo "##vso[task.logissue type=error]Please use a self hosted agent for this playbook. 
Define it in the SDAF-${ENVIRONMENT} variable group using the 'POOL' variable."
+            exit 2
+          fi
+
+          if [ ! -f $environment_file_name ]; then
+            echo -e "$boldred--- $environment_file_name was not found ---$reset"
+            echo "##vso[task.logissue type=error]Workload zone configuration file $environment_file_name was not found."
+            exit 2
+          fi
+
+          if [ ! -f $params_file ]; then
+            echo -e "$boldred--- $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml was not found ---$reset"
+            echo "##vso[task.logissue type=error]File $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml was not found."
+            exit 2
+          else
+            dos2unix -q $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/sap-parameters.yaml
+          fi
+
+          if [ -z "${SID}" ]; then
+            echo "##vso[task.logissue type=error]SID was not found in ${SAP_SYSTEM_CONFIGURATION_NAME}."
+            exit 2
+          fi
+
+          if [ ! -f $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml ]; then
+            echo -e "$boldred--- $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml was not found ---$reset"
+            echo "##vso[task.logissue type=error]File $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml was not found."
+            exit 2
+          fi
+          dos2unix -q $HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}/${SID}_hosts.yaml
+
+          export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]")
+          echo '$(variable_group) id: ' $VARIABLE_GROUP_ID
+          if [ -z "${VARIABLE_GROUP_ID}" ]; then
+            echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found."
+            exit 2
+          fi
+
+          echo "##vso[build.updatebuildnumber]Deploying ${SAP_SYSTEM_CONFIGURATION_NAME} using SAP CAL"
+
+          echo "##vso[task.setvariable variable=SID;isOutput=true]${SID}"
+          echo "##vso[task.setvariable variable=SAP_PARAMETERS;isOutput=true]sap-parameters.yaml"
+          echo "##vso[task.setvariable variable=FOLDER;isOutput=true]$HOME/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}"
+          echo "##vso[task.setvariable variable=HOSTS;isOutput=true]${SID}_hosts.yaml"
+
+          echo -e "$green--- Get Files from the DevOps Repository ---$reset"
+          cd ${CONFIG_REPO_PATH}/$(Deployment_Configuration_Path)/SYSTEM/${SAP_SYSTEM_CONFIGURATION_NAME}
+          sap_params_updated=0
+
+          fqdn="$(grep -m1 "sap_fqdn:" sap-parameters.yaml | cut -d':' -f2- | tr -d ' ' | tr -d '"')"
+          if [ -z "$fqdn" ]; then
+            sed -i 's|sap_fqdn:.*|sap_fqdn: '"$(sap_fqdn)"'|' sap-parameters.yaml
+          fi
+
+          if [[ -n "${sapcalProductName}" ]]; then
+            echo -e "$green--- Add SAP CAL Product Name $sapcalProductName to sap-parameters.yaml ---$reset"
+            sed -i 's|sap_cal_product_name:.*|sap_cal_product_name: '"$sapcalProductName"'|' sap-parameters.yaml
+          fi
+
+          echo -e "$green--- Get connection details ---$reset"
+          mkdir -p artifacts
+
+          az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value --output tsv)
+          if [ -z "${az_var}" ]; then
+            export workload_key_vault=$(cat "${environment_file_name}" | grep workloadkeyvault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault}
+          else
+            export workload_key_vault=${az_var} ; echo 'Workload Key Vault' ${workload_key_vault}
+          fi
+
+          az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output tsv)
+          if [ -z "${az_var}" ]; then
+            export workload_prefix=$(cat "${environment_file_name}" | grep workload_zone_prefix | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix}
+          else
+            export workload_prefix=${az_var} ; echo 'Workload Prefix' ${workload_prefix}
+          fi
+
+          az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Subscription.value --output tsv)
+          if [ -z "${az_var}" ]; then
+            export control_plane_subscription=$(cat "${environment_file_name}" | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Control Plane Subscription' ${control_plane_subscription}
+          else
+            export control_plane_subscription=${az_var} ; echo 'Control Plane Subscription' ${control_plane_subscription}
+          fi
+
+          if [[ $EXTRA_PARAMETERS = "'$(EXTRA_PARAMETERS)'" ]]; then
+            new_parameters=$PIPELINE_EXTRA_PARAMETERS
+          else
+            echo "##vso[task.logissue type=warning]Extra parameters were provided - '$EXTRA_PARAMETERS'"
+            new_parameters="$EXTRA_PARAMETERS $PIPELINE_EXTRA_PARAMETERS"
+          fi
+
+          echo "##vso[task.setvariable variable=SSH_KEY_NAME;isOutput=true]${workload_prefix}-sid-sshkey"
+          echo "##vso[task.setvariable variable=VAULT_NAME;isOutput=true]$workload_key_vault"
+          echo "##vso[task.setvariable variable=PASSWORD_KEY_NAME;isOutput=true]${workload_prefix}-sid-password"
+          echo "##vso[task.setvariable variable=USERNAME_KEY_NAME;isOutput=true]${workload_prefix}-sid-username"
+          echo "##vso[task.setvariable variable=NEW_PARAMETERS;isOutput=true]${new_parameters}"
+          echo "##vso[task.setvariable variable=CP_SUBSCRIPTION;isOutput=true]${control_plane_subscription}"
+
+          echo -e "$green--- az login ---$reset"
+          # If the deployer_file exists, we run on a deployer configured by the framework instead of an Azure DevOps hosted agent
+          deployer_file=/etc/profile.d/deploy_server.sh
+          if [ "$USE_MSI" = "true" ]; then
+            echo "Using MSI"
+            source /etc/profile.d/deploy_server.sh
+            az account set --subscription $control_plane_subscription
+          else
+            if [ -z "$AZURE_CLIENT_ID" ]; then
+              echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined."
+              exit 2
+            fi
+
+            if [ -z "$AZURE_CLIENT_SECRET" ]; then
+              echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined."
+              exit 2
+            fi
+
+            if [ -z "$AZURE_TENANT_ID" ]; then
+              echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined."
+              exit 2
+            fi
+            az login --service-principal --username $AZURE_CLIENT_ID --password=${AZURE_CLIENT_SECRET} --tenant $AZURE_TENANT_ID --output none
+            return_code=$?
+            if [ 0 != $return_code ]; then
+              echo -e "$boldred--- Login failed ---$reset"
+              echo "##vso[task.logissue type=error]az login failed."
+              exit $return_code
+            fi
+            az account set --subscription $control_plane_subscription
+          fi
+
+          return_code=$?
+          if [ 0 != $return_code ]; then
+            echo -e "$boldred--- Login failed ---$reset"
+            echo "##vso[task.logissue type=error]az login failed."
+            exit $return_code
+          fi
+
+          az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \")
+          if [ -n "${az_var}" ]; then
+            kv_name=${az_var}; echo "Key Vault="$kv_name
+          else
+            kv_name=$(cat .sap_deployment_automation/$(environment_code)$(location_code) | grep keyvault | awk -F'=' '{print $2}'); echo "Key Vault="$kv_name
+          fi
+
+          echo "##vso[task.setvariable variable=KV_NAME;isOutput=true]$kv_name"
+
+          if [ "your S User" == "${SUsername}" ]; then
+            echo "##vso[task.logissue type=error]Please define the S-Username variable."
+            exit 2
+          fi
+
+          if [ "your S user password" == "${SPassword}" ]; then
+            echo "##vso[task.logissue type=error]Please define the S-Password variable."
+            exit 2
+          fi
+
+          echo -e "$green--- Set S-Username and S-Password in the key_vault if not yet there ---$reset"
+
+          export SUsernamefromVault=$(az keyvault secret list --vault-name "${kv_name}" --subscription "${ARM_SUBSCRIPTION_ID}" --query "[].{Name:name} | [? contains(Name,'S-Username')] | [0]" -o tsv)
+          if [ "${SUsernamefromVault}" == "${SUsername}" ]; then
+            echo -e "$green--- $SUsername present in keyvault. In case of download errors, check that user and password are correct ---$reset"
+            echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsernamefromVault"
+          else
+            echo -e "$green--- Setting the S-Username in the key vault ---$reset"
+            az keyvault secret set --name "S-Username" --vault-name $kv_name --value="${SUsername}" --subscription "${ARM_SUBSCRIPTION_ID}" --output none
+            echo "##vso[task.setvariable variable=SUSERNAME;isOutput=true]$SUsername"
+          fi
+
+          export SPasswordfromVault=$(az keyvault secret list --vault-name "${kv_name}" --subscription "${ARM_SUBSCRIPTION_ID}" --query "[].{Name:name} | [? contains(Name,'S-Password')] | [0]" -o tsv)
+          if [ "${SPassword}" == "${SPasswordfromVault}" ]; then
+            echo "##vso[task.setvariable variable=SPASSWORD;isOutput=true]${SPasswordfromVault}"
+            echo -e "$green--- Password present in keyvault. In case of download errors, check that user and password are correct ---$reset"
+          else
+            echo -e "$green--- Setting the S-User password in the key vault ---$reset"
+            az keyvault secret set --name "S-Password" --vault-name $kv_name --value "${SPassword}" --subscription "${ARM_SUBSCRIPTION_ID}" --output none
+            echo "##vso[task.setvariable variable=SPASSWORD;isOutput=true]${SPassword}"
+          fi
+
+          az keyvault secret show --name ${workload_prefix}-sid-sshkey --vault-name $workload_key_vault --subscription $AZURE_SUBSCRIPTION_ID --query value -o tsv > artifacts/${SAP_SYSTEM_CONFIGURATION_NAME}_sshkey
+          cp sap-parameters.yaml artifacts/.
+          cp ${SID}_hosts.yaml artifacts/.
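+          # The ${workload_prefix}-sid-sshkey, ${workload_prefix}-sid-username and
+          # ${workload_prefix}-sid-password secrets referenced above are the ones the
+          # run-ansible.yaml template consumes later in this pipeline. If a run fails
+          # on credential lookup, a manual check against the workload key vault can
+          # confirm they exist (illustrative command only, not executed here):
+          #   az keyvault secret show --name "${workload_prefix}-sid-username" \
+          #     --vault-name "$workload_key_vault" --query value --output tsv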
+ + 2> >(while read line; do (>&2 echo "STDERROR: $line"); done) + name: Preparation + displayName: Preparation for Ansible + env: + SCRIPT_PATH: $${{ parameters.sap_automation_repo_path }}/deploy/pipelines/templates/*.sh + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + ANSIBLE_HOST_KEY_CHECKING: false + AZURE_CLIENT_ID: $(ARM_CLIENT_ID) + AZURE_CLIENT_SECRET: $(ARM_CLIENT_SECRET) + AZURE_TENANT_ID: $(ARM_TENANT_ID) + AZURE_SUBSCRIPTION_ID: $(Terraform_Remote_Storage_Subscription) + ANSIBLE_COLLECTIONS_PATHS: /opt/ansible/collections + CONFIG_REPO_PATH: ${{ parameters.config_repo_path }} + SAP_SYSTEM_CONFIGURATION_NAME: ${{ parameters.sap_system_configuration_name }} + EXTRA_PARAMETERS: $(EXTRA_PARAMETERS) + PIPELINE_EXTRA_PARAMETERS: ${{ parameters.extra_params }} + USE_MSI: $(USE_MSI) + SUsername: $(S-Username) + SPassword: $(S-Password) + sapcalProductName: ${{ parameters.sap_cal_product_name }} + + - template: templates\run-ansible.yaml + parameters: + displayName: "Parameter validation" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_00_validate_parameters.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.base_os_configuration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "Operating System Configuration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_01_os_base_config.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sap_os_configuration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "SAP Specific Operating System Configuration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_02_os_sap_specific_config.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: 
"$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + USE_MSI: $(USE_MSI) + - ${{ if eq(parameters.sapcal_integration, true) }}: + - template: templates\run-ansible.yaml + parameters: + displayName: "SAPCAL Integration" + ansibleFilePath: ${{ parameters.sap_automation_repo_path }}/deploy/ansible/playbook_sapcal_integration.yaml + secretName: "$(Preparation.SSH_KEY_NAME)" + passwordSecretName: "$(Preparation.PASSWORD_KEY_NAME)" + userNameSecretName: "$(Preparation.USERNAME_KEY_NAME)" + vaultName: $(Preparation.VAULT_NAME) + parametersFolder: $(Preparation.FOLDER) + sapParams: "${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }}/artifacts/$(Preparation.SAP_PARAMETERS)" + sidHosts: $(Preparation.HOSTS) + extraParams: "$(Preparation.NEW_PARAMETERS)" + azureClientId: $(ARM_CLIENT_ID) + azureClientSecret: $(ARM_CLIENT_SECRET) + azureTenantId: $(ARM_TENANT_ID) + azureSubscriptionId: $(ARM_SUBSCRIPTION_ID) + sapcalProductName: ${{ parameters.sap_cal_product_name }} + USE_MSI: $(USE_MSI) + - template: templates\collect-calapi-file.yaml + parameters: + filePath: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path)/SYSTEM/${{ parameters.sap_system_configuration_name }} diff --git a/deploy/pipelines/templates/collect-calapi-file.yaml b/deploy/pipelines/templates/collect-calapi-file.yaml new file mode 100644 index 0000000000..b250fff633 --- /dev/null +++ b/deploy/pipelines/templates/collect-calapi-file.yaml @@ -0,0 +1,37 @@ +parameters: + filePath: "" +steps: + - script: | + #!/bin/bash + set -eu + echo "Collecting sapcal_provisioning.json ${{ parameters.filePath }}" + cd ${FILE_PATH} + if [ -f "sapcal_provisioning.json" ]; then + echo "Found sapcal_provisioning.json" + git config --global user.email "${USER_EMAIL}" + git config --global user.name "${USER_NAME}" + echo "Checking out ${SOURCE_BRANCH} branch..." + git checkout -q ${SOURCE_BRANCH} + echo "Pulling last changes..." + git pull + echo "Adding sapcal_provisioning.json..." + git add sapcal_provisioning.json + if [ $(git diff --name-only --cached | wc -l) -gt 0 ]; then + echo "Committing changes..." + git commit -m "Adding sapcal_provisioning.json" + echo "Pushing changes..." 
+ git push + else + echo "No changes to commit for sapcal_provisioning.json" + fi + else + echo "sapcal_provisioning.json not found" + fi + displayName: Store SAP-CAL API response in repository + enabled: true + env: + USER_EMAIL: $(Build.RequestedForEmail) + USER_NAME: $(Build.RequestedFor) + SOURCE_BRANCH: $(Build.SourceBranchName) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + FILE_PATH: ${{ parameters.filePath }} diff --git a/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml b/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml new file mode 100644 index 0000000000..94f39757d8 --- /dev/null +++ b/deploy/pipelines/variables/07-sap-cal-installation-variables.yaml @@ -0,0 +1,25 @@ +#--------------------------------------+------------------------------------------------8 +# | +# Defines the parameters and variables for the SAP Software Install using SAP CAL | +# | +#--------------------------------------+------------------------------------------------8 + +parameters: + environment: "" + +variables: + - group: "SDAF-General" + + - group: SDAF-${{ parameters.environment }} + + - name: agent_name + value: $[coalesce(variables['POOL'], variables['Agent'])] + + - name: this_agent + value: $[lower(coalesce(variables['POOL'], variables['Agent']))] + + - name: variable_group + value: SDAF-${{ parameters.environment }} + + - name: key_vault + value: $[variables['Deployer_Key_Vault']] diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index ac4dbd63d7..2e277c55cd 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -627,6 +627,18 @@ $this_pipeline_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Proj $log = ("[" + $pipeline_name + "](" + $this_pipeline_url + ")") Add-Content -Path $fname -Value $log +$pipeline_name = 'SAP installation using SAP-CAL' +$sapcal_installation_pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") +if ($sapcal_installation_pipeline_id.Length -eq 0) { + az pipelines create --name $pipeline_name --branch main --description 'Configures the Operating System and installs the SAP application using SAP CAL' --skip-run --yaml-path "/pipelines/07-sap-cal-installation.yml" --repository $repo_id --repository-type tfsgit --output none --only-show-errors + $sapcal_installation_pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") +} +$pipelines.Add($sapcal_installation_pipeline_id) + +$this_pipeline_url = $ADO_ORGANIZATION + "/" + [uri]::EscapeDataString($ADO_Project) + "/_build?definitionId=" + $sapcal_installation_pipeline_id +$log = ("[" + $pipeline_name + "](" + $this_pipeline_url + ")") +Add-Content -Path $fname -Value $log + $pipeline_name = 'Remove System or Workload Zone' $pipeline_id = (az pipelines list --query "[?name=='$pipeline_name'].id | [0]") if ($pipeline_id.Length -eq 0) { diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index e4e6b743ef..5f53f12cd0 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -461,4 +461,11 @@ module "output_files" { ams_resource_id = try(coalesce(var.ams_resource_id, try(data.terraform_remote_state.landscape.outputs.ams_resource_id, "")),"") enable_ha_monitoring = var.enable_ha_monitoring enable_os_monitoring = var.enable_os_monitoring + + ######################################################################################### + # SAP CAL # + 
######################################################################################### + enable_sap_cal = var.enable_sap_cal + calapi_kv = var.calapi_kv + sap_cal_product_name = var.sap_cal_product_name } diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 3e3577a903..a4315b7da3 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1413,3 +1413,21 @@ variable "stand_by_node_count" { description = "The number of standby nodes" default = 0 } +######################################################################################### +# # +# SAP CAL Integration variables # +# # +######################################################################################### + +variable "enable_sap_cal" { + description = "If true, will enable the SAP CAL integration" + default = false + } +variable "calapi_kv" { + description = "The SAP CAL API Key Vault" + default = "" + } +variable "sap_cal_product_name" { + description = "If defined, will use SAP CAL for system installation" + default = "" + } diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf index e4feea4c7e..2780be48ec 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/inventory.tf @@ -250,6 +250,9 @@ resource "local_file" "sap-parameters_yml" { ams_resource_id = var.ams_resource_id enable_os_monitoring = var.enable_os_monitoring enable_ha_monitoring = var.enable_ha_monitoring + enable_sap_cal = var.enable_sap_cal + calapi_kv = var.calapi_kv + sap_cal_product_name = var.sap_cal_product_name } ) diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index 6047f02782..7eb233fe32 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -210,4 +210,18 @@ ams_resource_id: ${ams_resource_id} enable_os_monitoring: ${enable_os_monitoring} enable_ha_monitoring: ${enable_ha_monitoring} +%{~ if enable_sap_cal } + +############################################################################# +# # +# SAP CAL Integration # +# # +############################################################################# + +# Defines if the installation is to be deployed using SAP CAL +enable_sap_cal: ${enable_sap_cal} +calapi_kv: ${calapi_kv} +sap_cal_product_name: ${sap_cal_product_name} + +%{~ endif } ... 
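For reference, a sketch of what the template block above renders to in sap-parameters.yaml when enable_sap_cal is true (the %{~ ... } strip markers trim the surrounding newlines; the key vault name below is a made-up example, and the product name is taken from the catalog list earlier in this patch):

  enable_sap_cal: true
  calapi_kv: MGMTWEEUSAP01calapi
  sap_cal_product_name: S/4HANA_2022-FPS_02_052023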
diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf index 8ba41d10e4..b8bb496685 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/variables_global.tf @@ -212,3 +212,16 @@ variable "ams_resource_id" { description = "Resource ID for variable "enable_os_monitoring" { description = "Enable OS monitoring" } variable "enable_ha_monitoring" { description = "Enable HA monitoring" } +variable "enable_sap_cal" { + description = "Enable SAP CAL" + default = false + type = bool + } +variable "calapi_kv" { + description = "Keyvault for CAL API" + default = "" + } +variable "sap_cal_product_name" { + description = "Product name of SAP CAL" + default = "" + } From 7029e1b2feebd905a647e19477dbf99fcb9a3511 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 20 Aug 2024 19:09:59 +0300 Subject: [PATCH 062/164] Allow for splitting out the privatelink resources (#616) * Add the ability to split out the privatelink resources * feat: Add privatelinkdnsmanagement provider configuration * refactor: Update storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in dns.tf and storage_accounts.tf * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names.table_dns_zone_name * refactor: Update DNS zone names in infrastructure.tf, key_vault.tf, and keyvault_endpoint.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in keyvault_endpoint.tf and storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update count condition in dns.tf to use local.use_local_privatelink_dns instead of negation of it * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf and sap_deployer/tfvar_variables.tf to use var.dns_settings.dns_zone_names * Add the ability to split out DNS records for privatelink resources * refactor: Update DNS zone names to use var.dns_settings.dns_zone_names * refactor: Add privatelink DNS resource group and subscription properties to LandscapeModel * refactor: Update DNS zone names in infrastructure.tf, key_vault.tf, and keyvault_endpoint.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in LandscapeDetails.json, storage_accounts.tf, infrastructure.tf, and transform.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in transform.tf to use var.dns_settings.dns_zone_names * refactor: Update DNS zone names in storage_accounts.tf to use var.dns_settings.dns_zone_names * Add register_virtual_network_to_dns attribute * Add the ability to control the patch mode * add vm_agent_platform_updates_enabled * refactor: Remove patch_mode from vm-scs.tf * refactor: Remove patch_mode from vm-anchor.tf * Add auto update of the extensions * refactor: Tweak the Windows patch mode * Windows update settings * Debug show SystemD version * refactor: Update SystemD version debug message in 1.17 Generic Pacemaker role * refactor: Update VM patch information in SystemModel and LandscapeModel * refactor: Update Process limit 
configuration in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for pacemaker version in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for systemd version in 1.17 Generic Pacemaker role * refactor: Update process limit configuration for systemd version in 1.17 Generic Pacemaker role * Remove the white space * fix: Associate the iSCSI subnet with the route table * refactor: Add python3-pip package for different node tiers in HA setup * refactor: remove the lower pipe from distro name * refactor: Split out OracleLinux tasks * refactor: Update iSCSI subnet association with route table * chore: Update NuGet.Packaging dependency to version 6.11.0 * TEswt if we can handle no read access scenarios to key vault * revert casing * refactor: Split out OracleLinux tasks * chore: Add condition to include custom repositories in 1.3 Repository tasks * refactor: Update 1.3 Repository tasks to include custom repositories for SUSE and RedHat * refactor: Remove unnecessary OracleLinux tasks and custom repositories * refactor: Update VM deployment configuration * Remove the token check * refactor: Add TF_VAR_agent_pat to control plane deployment pipeline * refactor: Fix private DNS zone ID in keyvault_endpoint.tf * Web App and version updates * Restore patch_mode * Web App updates * chore: Add System.Data.SqlClient package reference * refactor: Update 1.3 Repository tasks to include custom repositories for SUSE and RedHat * refactor: Update tfvar_variables.tf with new variables for tfstate storage account and deployer's tfstate file * Remove some of the python packages * Remove unnecessary python packages * refactor: Remove trailing spaces in LandscapeDetails.json and SystemDetails.json * refactor: Remove trailing spaces in LandscapeDetails.json and SystemDetails.json * Fix reboot on RHEL * refactor: Fix typo in DBLoad task names * refactor: Update cluster resource monitor intervals to 20 seconds * LINT fixes --------- Co-authored-by: Kimmo Forss --- Webapp/SDAF/Models/LandscapeModel.cs | 34 +- Webapp/SDAF/Models/SystemModel.cs | 12 + .../ParameterDetails/LandscapeDetails.json | 127 ++++-- .../ParameterDetails/LandscapeTemplate.txt | 81 ++-- .../SDAF/ParameterDetails/SystemDetails.json | 151 ++++--- .../SDAF/ParameterDetails/SystemTemplate.txt | 376 ++++++++++-------- Webapp/SDAF/SDAFWebApp.csproj | 14 +- .../roles-os/1.1-swap/handlers/main.yaml | 11 + .../tasks/1.17.2.0-cluster-Suse.yml | 22 +- .../1.3.2-custom-repositories-RedHat.yaml | 39 ++ .../roles-os/1.3-repository/tasks/main.yml | 8 +- .../roles-os/1.3-repository/vars/repos.yaml | 1 + .../tasks/1.4.0-packages-RedHat-prep.yaml | 1 - .../tasks/1.4.3-update-packages-RedHat.yaml | 1 + .../1.4-packages/vars/os-packages.yaml | 20 +- .../2.10-sap-notes/handlers/main.yaml | 12 +- .../roles-sap/5.1-dbload/tasks/main.yaml | 6 +- .../tasks/5.6.4.0-cluster-RedHat.yml | 4 +- .../tasks/5.6.4.0-cluster-Suse.yml | 4 +- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/pipelines/01-deploy-control-plane.yaml | 1 + .../bootstrap/sap_deployer/module.tf | 5 +- .../bootstrap/sap_deployer/providers.tf | 9 + .../bootstrap/sap_deployer/tfvar_variables.tf | 16 +- .../bootstrap/sap_deployer/transform.tf | 10 + .../terraform/bootstrap/sap_library/module.tf | 7 +- .../bootstrap/sap_library/providers.tf | 11 + .../bootstrap/sap_library/tfvar_variables.tf | 29 +- .../bootstrap/sap_library/transform.tf | 13 + deploy/terraform/run/sap_deployer/module.tf | 5 +- 
.../run/sap_deployer/tfvar_variables.tf | 17 +- .../terraform/run/sap_deployer/transform.tf | 11 + deploy/terraform/run/sap_landscape/module.tf | 12 +- deploy/terraform/run/sap_landscape/output.tf | 16 + .../run/sap_landscape/tfvar_variables.tf | 53 +++ .../terraform/run/sap_landscape/transform.tf | 19 + .../run/sap_landscape/variables_global.tf | 11 - deploy/terraform/run/sap_library/module.tf | 7 +- deploy/terraform/run/sap_library/providers.tf | 12 + .../run/sap_library/tfvar_variables.tf | 23 ++ deploy/terraform/run/sap_library/transform.tf | 11 + deploy/terraform/run/sap_system/module.tf | 24 +- .../run/sap_system/tfvar_variables.tf | 23 ++ deploy/terraform/run/sap_system/transform.tf | 15 + .../templates/configure_deployer.sh.tmpl | 1 + .../modules/sap_deployer/variables_global.tf | 32 +- .../modules/sap_deployer/vm-deployer.tf | 4 +- .../modules/sap_landscape/ams.tf | 12 + .../modules/sap_landscape/infrastructure.tf | 31 +- .../modules/sap_landscape/iscsi.tf | 20 + .../sap_landscape/key_vault_sap_landscape.tf | 25 +- .../modules/sap_landscape/storage_accounts.tf | 44 +- .../modules/sap_landscape/subnets.tf | 11 - .../modules/sap_landscape/variables_global.tf | 38 +- .../modules/sap_landscape/variables_local.tf | 2 +- .../modules/sap_landscape/vm.tf | 18 + .../modules/sap_library/dns.tf | 26 +- .../modules/sap_library/infrastructure.tf | 16 +- .../modules/sap_library/key_vault.tf | 9 +- .../modules/sap_library/keyvault_endpoint.tf | 12 +- .../modules/sap_library/providers.tf | 2 +- .../modules/sap_library/storage_accounts.tf | 44 +- .../modules/sap_library/variables_global.tf | 34 +- .../modules/sap_library/variables_local.tf | 3 +- .../sap_system/anydb_node/infrastructure.tf | 4 +- .../sap_system/anydb_node/variables_global.tf | 25 +- .../modules/sap_system/anydb_node/vm-anydb.tf | 20 +- .../sap_system/app_tier/infrastructure.tf | 12 +- .../sap_system/app_tier/variables_global.tf | 18 +- .../modules/sap_system/app_tier/vm-app.tf | 22 +- .../modules/sap_system/app_tier/vm-scs.tf | 18 +- .../modules/sap_system/app_tier/vm-webdisp.tf | 17 + .../common_infrastructure/storage_accounts.tf | 4 +- .../common_infrastructure/variables_global.tf | 38 +- .../common_infrastructure/vm-anchor.tf | 15 +- .../sap_system/hdb_node/infrastructure.tf | 4 +- .../sap_system/hdb_node/variables_global.tf | 30 +- .../modules/sap_system/hdb_node/vm-hdb.tf | 7 + 79 files changed, 1179 insertions(+), 727 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index fe1f241b8b..fd20dc3229 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -1,5 +1,3 @@ -using AutomationForm.Models; -using Microsoft.Azure.Pipelines.WebApi; using System.ComponentModel; using System.ComponentModel.DataAnnotations; using static AutomationForm.Models.CustomValidators; @@ -264,7 +262,7 @@ public bool IsValid() public string[] ANF_install_volume_zone { get; set; } - + /*---------------------------------------------------------------------------8 | | | DNS information | @@ -275,6 +273,10 @@ public bool IsValid() public string management_dns_subscription_id { get; set; } + public string privatelink_dns_resourcegroup_name { get; set; } + + public string privatelink_dns_subscription_id { get; set; } + public bool? 
use_custom_dns_a_registration { get; set; } = false; public string dns_label { get; set; } @@ -356,11 +358,23 @@ public bool IsValid() [PrivateEndpointIdValidator] public string install_private_endpoint_id { get; set; } -/*---------------------------------------------------------------------------8 -| | -| Utility VM information | -| | -+------------------------------------4--------------------------------------*/ + + + /*---------------------------------------------------------------------------8 + | | + | VM patch information | + | | + +------------------------------------4--------------------------------------*/ + + public string patch_mode { get; set; } = "ImageDefault"; + public string patch_assessment_mode { get; set; } = "ImageDefault"; + + + /*---------------------------------------------------------------------------8 + | | + | Utility VM information | + | | + +------------------------------------4--------------------------------------*/ public int? utility_vm_count { get; set; } = 0; @@ -369,7 +383,7 @@ public bool IsValid() public string utility_vm_os_disk_size { get; set; } = "128"; public string utility_vm_os_disk_type { get; set; } = "Premium_LRS"; - + public bool? utility_vm_useDHCP { get; set; } = true; public Image utility_vm_image { get; set; } @@ -445,7 +459,7 @@ public bool IsValid() public string nat_gateway_name { get; set; } - + [NATIdValidator(ErrorMessage = "Invalid NAT Gateway id")] public string nat_gateway_arm_id { get; set; } diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index dcf36f3eb4..e0c5da2989 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -668,6 +668,18 @@ public bool IsValid() public int? use_fence_kdump_lun_scs { get; set; } = 4; + + /*---------------------------------------------------------------------------8 + | | + | VM patch information | + | | + +------------------------------------4--------------------------------------*/ + + public string patch_mode { get; set; } = "ImageDefault"; + public string patch_assessment_mode { get; set; } = "ImageDefault"; + + + } public class Tag diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 603d27afd3..9755b9c8bd 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -118,24 +118,6 @@ "Section": "Infrastructure settings", "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/automation-configure-workload-zone#environment-parameters", "Parameters": [ - { - "Name": "deploy_monitoring_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "deploy_defender_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, { "Name": "place_delete_lock_on_resources", "Required": false, @@ -908,7 +890,7 @@ { "Name": "dns_server_list", "Required": false, - "Description": "Boolean value indicating if a custom dns record should be created for the storage account", + "Description": "List of IP addresses to add as DNS servers", "Type": "list", "Options": [ { @@ -963,6 +945,24 @@ "Options": [], "Overrules": "", "Display": 3 + }, + { + "Name": 
"privatelink_dns_subscription_id", + "Required": false, + "Description": "Subscription for the DNS zone containing the PrivateLink resources, if different from the management subscription", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "privatelink_dns_resourcegroup_name", + "Required": false, + "Description": "Resource group for the DNS zone containing the PrivateLink resources, if different from the SAP Library resource group", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 } ] }, @@ -1317,7 +1317,13 @@ ], "Overrules": "", "Display": 3 - }, + } + ] + }, + { + "Section": "Common Virtual Machine settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#vm-parameters", + "Parameters": [ { "Name": "user_assigned_identity_id", "Required": false, @@ -1326,7 +1332,79 @@ "Options": [], "Overrules": "", "Display": 3 + }, + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_mode", + "Required": false, + "Description": "Defines the patching mode for the Virtual Machines.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault (Linux)", + "Value": "ImageDefault" + }, + { + "Text": "Manual (Windows)", + "Value": "Manual" + }, + { + "Text": "AutomaticByOS (Windows)", + "Value": "AutomaticByOS" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_assessment_mode", + "Required": false, + "Description": "Specifies the mode of VM Guest Patching for the Virtual Machine.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault", + "Value": "ImageDefault" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 } + + ] }, { @@ -1633,15 +1711,6 @@ "Overrules": "", "Display": 2 }, - { - "Name": "management_dns_subscription_id", - "Required": false, - "Description": "Subscription for the DNS zone, if different from the management subscription", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, { "Name": "nat_gateway_idle_timeout_in_minutes", "Required": false, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 2d2c740c2c..dee38b8200 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -36,28 +36,6 @@ $$Description$$ #If you want to provide a custom naming json use the following parameter. 
$$name_override_file$$ -# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines -$$deploy_monitoring_extension$$ - -# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines -$$deploy_defender_extension$$ - - -######################################################################################### -# # -# Resource group details # -# # -######################################################################################### - -# The two resource group name and arm_id can be used to control the naming and the creation of the resource group - -# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned -$$resourcegroup_name$$ - -# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment -$$resourcegroup_arm_id$$ - - ######################################################################################### # # # Networking # @@ -291,22 +269,68 @@ $$storage_subnet_nsg_arm_id$$ $$storage_subnet_nsg_name$$ +######################################################################################### +# # +# Common Virtual Machine settings # +# # +######################################################################################### + +# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual Machines +$$user_assigned_identity_id$$ + +# If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the Virtual Machines +$$deploy_monitoring_extension$$ + +# If defined, will add the Microsoft.Azure.Security.Monitoring extension to the Virtual Machines +$$deploy_defender_extension$$ + +# If defined, defines the patching mode for the Virtual Machines +$$patch_mode$$ + +# If defined, defines the mode of VM Guest Patching for the Virtual Machines +$$patch_assessment_mode$$ + + +######################################################################################### +# # +# Resource group details # +# # +######################################################################################### + +# The two resource group name and arm_id can be used to control the naming and the creation of the resource group + +# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned +$$resourcegroup_name$$ + +# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment +$$resourcegroup_arm_id$$ + + + ######################################################################################### # # # DNS Settings # # # ######################################################################################### -# custom dns resource group name -$$management_dns_resourcegroup_name$$ -# custom dns subscription +# Subscription for the resource group containing the Private DNS zone for the compute resources $$management_dns_subscription_id$$ +# Resource group name for the resource group containing the Private DNS zone for the compute resources +$$management_dns_resourcegroup_name$$ + +# Subscription for the resource group containing the Private DNS zone for the Privatelink resources +$$privatelink_dns_subscription_id$$ + +# Resource group name for the resource group containing the Private DNS zone for the Privatelink resources +$$privatelink_dns_resourcegroup_name$$ + + # Defines if a custom dns solution is used 
$$use_custom_dns_a_registration$$ -# Defines if the Virtual network for the Virtual machines is registered with DNS +# Defines if the Virtual network for the Virtual Machines is registered with DNS # This also controls the creation of DNS entries for the load balancers $$register_virtual_network_to_dns$$ @@ -527,9 +551,6 @@ $$iscsi_nic_ips$$ # Defines the Availability zones for the iSCSI devices $$iscsi_vm_zones$$ -# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines -$$user_assigned_identity_id$$ - ######################################################################################### # # # Terraform deployment parameters # @@ -614,7 +635,7 @@ $$deploy_nat_gateway$$ # If provided, the name of the NAT Gateway $$nat_gateway_name$$ -# If provided, the Azure resource id for the NAT Gateway +# If provided, the Azure resource id for the NAT Gateway $$nat_gateway_arm_id$$ # If provided, the zones for the NAT Gateway public IP diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index 9723b7b816..82f81cf6b7 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -260,51 +260,6 @@ "Overrules": "", "Display": 3 }, - { - "Name": "deploy_v1_monitoring_extension", - "Required": false, - "Description": "Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed.", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "deploy_monitoring_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "deploy_defender_extension", - "Required": false, - "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", - "Type": "checkbox", - "Options": [], - "Overrules": "", - "Display": 2 - }, - { - "Name": "vm_disk_encryption_set_id", - "Required": false, - "Description": "Azure resource identifier for custom encryption key to use for disk encryption.", - "Type": "field", - "Options": [], - "Overrules": "", - "Display": 3 - }, - { - "Name": "user_assigned_identity_id", - "Required": false, - "Description": "Azure resource identifier for User assigned identity.", - "Type": "lookup", - "Options": [], - "Overrules": "", - "Display": 3 - }, { "Name": "upgrade_packages", "Required": false, @@ -1351,6 +1306,112 @@ } ] }, + { + "Section": "Common Virtual Machine settings", + "Link": "https://learn.microsoft.com/en-us/azure/sap/automation/configure-workload-zone#vm-parameters", + "Parameters": [ + { + "Name": "user_assigned_identity_id", + "Required": false, + "Description": "Azure resource identifier for User assigned identity.", + "Type": "lookup", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "deploy_monitoring_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": "deploy_defender_extension", + "Required": false, + "Description": "If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + { + "Name": 
"deploy_v1_monitoring_extension", + "Required": false, + "Description": "Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "vm_disk_encryption_set_id", + "Required": false, + "Description": "Azure resource identifier for custom encryption key to use for disk encryption.", + "Type": "field", + "Options": [], + "Overrules": "", + "Display": 3 + }, + { + "Name": "patch_mode", + "Required": false, + "Description": "Defines the patching mode for the Virtual Machines.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault (Linux)", + "Value": "ImageDefault" + }, + { + "Text": "Manual (Windows)", + "Value": "Manual" + }, + { + "Text": "AutomaticByOS (Windows)", + "Value": "AutomaticByOS" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + }, + { + "Name": "patch_assessment_mode", + "Required": false, + "Description": "Specifies the mode of VM Guest Patching for the Virtual Machine.", + "Type": "lookup", + "Options": [ + { + "Text": "", + "Value": "" + }, + { + "Text": "ImageDefault", + "Value": "ImageDefault" + }, + { + "Text": "AutomaticByPlatform (Azure Orchestrated patching)", + "Value": "AutomaticByPlatform" + } + ], + "Overrules": "", + "Display": 2 + } + + + ] + }, + { "Section": "Cluster settings", "Link": "https://learn.microsoft.com/en-us/azure/virtual-machines/workloads/sap/automation-configure-system#environment-parameters", diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 0c692279fc..573202cb0d 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -1,35 +1,32 @@ -########################################################################################## -# # -# Deployment topologies # -# # -# Standard (All roles on same server) # -# Define the database tier values and set enable_app_tier_deployment to false # -# # -# Distributed (1+1 or 1+1+N) # -# Define the database tier values and define scs_server_count = 1, # -# application_server_count >= 1 # -# # -# High Availability # -# Define the database tier values and database_high_availability = true # -# scs_server_count = 1 and scs_high_availability = true # -# application_server_count >= 1 # -# # -########################################################################################## - -########################################################################################## -# # -# This sample defines an Distributed deployment # -# # -########################################################################################## - -# The automation supports both creating resources (greenfield) or using existing resources (brownfield) -# For the greenfield scenario the automation defines default names for resources, -# if there is a XXXXname variable then the name is customizable -# for the brownfield scenario the Azure resource identifiers for the resources must be specified +######################################################################################### +# # +# Deployment topologies # +# # +# Standard (All roles on same server) # +# Define the database tier values and set enable_app_tier_deployment to false # +# # +# Distributed (1+1 or 1+1+N) # +# Define the database tier values and define scs_server_count = 1, # +# 
application_server_count >= 1 # +# # +# High Availability # +# Define the database tier values and database_high_availability = true # +# scs_server_count = 1 and scs_high_availability = true # +# application_server_count >= 1 # +# # +# The automation supports both creating resources (greenfield) or using existing # +# resources (brownfield). # +# # +# For the greenfield scenario the automation defines default names for resources, # +# if there is a XXXXname variable then the name is customizable. # +# For the brownfield scenario the Azure resource identifiers for the resources must # +# be specified using the XXXX_armid fields. # +# # +######################################################################################### ######################################################################################### # # -# Environment definitions # +# Environment/Application definitions # # # ######################################################################################### @@ -39,27 +36,37 @@ $$environment$$ # The location value is a mandatory field, it is used to control where the resources are deployed $$location$$ +# The sid value is a mandatory field that defines the SAP Application SID +$$sid$$ + +# The database_sid defines the database SID +$$database_sid$$ + +# The database_platform defines the database backend, supported values are +# - HANA +# - DB2 +# - ORACLE +# - ORACLE-ASM +# - SYBASE +# - SQLSERVER +# - NONE (in this case no database tier is deployed) +$$database_platform$$ + # Description of the SAP system. $$Description$$ +######################################################################################### +# # +# Deployment parameters # +# # +######################################################################################### -#If you want to customize the disk sizes for VMs use the following parameter to specify the custom sizing file. -$$custom_disk_sizes_filename$$ #If you want to provide a custom naming json use the following parameter. $$name_override_file$$ -# save_naming_information,defines that a json formatted file defining the resource names will be created -$$save_naming_information$$ - -# custom_prefix defines the prefix that will be added to the resource names -$$custom_prefix$$ - -# use_prefix defines if a prefix will be added to the resource names -$$use_prefix$$ - -# use_zonal_markers defines if a zonal markers will be added to the virtual machine resource names -$$use_zonal_markers$$ +#If you want to customize the disk sizes for VMs use the following parameter to specify the custom sizing file. +$$custom_disk_sizes_filename$$ # use_secondary_ips controls if the virtual machines should be deployed with two IP addresses. 
Required for SAP Virtual Hostname support $$use_secondary_ips$$ @@ -73,118 +80,18 @@ $$use_scalesets_for_deployment$$ # scaleset_id defines the scale set Azure resource Id $$scaleset_id$$ - # database_use_premium_v2_storage defines if the database tier will use premium v2 storage $$database_use_premium_v2_storage$$ # upgrade_packages defines if all packages should be upgraded after installation $$upgrade_packages$$ -# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines -$$user_assigned_identity_id$$ - -######################################################################################### -# # -# Networking # -# By default the networking is defined in the workload zone # -# Only use this section if the SID needs unique subnets/NSGs # -# # -# The deployment automation supports two ways of providing subnet information. # -# 1. Subnets are defined as part of the workload zone deployment # -# In this model multiple SAP System share the subnets # -# 2. Subnets are deployed as part of the SAP system # -# In this model each SAP system has its own sets of subnets # -# # -# The automation supports both creating the subnets (greenfield) # -# or using existing subnets (brownfield) # -# For the greenfield scenario the subnet address prefix must be specified whereas # -# for the brownfield scenario the Azure resource identifier for the subnet must # -# be specified # -# # -######################################################################################### - -# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name -$$network_logical_name$$ - -# use_loadbalancers_for_standalone_deployments is a boolean flag that can be used to control if standalone deployments (non HA) will have load balancers -$$use_loadbalancers_for_standalone_deployments$$ - -# use_private_endpoint is a boolean flag controlling if the key vaults and storage accounts have private endpoints -$$use_private_endpoint$$ - - -######################################################################################### -# # -# Cluster settings # -# # -######################################################################################### - -# scs_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI -$$scs_cluster_type$$ - -#scs_cluster_disk_lun defines the LUN number for the SAP Central Services cluster disk -$$scs_cluster_disk_lun$$ - -#scs_cluster_disk_size defines the size for the SAP Central Services cluster disk -$$scs_cluster_disk_size$$ - -#scs_cluster_disk_type defines the storage_account_type of the shared disk for the SAP Central Services cluster -$$scs_cluster_disk_type$$ - -# database_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI -$$database_cluster_type$$ - -#database_cluster_disk_lun defines the LUN number for the database cluster disk -$$database_cluster_disk_lun$$ - -#database_cluster_disk_size defines the size for the database cluster disk -$$database_cluster_disk_size$$ - -#database_cluster_disk_type defines the storage_account_type of the shared disk for the Database cluster -$$database_cluster_disk_type$$ - -# use_msi_for_clusters if defined will use managed service identity for the Pacemaker cluster fencing -$$use_msi_for_clusters$$ - -# fencing_role_name, If specified the role name to use for the fencing agent -$$fencing_role_name$$ - -# use_simple_mount specifies if Simple mounts are 
used (Applicable for SLES 15 SP# or newer) -$$use_simple_mount$$ - -# Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters -$$use_fence_kdump$$ - -# Default size of the kdump disk which will be attached to the VMs which are part DB cluster -$$use_fence_kdump_size_gb_db$$ - -# Default LUN number of the kdump disk which will be attached to the VMs which are part of DB cluster -$$use_fence_kdump_lun_db$$ - -# Default size of the kdump disk which will be attached to the VMs which are part of SCS cluster -$$use_fence_kdump_size_gb_scs$$ - -# Default LUN number of the kdump disk which will be attached to the VMs which are part of SCS cluster -$$use_fence_kdump_lun_scs$$ - ######################################################################################### # # # Database tier # # # # ######################################################################################### -$$database_sid$$ - -# database_platform defines the database backend, supported values are -# - HANA -# - DB2 -# - ORACLE -# - ORACLE-ASM -# - SYBASE -# - SQLSERVER -# - NONE (in this case no database tier is deployed) -$$database_platform$$ - # Defines the number of database servers $$database_server_count$$ @@ -285,7 +192,6 @@ $$database_use_avset$$ # Optional, Defines if the tags for the database virtual machines $$database_tags$$ - ######################################################################################### # # # Application tier # # @@ -300,9 +206,6 @@ $$enable_app_tier_deployment$$ # app_tier_use_DHCP is a boolean flag controlling if Azure subnet provided IP addresses should be used (true) $$app_tier_use_DHCP$$ -# sid is a mandatory field that defines the SAP Application SID -$$sid$$ - ######################################################################################### # # # SAP Central Services # @@ -466,37 +369,86 @@ $$webdispatcher_server_zones$$ $$webdispatcher_server_image$$ - ######################################################################################### # # -# Miscellaneous settings # +# Common Virtual Machine settings # # # ######################################################################################### -# resource_offset can be used to provide an offset for resource naming -# server#, disk# -$$resource_offset$$ +# user_assigned_identity_id defines the user assigned identity to be assigned to the Virtual machines +$$user_assigned_identity_id$$ # vm_disk_encryption_set_id if defined defines the custom encryption key $$vm_disk_encryption_set_id$$ -# deploy_application_security_groups if defined will create application security groups -$$deploy_application_security_groups$$ - -# deploy_v1_monitoring_extension Defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed -$$deploy_v1_monitoring_extension$$ - # If defined, will add the Microsoft.Azure.Monitor.AzureMonitorLinuxAgent extension to the virtual machines $$deploy_monitoring_extension$$ # If defined, will add the Microsoft.Azure.Security.Monitoring extension to the virtual machines $$deploy_defender_extension$$ -# dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names -$$dns_a_records_for_secondary_names$$ +# If defined, defines the patching mode for the virtual machines +$$patch_mode$$ + +# If defined, defines the mode of VM Guest Patching for the Virtual Machine +$$patch_assessment_mode$$ + + + +######################################################################################### +# # +# Cluster settings # 
+# #
+#########################################################################################
+
+# scs_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI
+$$scs_cluster_type$$
+
+# scs_cluster_disk_lun defines the LUN number for the SAP Central Services cluster disk
+$$scs_cluster_disk_lun$$
+
+# scs_cluster_disk_size defines the size for the SAP Central Services cluster disk
+$$scs_cluster_disk_size$$
+
+# scs_cluster_disk_type defines the storage_account_type of the shared disk for the SAP Central Services cluster
+$$scs_cluster_disk_type$$
+
+# database_cluster_type defines cluster quorum type; AFA (Azure Fencing Agent), ASD (Azure Shared Disk), ISCSI
+$$database_cluster_type$$
+
+# database_cluster_disk_lun defines the LUN number for the database cluster disk
+$$database_cluster_disk_lun$$
+
+# database_cluster_disk_size defines the size for the database cluster disk
+$$database_cluster_disk_size$$
+
+# database_cluster_disk_type defines the storage_account_type of the shared disk for the database cluster
+$$database_cluster_disk_type$$
+
+# use_msi_for_clusters if defined will use managed service identity for the Pacemaker cluster fencing
+$$use_msi_for_clusters$$
+
+# fencing_role_name, if specified, the role name to use for the fencing agent
+$$fencing_role_name$$
+
+# use_simple_mount specifies if Simple mounts are used (Applicable for SLES 15 SP# or newer)
+$$use_simple_mount$$
+
+# Configure fencing device based on the fence agent fence_kdump for both SCS and DB clusters
+$$use_fence_kdump$$
+
+# Default size of the kdump disk which will be attached to the VMs which are part of the DB cluster
+$$use_fence_kdump_size_gb_db$$
+
+# Default LUN number of the kdump disk which will be attached to the VMs which are part of the DB cluster
+$$use_fence_kdump_lun_db$$
+
+# Default size of the kdump disk which will be attached to the VMs which are part of the SCS cluster
+$$use_fence_kdump_size_gb_scs$$
+
+# Default LUN number of the kdump disk which will be attached to the VMs which are part of the SCS cluster
+$$use_fence_kdump_lun_scs$$
-
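As an illustration of how this cluster settings block is typically rendered, a hypothetical filled-in fragment follows; the values are examples only, not defaults shipped with the automation:

# Hypothetical example - Azure Shared Disk based SCS cluster
scs_cluster_type      = "ASD"
scs_cluster_disk_lun  = 5
scs_cluster_disk_size = 128
scs_cluster_disk_type = "Premium_ZRS"

# Hypothetical example - fencing-agent based database cluster using managed identity
database_cluster_type = "AFA"
use_msi_for_clusters  = true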

 #########################################################################################
 # #
@@ -536,7 +488,7 @@ $$ANF_HANA_use_Zones$$

 #########################################################################################
 # #
-# HANA Data #
+# Azure NetApp Files - HANA Data #
 # #
 #########################################################################################

@@ -561,7 +513,7 @@ $$ANF_HANA_data_volume_count$$

 #########################################################################################
 # #
-# HANA Log #
+# Azure NetApp Files - HANA Log #
 # #
 #########################################################################################

@@ -585,7 +537,7 @@ $$ANF_HANA_log_volume_count$$

 #########################################################################################
 # #
-# HANA Shared #
+# Azure NetApp Files - HANA Shared #
 # #
 #########################################################################################

@@ -607,7 +559,7 @@ $$ANF_HANA_shared_volume_name$$

 #########################################################################################
 # #
-# Azure NetApp Files /usr/sap #
+# Azure NetApp Files - /usr/sap #
 # #
 #########################################################################################

@@ -629,7 +581,7 @@ $$ANF_usr_sap_volume_name$$

 #########################################################################################
 # #
-# Azure NetApp Files sapmnt #
+# Azure NetApp Files - sapmnt #
 # #
 #########################################################################################

@@ -679,21 +631,20 @@ $$vm_disk_encryption_set_id$$

 $$nsg_asg_with_vnet$$

 #########################################################################################
-# RESOURCE GROUP
-# The two resource group name and arm_id can be used to control the naming and the creation of the resource group
-# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
-# The resourcegroup_name arm_id is optional, it can be used to provide an existing resource group for the deployment
+# #
+# Resource Group #
+# #
 #########################################################################################

+# The resourcegroup_name value is optional, it can be used to override the name of the resource group that will be provisioned
 $$resourcegroup_name$$

+# The resourcegroup_arm_id is optional, it can be used to provide an existing resource group for the deployment
 $$resourcegroup_arm_id$$

 #########################################################################################
 # #
-# PPG #
-# The proximity placement group names and arm_ids are optional can be used to
-# control the naming and the creation of the proximity placement groups
+# Proximity Placement Group #
 # #
 #########################################################################################

@@ -714,16 +665,51 @@ $$app_proximityplacementgroup_arm_ids$$

 #########################################################################################
 # #
-# Key Vault variables #
+# Key Vault information #
 # #
 #########################################################################################

+# If defined, specifies the Azure resource identifier for an existing key vault.
+# Designed to host the SAP system credentials
 $$user_keyvault_id$$

+# If defined, specifies the Azure resource identifier for an existing key vault.
+# Designed to host the deployment credentials used by the automation
 $$spn_keyvault_id$$

+# If defined, will enable purge control for the key vaults
 $$enable_purge_control_for_keyvaults$$
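For orientation, both key vault parameters expect a full Azure resource identifier in the standard ARM format. A hypothetical rendered example is shown below; the subscription ID, resource group, and vault names are placeholders:

# Hypothetical example - Azure resource identifiers for existing key vaults
user_keyvault_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/DEV-WEEU-SAP01-INFRASTRUCTURE/providers/Microsoft.KeyVault/vaults/DEVWEEUSAP01user"
spn_keyvault_id  = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MGMT-WEEU-DEP00-INFRASTRUCTURE/providers/Microsoft.KeyVault/vaults/MGMTWEEUDEP00user"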
+#########################################################################################
+# #
+# Networking #
+# By default the networking is defined in the workload zone #
+# Only use this section if the SID needs unique subnets/NSGs #
+# #
+# The deployment automation supports two ways of providing subnet information. #
+# 1. Subnets are defined as part of the workload zone deployment #
+# In this model multiple SAP systems share the subnets #
+# 2. Subnets are deployed as part of the SAP system #
+# In this model each SAP system has its own set of subnets #
+# #
+# The automation supports both creating the subnets (greenfield) #
+# or using existing subnets (brownfield) #
+# For the greenfield scenario the subnet address prefix must be specified whereas #
+# for the brownfield scenario the Azure resource identifier for the subnet must #
+# be specified #
+# #
+#########################################################################################
+
+# The network logical name is mandatory - it is used in the naming convention and should map to the workload virtual network logical name
+$$network_logical_name$$
+
+# use_loadbalancers_for_standalone_deployments is a boolean flag that can be used to control if standalone deployments (non HA) will have load balancers
+$$use_loadbalancers_for_standalone_deployments$$
+
+# use_private_endpoint is a boolean flag controlling if the key vaults and storage accounts have private endpoints
+$$use_private_endpoint$$
+
+
 #########################################################################################
 # #
 # Admin Subnet variables #
@@ -926,4 +912,46 @@ $$enable_os_monitoring$$

 $$ams_resource_id$$

+#########################################################################################
+# #
+# DNS settings #
+# #
+#########################################################################################
+
+# dns_a_records_for_secondary_names defines if DNS records should be created for the virtual host names
+$$dns_a_records_for_secondary_names$$
+
+# register_endpoints_with_dns defines if the endpoints should be registered with the DNS
+$$register_endpoints_with_dns$$
+
+
+
+#########################################################################################
+# #
+# Miscellaneous settings #
+# #
+#########################################################################################
+
+# deploy_application_security_groups if defined will create application security groups
+$$deploy_application_security_groups$$
+
+# deploy_v1_monitoring_extension defines if the Microsoft.AzureCAT.AzureEnhancedMonitoring extension will be deployed
+$$deploy_v1_monitoring_extension$$
+
+# resource_offset can be used to provide an offset for resource naming
+# server#, disk#
+$$resource_offset$$
+
+# save_naming_information, if defined, a JSON formatted file with the resource names will be created
+$$save_naming_information$$
+
+# custom_prefix defines the prefix that will be added to the resource names
+$$custom_prefix$$
+
+# use_prefix defines if a prefix will be added to the resource names
+$$use_prefix$$
+
+# use_zonal_markers defines if zonal markers will be added to the virtual machine resource names
+$$use_zonal_markers$$
+
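To make the naming controls above concrete, a hypothetical rendered fragment follows. The semantics assumed here (and not confirmed by this patch) are that the offset shifts the numeric suffix of generated names and that the prefix and zonal-marker flags shape the generated resource names:

# Hypothetical example - resource naming controls (assumed semantics)
resource_offset         = 1      # assumed: first disk is numbered disk1 instead of disk0
use_prefix              = true
custom_prefix           = "DEV"  # placeholder prefix
use_zonal_markers       = true
save_naming_information = true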
"1.1 Swap: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 # ... diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml index 892791ec8e..94edb602a7 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-Suse.yml @@ -6,19 +6,31 @@ - name: "1.17 Generic Pacemaker - Ensure a list of package version is available for checking the cloud-netconfig-azure version" ansible.builtin.package_facts: +- name: "1.17 Generic Pacemaker - Debug systemd version" + ansible.builtin.debug: + msg: "SystemD version {{ ansible_facts.packages['systemd'][0].version }}" + verbosity: 2 + # Pacemaker can create a large number of processes -- name: "1.17 Generic Pacemaker - Ensure Process limit is raised" +- name: "1.17 Generic Pacemaker - Ensure Process limit is raised (systemd < 234)" ansible.builtin.lineinfile: path: /etc/systemd/system.conf state: present regexp: "^#?\\s*DefaultTasksMax=" line: "DefaultTasksMax=4096" register: raise_process_limit - when: (ansible_facts.packages['systemd'][0].version | float) < 234 + when: ansible_facts.packages['systemd'][0].version is version('234', '<') # Create a drop in file for systemd.conf to raise the process limit in the directory # /etc/systemd/system.conf.d and update the value of DefaultTasksMax to 4096 -- name: "1.17 Generic Pacemaker - Ensure Process limit is raised" +- name: "1.17 Generic Pacemaker - Create directory for drop file (systemd > 233)" + ansible.builtin.file: + path: /etc/systemd/system.conf.d + state: directory + mode: '0644' + when: ansible_facts.packages['systemd'][0].version is version('234', '>=') + +- name: "1.17 Generic Pacemaker - Ensure Process limit is raised (systemd > 233)" ansible.builtin.copy: dest: /etc/systemd/system.conf.d/99-pacemaker.conf content: | @@ -26,7 +38,7 @@ DefaultTasksMax=4096 mode: '0644' register: raise_process_limit - when: (ansible_facts.packages['systemd'][0].version | float) > 233 + when: ansible_facts.packages['systemd'][0].version is version('234', '>=') # eth0 is the "db" NIC - name: "1.17 Generic Pacemaker - Ensure clustering can manage Virtual IPs on the Database Interface" @@ -37,7 +49,7 @@ line: "CLOUD_NETCONFIG_MANAGE='no'" when: - ansible_facts.packages['cloud-netconfig-azure'] - - (ansible_facts.packages['cloud-netconfig-azure'][0].version | float) < 1.3 + - ansible_facts.packages['cloud-netconfig-azure'][0].version is version('1.3', '<') - name: "1.17 Generic Pacemaker - Stop SBD service" ansible.builtin.systemd: diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml index e69de29bb2..70854f2a01 100644 --- a/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml +++ b/deploy/ansible/roles-os/1.3-repository/tasks/1.3.2-custom-repositories-RedHat.yaml @@ -0,0 +1,39 @@ +# Analyse the repo list for this distribution selecting only those +# packages assigned to the active tier or 'all'. 
+- name: "1.3 Repository - Determine custom repos appropriate for tier {{ distribution_full_id }}" + ansible.builtin.set_fact: + custom_repos_for_tier: "{{ custom_repos[distribution_full_id] | + selectattr('tier', 'in', ['all', tier]) | + list }}" + +# Print list of matching repos if verbosity it 1 or greater +- name: "1.3 Repos: Print matching repos" + ansible.builtin.debug: + var: "{{ custom_repos_for_tier }}" + verbosity: 2 + when: + - custom_repos_for_tier is defined + - custom_repos_for_tier | length > 0 + +- name: "1.3 Repos: Add the repositories {{ ansible_os_family }}" + ansible.builtin.dnf: + name: "{{ item.url }}" + state: "{{ item.state }}" + disable_gpg_check: true + loop: "{{ custom_repos_for_tier }}" + register: custom_repos_zypresult + ignore_errors: true + +- name: "1.3 Repos: Add the repositories result" + ansible.builtin.debug: + var: custom_repos_zypresult + verbosity: 2 + +- name: "1.3 Repos: Add the HA repositories for RHEL" + ansible.builtin.dnf: + enablerepo: rhel-9-for-x86_64-highavailability-rpms + disable_gpg_check: true + changed_when: false + when: + - distribution_id in ['redhat9'] + - node_tier == 'ha' diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml index 5b7bb12108..061468ec7f 100644 --- a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml +++ b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml @@ -65,12 +65,16 @@ - name: "1.3 Repository: - Manage the repositories." ansible.builtin.include_tasks: "1.3.2-custom-repositories-Suse.yaml" - when: ansible_os_family | upper == 'SUSE' + when: + - custom_repos is defined + - ansible_os_family | upper == 'SUSE' # Doing it this way to handle also Oracle Distros - name: "1.3 Repository: - Prepare the repositories." 
diff --git a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
index 5b7bb12108..061468ec7f 100644
--- a/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
+++ b/deploy/ansible/roles-os/1.3-repository/tasks/main.yml
@@ -65,12 +65,16 @@

 - name: "1.3 Repository: - Manage the repositories."
   ansible.builtin.include_tasks: "1.3.2-custom-repositories-Suse.yaml"
-  when: ansible_os_family | upper == 'SUSE'
+  when:
+    - custom_repos is defined
+    - ansible_os_family | upper == 'SUSE'

 # Done this way to also handle the Oracle distributions
 - name: "1.3 Repository: - Prepare the repositories."
   ansible.builtin.include_tasks: "1.3.2-custom-repositories-RedHat.yaml"
-  when: ansible_os_family | upper == 'REDHAT'
+  when:
+    - custom_repos is defined
+    - ansible_os_family | upper == 'REDHAT'

 # - name: "1.3 Repos: Install EPEL repo"
 #   ansible.builtin.yum_repository:
diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml
index 7f7dd3f387..fb7b9518ff 100644
--- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml
+++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml
@@ -54,3 +54,4 @@ repos:
   oraclelinux8.7:
   oraclelinux8.8:
   oraclelinux8.9:
+  oraclelinux8.10:
diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml
index adb3559c87..f7ae05130e 100644
--- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml
+++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.0-packages-RedHat-prep.yaml
@@ -44,7 +44,6 @@
     - is_rhel_90_or_newer
     - init_d_exists

-
 # /*----------------------------------------------------------------------------8
 # |                                    END                                       |
 # +------------------------------------4---------------------------------------*/
diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
index d21f088831..844f8bce44 100644
--- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
+++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml
@@ -17,6 +17,7 @@
   register: reboot_output
   when:
     - tier == 'os'
+    - ansible_distribution != "OracleLinux"

 # Analyse the package list for this distribution selecting only those
 # packages assigned to the active tier or 'all'.
# - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index 2e4041684f..3352be0ea0 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -247,6 +247,10 @@ packages: - { tier: 'ha', package: 'nmap', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-common', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'ers', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'hana', state: 'present' } + - { tier: 'ha', package: 'python3-pip', node_tier: 'db2', state: 'present' } # ------------------------- End - Packages required for Clustering -----------------------------------------8 # ------------------------- Begin - Packages required for Start/Stop ------------------------------------8 - { tier: 'ha', package: 'sap-cluster-connector', node_tier: 'hana', state: 'present' } @@ -428,12 +432,6 @@ packages: - { tier: 'db2', package: 'system-user-bin', node_tier: 'db2', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'present' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'present' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' } @@ -446,12 +444,6 @@ packages: - { tier: 'ha', package: 'sapstartsrv-resource-agents', node_tier: 'ers', state: 'present' } # These package cause issues on SLES15 SP5 due to changes to the public cloud SDKs # https://www.suse.com/c/incompatible-changes-ahead-for-public-cloud-sdks/ - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'hana', state: 'absent' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'hana', state: 'absent' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'scs', state: 'absent' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'scs', state: 'absent' } - - { tier: 'ha', package: 'python3-azure-mgmt-compute', node_tier: 'ers', state: 'absent' } - - { tier: 'ha', package: 'python3-azure-identity', node_tier: 'ers', state: 'absent' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'hana', state: 'present' } @@ -553,3 +545,7 @@ packages: - { tier: 'os', package: 
'gdisk', node_tier: 'all', state: 'present' } # - { tier: 'os', package: 'kmod-oracleasm', node_tier: 'oracle-asm', state: 'present' } # - { tier: 'os', package: 'oracleasm-support', node_tier: 'oracle-asm', state: 'present' } + + oraclelinux8.10: + - { tier: 'os', package: 'oracle-database-preinstall-19c', node_tier: 'all', state: 'present' } + - { tier: 'os', package: 'gdisk', node_tier: 'all', state: 'present' } diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml index 0dbd71ecf0..ad97a23185 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/handlers/main.yaml @@ -3,4 +3,14 @@ - name: "2.10-sap-notes: Reboot after the selinux is configured" ansible.builtin.reboot: reboot_timeout: 300 -# ... + post_reboot_delay: 10 + failed_when: false +# +- name: "2.10-sap-notes: - Clear the failed state of hosts" + ansible.builtin.meta: clear_host_errors + +# Wait for Connection after reboot +- name: "2.10-sap-notes: - Wait for system to become reachable" + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 300 diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index 20acb2f725..fff35cfa12 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -24,7 +24,7 @@ - { mode: '0755', path: '{{ tmp_directory }}/{{ sid_to_be_deployed.sid | upper }}' } - { mode: '0755', path: '/etc/sap_deployment_automation/{{ sap_sid | upper }}' } -- name: "DBLoad: - reset" +- name: "DBLoad: - reset" ansible.builtin.file: path: "/etc/sap_deployment_automation/{{ sid_to_be_deployed.sid | upper }}/sap_deployment_dbload.txt" state: absent @@ -334,9 +334,9 @@ - job_result.rc is defined - job_result.rc == 0 - - name: "DBLoad: results" + - name: "DBLoad: results" ansible.builtin.debug: - msg: "DBLoad succeeded" + msg: "DBLoad succeeded" when: - job_result.rc is defined - job_result.rc == 0 diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml index 6fc8ada131..90da4bbaae 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-RedHat.yml @@ -38,7 +38,7 @@ directory='{{ profile_directory }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} \ --group g-{{ sap_sid | upper }}_{{ instance_type | upper }} register: ascs_fs_resource failed_when: ascs_fs_resource.rc > 1 @@ -181,7 +181,7 @@ directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no force_unmount=safe options='sec=sys,vers=4.1' \ op start interval=0 timeout=60 \ op stop interval=0 timeout=120 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} \ + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} \ --group g-{{ sap_sid | upper }}_ERS register: ers_fs_resource failed_when: ers_fs_resource.rc > 1 diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml index 
74de0225cd..b59507763c 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.4.0-cluster-Suse.yml @@ -27,7 +27,7 @@ directory='{{ profile_directory }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} register: ascs_fs_resource failed_when: ascs_fs_resource.rc > 1 @@ -172,7 +172,7 @@ directory='/usr/sap/{{ sap_sid | upper }}/ERS{{ ers_instance_number }}' fstype='nfs' fast_stop=no options='sec=sys,vers=4.1' \ op start timeout="{{ cluster_sap_scs_timeouts.start }}" interval=0 \ op stop timeout="{{ cluster_sap_scs_timeouts.stop }}" interval=0 \ - op monitor interval=200 timeout={{ clus_fs_mon_timeout | int }} + op monitor interval=20 timeout={{ clus_fs_mon_timeout | int }} register: ers_fs_resource failed_when: ers_fs_resource.rc > 1 diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 229d13744d..dbcc904045 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -5,7 +5,7 @@ become_user_name: root oracle_user_name: oracle orchestration_ansible_user: azureadm # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.11.0.3" +SDAF_Version: "3.12.0.0" # ------------------- End - SDAF Ansible Version ---------------------------8 # ------------------- Begin - OS Config Settings variables -------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index 36d9b11208..a57eb4c686 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.11.0.3 +3.12.0.0 diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index f8ce940d34..0a3d70d335 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -898,6 +898,7 @@ stages: TF_VAR_agent_pool: $(POOL) TF_VAR_agent_ado_url: $(System.CollectionUri) TF_VAR_tf_version: $(tf_version) + TF_VAR_agent_pat: $(PAT) AZURE_DEVOPS_EXT_PAT: $(PAT) IS_PIPELINE_DEPLOYMENT: true WEB_APP_CLIENT_SECRET: $(WEB_APP_CLIENT_SECRET) diff --git a/deploy/terraform/bootstrap/sap_deployer/module.tf b/deploy/terraform/bootstrap/sap_deployer/module.tf index fb0d867adf..9beee66193 100644 --- a/deploy/terraform/bootstrap/sap_deployer/module.tf +++ b/deploy/terraform/bootstrap/sap_deployer/module.tf @@ -31,7 +31,6 @@ module "sap_deployer" { configure = false deployer = local.deployer deployer_vm_count = var.deployer_count - dns_zone_names = var.dns_zone_names enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults firewall_deployment = local.firewall_deployment @@ -39,8 +38,6 @@ module "sap_deployer" { firewall_allowed_ipaddresses = local.firewall_allowed_ipaddresses infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = var.management_dns_resourcegroup_name - management_dns_subscription_id = var.management_dns_subscription_id options = local.options place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = 
var.public_network_access_enabled @@ -51,11 +48,11 @@ module "sap_deployer" { ssh-timeout = var.ssh-timeout subnets_to_add = var.subnets_to_add_to_firewall_for_keyvaults_and_storage tf_version = var.tf_version - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_service_endpoint = var.use_service_endpoint use_webapp = var.use_webapp webapp_client_secret = var.webapp_client_secret + dns_settings = local.dns_settings } module "sap_namegenerator" { diff --git a/deploy/terraform/bootstrap/sap_deployer/providers.tf b/deploy/terraform/bootstrap/sap_deployer/providers.tf index 464f38bc6c..c0b52989cf 100644 --- a/deploy/terraform/bootstrap/sap_deployer/providers.tf +++ b/deploy/terraform/bootstrap/sap_deployer/providers.tf @@ -57,6 +57,15 @@ provider "azurerm" { alias = "dnsmanagement" } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id), null) + alias = "privatelinkdnsmanagement" + skip_provider_registration = true + storage_use_azuread = true + } + + terraform { required_version = ">= 1.0" required_providers { diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index be8074c3b2..533594e903 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -426,13 +426,13 @@ variable "use_custom_dns_a_registration" { variable "management_dns_subscription_id" { description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null + default = "" type = string } variable "management_dns_resourcegroup_name" { description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null + default = "" type = string } @@ -449,6 +449,18 @@ variable "dns_zone_names" { } } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } + ######################################################################################### # # diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index 84f49ec231..23b8d31955 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -227,4 +227,14 @@ locals { app_id = var.app_registration_app_id client_secret = var.webapp_client_secret } + + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = trimspace(var.management_dns_subscription_id) + privatelink_dns_subscription_id = trimspace(coalesce(var.management_dns_subscription_id,var.privatelink_dns_subscription_id)) + privatelink_dns_resourcegroup_name = trimspace(coalesce(var.management_dns_resourcegroup_name, var.privatelink_dns_resourcegroup_name)) + } + } diff --git a/deploy/terraform/bootstrap/sap_library/module.tf 
b/deploy/terraform/bootstrap/sap_library/module.tf index f015481b40..deb16748ba 100644 --- a/deploy/terraform/bootstrap/sap_library/module.tf +++ b/deploy/terraform/bootstrap/sap_library/module.tf @@ -8,27 +8,24 @@ module "sap_library" { azurerm.main = azurerm.main azurerm.deployer = azurerm.deployer azurerm.dnsmanagement = azurerm.dnsmanagement + azurerm.privatelinkdnsmanagement = azurerm.privatelinkdnsmanagement } Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" bootstrap = true deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) - dns_label = var.dns_label - dns_zone_names = var.dns_zone_names infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) - management_dns_subscription_id = trimspace(var.management_dns_subscription_id) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming place_delete_lock_on_resources = var.place_delete_lock_on_resources service_principal = var.use_deployer ? local.service_principal : local.account short_named_endpoints_nics = var.short_named_endpoints_nics storage_account_sapbits = local.storage_account_sapbits storage_account_tfstate = local.storage_account_tfstate - use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_webapp = var.use_webapp + dns_settings = local.dns_settings } module "sap_namegenerator" { diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 165b85580b..688be3bf6c 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -66,6 +66,17 @@ provider "azurerm" { storage_use_azuread = true } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, local.spn.subscription_id), null) + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? 
local.spn.tenant_id : null
+  alias                      = "privatelinkdnsmanagement"
+  skip_provider_registration = true
+  storage_use_azuread        = true
+  }
+
 provider "azuread" {
   client_id     = local.spn.client_id
   client_secret = local.spn.client_secret
diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
index d792b636aa..37db9ba236 100644
--- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
+++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf
@@ -256,19 +256,31 @@ variable "add_Agent_IP" {
 #########################################################################################

 variable "use_custom_dns_a_registration" {
-  description = "Boolean value indicating if a custom dns a record should be created when using private endpoints"
+  description = "Boolean value indicating if a custom DNS A record should be created when using private endpoints"
   default     = false
   type        = bool
   }

 variable "management_dns_subscription_id" {
-  description = "String value giving the possibility to register custom dns a records in a separate subscription"
+  description = "String value giving the possibility to register custom DNS A records in a separate subscription"
   default     = ""
   type        = string
   }

 variable "management_dns_resourcegroup_name" {
-  description = "String value giving the possibility to register custom dns a records in a separate resourcegroup"
+  description = "String value giving the possibility to register custom DNS A records in a separate resourcegroup"
+  default     = ""
+  type        = string
+  }
+
+variable "privatelink_dns_subscription_id" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription"
+  default     = ""
+  type        = string
+  }
+
+variable "privatelink_dns_resourcegroup_name" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup"
   default     = ""
   type        = string
   }
@@ -285,3 +297,14 @@ variable "dns_zone_names" {
   }
 }

+variable "register_storage_accounts_keyvaults_with_dns" {
+  description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding DNS zones"
+  default     = true
+  type        = bool
+  }
+
+variable "register_endpoints_with_dns" {
+  description = "Boolean value indicating if endpoints should be registered to the DNS zone"
+  default     = true
+  type        = bool
+  }
diff --git a/deploy/terraform/bootstrap/sap_library/transform.tf b/deploy/terraform/bootstrap/sap_library/transform.tf
index fa4dc57114..a49eb28725 100644
--- a/deploy/terraform/bootstrap/sap_library/transform.tf
+++ b/deploy/terraform/bootstrap/sap_library/transform.tf
@@ -118,4 +118,17 @@ locals {
     public_network_access_enabled = var.public_network_access_enabled
   }

+  dns_settings = {
+    use_custom_dns_a_registration                = var.use_custom_dns_a_registration
+    dns_label                                    = var.dns_label
+    dns_zone_names                               = var.dns_zone_names
+    management_dns_resourcegroup_name            = trimspace(var.management_dns_resourcegroup_name)
+    management_dns_subscription_id               = trimspace(var.management_dns_subscription_id)
+    privatelink_dns_subscription_id              = trimspace(var.privatelink_dns_subscription_id)
+    privatelink_dns_resourcegroup_name           = trimspace(var.privatelink_dns_resourcegroup_name)
+    register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns
+    register_endpoints_with_dns                  = var.register_endpoints_with_dns
+  }
+
 }
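Several of the `dns_settings` blocks in this change rely on `coalesce()` chains so that a PrivateLink-specific value falls back to the management DNS value and finally to the SAP library default. A minimal, standalone sketch of that precedence (the variable values are invented, and Terraform's coalesce() skips both null and empty-string arguments):

# Hypothetical standalone sketch of the fallback order used in these locals:
# privatelink-specific value -> management DNS value -> SAP library default.
locals {
  example_privatelink_subscription = coalesce(
    "",                                      # privatelink_dns_subscription_id (unset)
    "11111111-1111-1111-1111-111111111111",  # management_dns_subscription_id
    "22222222-2222-2222-2222-222222222222"   # saplib_subscription_id
  )
  # coalesce() skips empty strings, so the management subscription wins here.
}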
diff --git a/deploy/terraform/run/sap_deployer/module.tf b/deploy/terraform/run/sap_deployer/module.tf
index 37c7b63032..8a1967650d 100644
--- a/deploy/terraform/run/sap_deployer/module.tf
+++ b/deploy/terraform/run/sap_deployer/module.tf
@@ -31,7 +31,6 @@ module "sap_deployer" {
   configure                                 = true
   deployer                                  = local.deployer
   deployer_vm_count                         = var.deployer_count
-  dns_zone_names                            = var.dns_zone_names
   enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage
   enable_purge_control_for_keyvaults        = var.enable_purge_control_for_keyvaults
   firewall_deployment                       = local.firewall_deployment
@@ -39,8 +38,6 @@ module "sap_deployer" {
   firewall_allowed_ipaddresses              = local.firewall_allowed_ipaddresses
   infrastructure                            = local.infrastructure
   key_vault                                 = local.key_vault
-  management_dns_resourcegroup_name         = var.management_dns_resourcegroup_name
-  management_dns_subscription_id            = var.management_dns_subscription_id
   options                                   = local.options
   place_delete_lock_on_resources            = var.place_delete_lock_on_resources
   public_network_access_enabled             = var.public_network_access_enabled
@@ -51,11 +48,11 @@ module "sap_deployer" {
   ssh-timeout                               = var.ssh-timeout
   subnets_to_add                            = var.subnets_to_add_to_firewall_for_keyvaults_and_storage
   tf_version                                = var.tf_version
-  use_custom_dns_a_registration             = var.use_custom_dns_a_registration
   use_private_endpoint                      = var.use_private_endpoint
   use_service_endpoint                      = var.use_service_endpoint
   use_webapp                                = var.use_webapp
   webapp_client_secret                      = var.webapp_client_secret
+  dns_settings                              = local.dns_settings
 }

diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
index cab0027079..46d648bfe2 100644
--- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf
@@ -429,13 +429,13 @@ variable "use_custom_dns_a_registration" {

 variable "management_dns_subscription_id" {
   description = "String value giving the possibility to register custom dns a records in a separate subscription"
-  default     = null
+  default     = ""
   type        = string
   }

 variable "management_dns_resourcegroup_name" {
   description = "String value giving the possibility to register custom dns a records in a separate resourcegroup"
-  default     = null
+  default     = ""
   type        = string
   }

 variable "dns_zone_names" {
@@ -450,6 +450,19 @@ variable "dns_zone_names" {
   }
 }

+variable "privatelink_dns_subscription_id" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription"
+  default     = ""
+  type        = string
+  }
+
+variable "privatelink_dns_resourcegroup_name" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup"
+  default     = ""
+  type        = string
+  }
+
+
 #########################################################################################
 # #
 # ADO definitions #
diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf
index 88936dcb28..f1102e122f 100644
--- a/deploy/terraform/run/sap_deployer/transform.tf
+++ b/deploy/terraform/run/sap_deployer/transform.tf
@@ -225,4 +225,15 @@ locals {
     client_secret = var.webapp_client_secret
   }

+  dns_settings = {
+    use_custom_dns_a_registration     = var.use_custom_dns_a_registration
+    dns_zone_names                    = var.dns_zone_names
+
+    management_dns_resourcegroup_name = coalesce(var.management_dns_resourcegroup_name, local.saplib_resource_group_name)
+    management_dns_subscription_id    = coalesce(var.management_dns_subscription_id, local.saplib_subscription_id)
+
+
privatelink_dns_subscription_id = coalesce(var.privatelink_dns_subscription_id,var.management_dns_subscription_id, local.saplib_subscription_id) + privatelink_dns_resourcegroup_name = coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name,local.saplib_resource_group_name) + } + } diff --git a/deploy/terraform/run/sap_landscape/module.tf b/deploy/terraform/run/sap_landscape/module.tf index ab1ae59733..13a5f7782c 100644 --- a/deploy/terraform/run/sap_landscape/module.tf +++ b/deploy/terraform/run/sap_landscape/module.tf @@ -20,9 +20,6 @@ module "sap_landscape" { create_transport_storage = var.create_transport_storage deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) diagnostics_storage_account = local.diagnostics_storage_account - dns_label = var.dns_label - dns_server_list = var.dns_server_list - dns_zone_names = var.dns_zone_names enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults enable_rbac_authorization_for_keyvault = var.enable_rbac_authorization_for_keyvault @@ -33,11 +30,6 @@ module "sap_landscape" { install_volume_size = var.install_volume_size key_vault = local.key_vault keyvault_private_endpoint_id = var.keyvault_private_endpoint_id - management_dns_subscription_id = try(var.management_dns_subscription_id, local.saplib_subscription_id) - management_dns_resourcegroup_name = lower(length(var.management_dns_resourcegroup_name) > 0 ? ( - var.management_dns_resourcegroup_name) : ( - local.saplib_resource_group_name - )) naming = length(var.name_override_file) > 0 ? ( local.custom_names) : ( module.sap_namegenerator.naming @@ -47,8 +39,6 @@ module "sap_landscape" { peer_with_control_plane_vnet = var.peer_with_control_plane_vnet place_delete_lock_on_resources = var.place_delete_lock_on_resources public_network_access_enabled = var.public_network_access_enabled - register_endpoints_with_dns = var.register_endpoints_with_dns - register_virtual_network_to_dns = var.register_virtual_network_to_dns service_principal = var.use_spn ? 
local.service_principal : local.account
   soft_delete_retention_days                   = var.soft_delete_retention_days
   storage_account_replication_type             = var.storage_account_replication_type
@@ -58,12 +48,12 @@
   transport_storage_account_id                 = var.transport_storage_account_id
   transport_volume_size                        = var.transport_volume_size
   use_AFS_for_shared_storage                   = var.use_AFS_for_shared_storage
-  use_custom_dns_a_registration                = var.use_custom_dns_a_registration
   use_deployer                                 = length(var.deployer_tfstate_key) > 0
   use_private_endpoint                         = var.use_private_endpoint
   use_service_endpoint                         = var.use_service_endpoint
   vm_settings                                  = local.vm_settings
   witness_storage_account                      = local.witness_storage_account
+  dns_settings                                 = local.dns_settings
 }

diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf
index 1a5a88b9ab..00f1f4143c 100644
--- a/deploy/terraform/run/sap_landscape/output.tf
+++ b/deploy/terraform/run/sap_landscape/output.tf
@@ -203,6 +203,17 @@ output "management_dns_subscription_id" {
   value       = var.management_dns_subscription_id
 }

+output "privatelink_dns_resourcegroup_name" {
+  description = "Resource group name for the resource group containing the PrivateLink DNS Zones"
+  value       = coalesce(var.privatelink_dns_resourcegroup_name, var.management_dns_resourcegroup_name, local.saplib_resource_group_name)
+  }
+
+output "privatelink_dns_subscription_id" {
+  description = "Subscription ID for the PrivateLink Private DNS Zones"
+  value       = coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id)
+  }
+
+
 output "privatelink_file_id" {
   description = "Azure resource identifier for the zone for the file resources"
   value       = module.sap_landscape.privatelink_file_id
@@ -213,6 +224,11 @@ output "register_virtual_network_to_dns" {
   value       = var.register_virtual_network_to_dns
 }

+output "register_storage_accounts_keyvaults_with_dns" {
+  description = "Boolean flag to indicate if the storage accounts and key vaults are registered to DNS"
+  value       = var.register_storage_accounts_keyvaults_with_dns
+  }
+
 output "use_custom_dns_a_registration" {
   description = "Defines if custom DNS is used"
   value       = var.use_custom_dns_a_registration
diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
index 016b76254f..88b306f343 100644
--- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf
+++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf
@@ -526,6 +526,18 @@ variable "management_dns_resourcegroup_name" {
   type        = string
   }

+variable "privatelink_dns_subscription_id" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription"
+  default     = ""
+  type        = string
+  }
+
+variable "privatelink_dns_resourcegroup_name" {
+  description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup"
+  default     = ""
+  type        = string
+  }
+

 variable "dns_server_list" {
   description = "DNS server list"
@@ -557,6 +569,13 @@ variable "register_endpoints_with_dns" {
   type        = bool
   }

+variable "register_storage_accounts_keyvaults_with_dns" {
+  description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding DNS zones"
+  default     = true
+  type        = bool
+  }
+
+
 #########################################################################################
 # #
 # ANF variables #
@@ -781,6 +800,18 @@ variable "utility_vm_nic_ips" {
   default     = []
 }

+variable "patch_mode" {
+  description = "If defined, defines the patching mode for the virtual machines"
+  default     = "ImageDefault"
+  }
+
+variable "patch_assessment_mode" {
+  description = "If defined, defines the VM guest patch assessment mode for the virtual machines"
+  default     = "ImageDefault"
+  }
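The two patching variables above map to the Azure VM guest patching settings, where "AutomaticByPlatform" is the usual alternative to the "ImageDefault" default. A hypothetical tfvars fragment opting in to platform-orchestrated patching (whether the surrounding automation validates these values is an assumption):

# Hypothetical example - opt in to platform-orchestrated patching
patch_mode            = "AutomaticByPlatform"
patch_assessment_mode = "AutomaticByPlatform"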
"patch_mode" { + description = "If defined, define the patch mode for the virtual machines" + default = "ImageDefault" + } + +variable "patch_assessment_mode" { + description = "If defined, define the patch mode for the virtual machines" + default = "ImageDefault" + } + + + ######################################################################################### # # # Tags # @@ -876,3 +907,25 @@ variable "nat_gateway_public_ip_tags" { type = map(string) default = null } + +#######################################4#######################################8 +# # +# Terraform variables # +# # +#######################################4#######################################8 + +variable "tfstate_resource_id" { + description = "Resource id of tfstate storage account" + validation { + condition = ( + length(split("/", var.tfstate_resource_id)) == 9 + ) + error_message = "The Azure Resource ID for the storage account containing the Terraform state files must be provided and be in correct format." + } + } + +variable "deployer_tfstate_key" { + description = "The name of deployer's remote tfstate file" + type = string + default = "" + } diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 9507d6f04c..9605ded6bb 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -186,6 +186,8 @@ locals { deploy_monitoring_extension = var.deploy_monitoring_extension deploy_defender_extension = var.deploy_defender_extension user_assigned_identity_id = var.user_assigned_identity_id + patch_mode = var.patch_mode + patch_assessment_mode = var.patch_assessment_mode } authentication = { @@ -625,4 +627,21 @@ locals { } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_label = var.dns_label + dns_zone_names = var.dns_zone_names + dns_server_list = var.dns_server_list + + management_dns_resourcegroup_name = coalesce(var.management_dns_resourcegroup_name,local.saplib_resource_group_name) + management_dns_subscription_id = coalesce(var.management_dns_subscription_id, local.saplib_subscription_id) + + privatelink_dns_subscription_id = coalesce(var.privatelink_dns_subscription_id,var.management_dns_subscription_id, local.saplib_subscription_id) + privatelink_dns_resourcegroup_name = coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name,local.saplib_resource_group_name) + + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + register_virtual_network_to_dns = var.register_virtual_network_to_dns + } + } diff --git a/deploy/terraform/run/sap_landscape/variables_global.tf b/deploy/terraform/run/sap_landscape/variables_global.tf index 634cba4217..d545c790e7 100644 --- a/deploy/terraform/run/sap_landscape/variables_global.tf +++ b/deploy/terraform/run/sap_landscape/variables_global.tf @@ -4,17 +4,6 @@ # # #######################################4#######################################8 -variable "tfstate_resource_id" { - description = "Resource id of tfstate storage account" - validation { - condition = ( - length(split("/", var.tfstate_resource_id)) == 9 - ) - error_message = "The Azure Resource ID for the storage account containing the Terraform state files must be provided and be in correct format." 
- } - } - -variable "deployer_tfstate_key" { description = "The key of deployer's remote tfstate file" } variable "NFS_provider" { type = string diff --git a/deploy/terraform/run/sap_library/module.tf b/deploy/terraform/run/sap_library/module.tf index e55874592e..eac1cf6372 100644 --- a/deploy/terraform/run/sap_library/module.tf +++ b/deploy/terraform/run/sap_library/module.tf @@ -8,17 +8,14 @@ module "sap_library" { azurerm.main = azurerm.main azurerm.deployer = azurerm.deployer azurerm.dnsmanagement = azurerm.dnsmanagement + azurerm.privatelinkdnsmanagement = azurerm.privatelinkdnsmanagement } Agent_IP = var.add_Agent_IP ? var.Agent_IP : "" bootstrap = false deployer = local.deployer deployer_tfstate = try(data.terraform_remote_state.deployer[0].outputs, []) - dns_label = var.dns_label - dns_zone_names = var.dns_zone_names infrastructure = local.infrastructure key_vault = local.key_vault - management_dns_resourcegroup_name = var.management_dns_resourcegroup_name - management_dns_subscription_id = var.management_dns_subscription_id naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming place_delete_lock_on_resources = var.place_delete_lock_on_resources service_principal = var.use_deployer ? local.service_principal : local.account @@ -28,6 +25,8 @@ module "sap_library" { use_custom_dns_a_registration = var.use_custom_dns_a_registration use_private_endpoint = var.use_private_endpoint use_webapp = var.use_webapp || length(try(data.terraform_remote_state.deployer[0].outputs.webapp_id,"")) > 0 + dns_settings = local.dns_settings + } module "sap_namegenerator" { diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index cbc8b786d8..28e481b0b7 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -68,6 +68,18 @@ provider "azurerm" { use_msi = var.use_spn ? false : true } +provider "azurerm" { + features {} + subscription_id = try(coalesce(var.privatelink_dns_subscription_id, local.spn.subscription_id), null) + client_id = local.use_spn ? local.spn.client_id : null + client_secret = local.use_spn ? local.spn.client_secret : null + tenant_id = local.use_spn ? local.spn.tenant_id : null + alias = "privatelinkdnsmanagement" + skip_provider_registration = true + storage_use_azuread = true + } + + provider "azuread" { client_id = local.use_spn ? local.spn.client_id : null client_secret = local.use_spn ? 
local.spn.client_secret : null diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index ff1509c3c9..0e4fbd75f5 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -300,3 +300,26 @@ variable "dns_zone_names" { } } +variable "register_storage_accounts_keyvaults_with_dns" { + description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" + default = true + type = bool + } + +variable "register_endpoints_with_dns" { + description = "Boolean value indicating if endpoints should be registered to the dns zone" + default = true + type = bool + } + +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" + default = "" + type = string + } diff --git a/deploy/terraform/run/sap_library/transform.tf b/deploy/terraform/run/sap_library/transform.tf index 284266a49c..e344d7f942 100644 --- a/deploy/terraform/run/sap_library/transform.tf +++ b/deploy/terraform/run/sap_library/transform.tf @@ -120,4 +120,15 @@ locals { public_network_access_enabled = var.public_network_access_enabled } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_label = var.dns_label + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = trimspace(var.management_dns_resourcegroup_name) + management_dns_subscription_id = trimspace(var.management_dns_subscription_id) + privatelink_dns_subscription_id = trimspace(var.privatelink_dns_subscription_id) + privatelink_dns_resourcegroup_name = trimspace(var.privatelink_dns_resourcegroup_name) + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + } } diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 5f53f12cd0..99f1c0adf7 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -66,7 +66,6 @@ module "common_infrastructure" { deploy_application_security_groups = var.deploy_application_security_groups deployer_tfstate = length(var.deployer_tfstate_key) > 0 ? data.terraform_remote_state.deployer[0].outputs : null deployment = var.deployment - dns_zone_names = var.dns_zone_names enable_purge_control_for_keyvaults = var.enable_purge_control_for_keyvaults ha_validator = format("%d%d-%s", local.application_tier.scs_high_availability ? 1 : 0, @@ -79,8 +78,6 @@ module "common_infrastructure" { key_vault = local.key_vault landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? 
local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options @@ -90,11 +87,10 @@ module "common_infrastructure" { service_principal = var.use_spn ? local.service_principal : local.account tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, true) use_private_endpoint = var.use_private_endpoint use_random_id_for_storageaccounts = var.use_random_id_for_storageaccounts use_scalesets_for_deployment = var.use_scalesets_for_deployment - register_endpoints_with_dns = var.register_endpoints_with_dns + dns_settings = local.dns_settings } #------------------------------------------------------------------------------- @@ -139,13 +135,10 @@ module "hdb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming NFS_provider = var.NFS_provider options = local.options ppg = module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) resource_group = module.common_infrastructure.resource_group sap_sid = local.sap_sid scale_set_id = length(var.scaleset_id) > 0 ? var.scaleset_id : module.common_infrastructure.scale_set_id @@ -157,12 +150,11 @@ module "hdb_node" { storage_subnet = module.common_infrastructure.storage_subnet tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, false) use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips - register_endpoints_with_dns = var.register_endpoints_with_dns + dns_settings = local.dns_settings } ######################################################################################### @@ -193,16 +185,12 @@ module "app_tier" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming network_location = module.common_infrastructure.network_location network_resource_group = module.common_infrastructure.network_resource_group options = local.options order_deployment = null ppg = var.use_app_proximityplacementgroups ? 
module.common_infrastructure.app_ppg : module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) - register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group route_table_id = module.common_infrastructure.route_table_id sap_sid = local.sap_sid @@ -214,11 +202,11 @@ module "app_tier" { storage_bootdiag_endpoint = module.common_infrastructure.storage_bootdiag_endpoint tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = try(data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration, false) use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips + dns_settings = local.dns_settings } ######################################################################################### @@ -258,8 +246,6 @@ module "anydb_node" { infrastructure = local.infrastructure landscape_tfstate = data.terraform_remote_state.landscape.outputs license_type = var.license_type - management_dns_resourcegroup_name = try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name) - management_dns_subscription_id = try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null) naming = length(var.name_override_file) > 0 ? local.custom_names : module.sap_namegenerator.naming options = local.options order_deployment = local.enable_db_deployment ? ( @@ -268,8 +254,6 @@ module "anydb_node" { ) : (null) ) : (null) ppg = module.common_infrastructure.ppg - register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, true) - register_endpoints_with_dns = var.register_endpoints_with_dns resource_group = module.common_infrastructure.resource_group sap_sid = local.sap_sid scale_set_id = try(module.common_infrastructure.scale_set_id, null) @@ -280,12 +264,12 @@ module "anydb_node" { storage_bootdiag_endpoint = module.common_infrastructure.storage_bootdiag_endpoint tags = var.tags terraform_template_version = var.terraform_template_version - use_custom_dns_a_registration = data.terraform_remote_state.landscape.outputs.use_custom_dns_a_registration use_loadbalancers_for_standalone_deployments = var.use_loadbalancers_for_standalone_deployments use_msi_for_clusters = var.use_msi_for_clusters use_observer = var.use_observer use_scalesets_for_deployment = var.use_scalesets_for_deployment use_secondary_ips = var.use_secondary_ips + dns_settings = local.dns_settings } ######################################################################################### diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index a4315b7da3..325439ab55 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -1050,6 +1050,18 @@ variable "management_dns_resourcegroup_name" { type = string } +variable "privatelink_dns_subscription_id" { + description = "String value giving the possibility to register custom PrivateLink DNS A records in a separate subscription" + default = "" + type = string + } + +variable "privatelink_dns_resourcegroup_name" { + description = "String value giving 
the possibility to register custom PrivateLink DNS A records in a separate resourcegroup" default = "" type = string } + variable "dns_zone_names" { description = "Private DNS zone names" @@ -1074,6 +1086,12 @@ variable "register_endpoints_with_dns" { type = bool } +variable "register_storage_accounts_keyvaults_with_dns" { description = "Boolean value indicating if storage accounts and key vaults should be registered to the corresponding dns zones" default = true type = bool } + ######################################################################################### # # # NFS and Shared Filed settings # @@ -1393,6 +1411,11 @@ variable "patch_mode" { default = "ImageDefault" } +variable "patch_assessment_mode" { description = "If defined, sets the patch assessment mode for the virtual machines" default = "ImageDefault" } + ######################################################################################### # # # Scaleout variables # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index f69a3a53a8..462a5b707d 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -12,6 +12,7 @@ locals { deploy_monitoring_extension = var.deploy_monitoring_extension deploy_defender_extension = var.deploy_defender_extension patch_mode = var.patch_mode + patch_assessment_mode = var.patch_assessment_mode } @@ -752,5 +753,19 @@ locals { } + dns_settings = { + use_custom_dns_a_registration = var.use_custom_dns_a_registration + dns_zone_names = var.dns_zone_names + management_dns_resourcegroup_name = coalesce(var.management_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name)) + management_dns_subscription_id = coalesce(var.management_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null)) + + privatelink_dns_resourcegroup_name = coalesce(var.privatelink_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.privatelink_dns_resourcegroup_name, try(data.terraform_remote_state.landscape.outputs.management_dns_resourcegroup_name, local.saplib_resource_group_name))) + privatelink_dns_subscription_id = coalesce(var.privatelink_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.privatelink_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, null))) + + register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns + register_endpoints_with_dns = var.register_endpoints_with_dns + + register_virtual_network_to_dns = try(data.terraform_remote_state.landscape.outputs.register_virtual_network_to_dns, false) + } } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index 355bf50961..20866dba8b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -809,6 +809,7 @@ else ;; esac else + echo "NO TOKEN specified" echo export "PATH=$${ansible_bin}:$${tf_bin}:"'$${PATH}'::"$${DOTNET_ROOT}":'$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts' | sudo tee -a /etc/profile.d/deploy_server.sh echo "export
SAP_AUTOMATION_REPO_PATH='$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation'" | sudo tee -a /etc/profile.d/deploy_server.sh diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf index f11374a850..0e7fb90abf 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/variables_global.tf @@ -77,34 +77,10 @@ variable "subnets_to_add" { # # ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } +variable "dns_settings" { + description = "DNS details for the deployment" + default = {} + } ############################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf index 712b034e31..9cfb588d7b 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf @@ -219,7 +219,9 @@ resource "azurerm_virtual_machine_extension" "configure" { count = var.auto_configure_deployer ? var.deployer_vm_count : 0 depends_on = [ - time_sleep.wait_for_VM + time_sleep.wait_for_VM, + azurerm_virtual_machine_extension.monitoring_extension_deployer_lnx, + azurerm_virtual_machine_extension.monitoring_defender_deployer_lnx ] name = "configure_deployer" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index 7a082b6751..5f489b8e13 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -7,6 +7,18 @@ data "azurerm_subnet" "ams" { resource_group_name = split("/", local.ams_subnet_arm_id)[4] # Get RG name from actual arm_id } +resource "azurerm_subnet_route_table_association" "ams" { + provider = azurerm.main + count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? 1 : 0 + depends_on = [ + azurerm_route_table.rt, + azurerm_subnet.ams + ] + subnet_id = local.ams_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_ams.arm_id : azurerm_subnet.ams[0].id + route_table_id = azurerm_route_table.rt[0].id +} + + # Created AMS instance if log analytics workspace is NOT defined resource "azapi_resource" "ams_instance" { type = "Microsoft.Workloads/monitors@2023-04-01" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf index cc857475b5..c4fd4db895 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf @@ -49,12 +49,12 @@ data "azurerm_virtual_network" "vnet_sap" { resource "azurerm_virtual_network_dns_servers" "vnet_sap_dns_servers" { provider = azurerm.main - count = local.SAP_virtualnetwork_exists && length(var.dns_server_list) > 0 ? 1 : 0 + count = local.SAP_virtualnetwork_exists && length(var.dns_settings.dns_server_list) > 0 ? 1 : 0 virtual_network_id = local.SAP_virtualnetwork_exists ? ( data.azurerm_virtual_network.vnet_sap[0].id) : ( azurerm_virtual_network.vnet_sap[0].id ) - dns_servers = var.dns_server_list + dns_servers = var.dns_settings.dns_server_list } # // Peers management VNET to SAP VNET @@ -154,7 +154,6 @@ resource "azurerm_route_table" "rt" { data.azurerm_virtual_network.vnet_sap[0].location) : ( azurerm_virtual_network.vnet_sap[0].location ) - disable_bgp_route_propagation = false tags = var.tags } @@ -183,7 +182,7 @@ resource "azurerm_route" "admin" { resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap" { provider = azurerm.dnsmanagement - count = local.use_Azure_native_DNS && var.use_private_endpoint && var.register_virtual_network_to_dns ? 1 : 0 + count = local.use_Azure_native_DNS && var.use_private_endpoint && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 depends_on = [ azurerm_virtual_network.vnet_sap ] @@ -194,9 +193,9 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name + resource_group_name = var.dns_settings.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_label + private_dns_zone_name = var.dns_settings.dns_label virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = true } @@ -214,18 +213,18 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_sap_file" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.file_dns_zone_name + private_dns_zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = false } data "azurerm_private_dns_zone" "file" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 + name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_private_dns_zone_virtual_network_link" "storage" { @@ -241,16 +240,16 @@ resource "azurerm_private_dns_zone_virtual_network_link" "storage" { var.naming.resource_suffixes.dns_link ) - resource_group_name = var.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name + private_dns_zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id } data "azurerm_private_dns_zone" "storage" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_management_lock" "vnet_sap" { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index c864edf0db..5acf53349d 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -59,6 +59,19 @@ data "azurerm_network_security_group" "iscsi" { resource_group_name = split("/", local.sub_iscsi_nsg_arm_id)[4] } + + +resource "azurerm_subnet_route_table_association" "iscsi" { + provider = azurerm.main + count = local.enable_iscsi && !local.SAP_virtualnetwork_exists && !local.sub_iscsi_exists ? 1 : 0 + depends_on = [ + azurerm_route_table.rt, + azurerm_subnet.iscsi + ] + subnet_id = local.sub_iscsi_exists ? var.infrastructure.vnets.sap.sub_iscsi.arm_id : azurerm_subnet.iscsi[0].id + route_table_id = azurerm_route_table.rt[0].id +} + // TODO: Add nsr to iSCSI's nsg /* @@ -145,6 +158,11 @@ resource "azurerm_linux_virtual_machine" "iscsi" { //custom_data = try(data.template_cloudinit_config.config_growpart.rendered, "Cg==") + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? 
false : true + vm_agent_platform_updates_enabled = true + os_disk { name = format("%s%s%s%s%s", var.naming.resource_prefixes.osdisk, @@ -327,6 +345,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_iscsi_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { @@ -341,6 +360,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_iscsi_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index 26b796de47..fb3f5bbccb 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -439,9 +439,9 @@ resource "azurerm_private_endpoint" "kv_user" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_settings.dns_zone_names.vault_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.keyvault[0].id] } } @@ -450,9 +450,9 @@ resource "azurerm_private_endpoint" "kv_user" { data "azurerm_private_dns_zone" "keyvault" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } resource "azurerm_private_dns_a_record" "keyvault" { @@ -461,8 +461,8 @@ resource "azurerm_private_dns_a_record" "keyvault" { name = lower( format("%s", local.user_keyvault_name) ) - zone_name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.keyvault_private_endpoint_id) > 0 ? ( @@ -487,19 +487,12 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vault" { var.naming.separator, "vault" ) - resource_group_name = var.management_dns_resourcegroup_name - private_dns_zone_name = var.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name + private_dns_zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name virtual_network_id = azurerm_virtual_network.vnet_sap[0].id registration_enabled = false } -data "azurerm_private_dns_zone" "vault" { - provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.register_endpoints_with_dns ? 
1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name -} - ############################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 6e0dc17b94..d4340abfdf 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -35,10 +35,10 @@ resource "azurerm_storage_account" "storage_bootdiag" { resource "azurerm_private_dns_a_record" "storage_bootdiag" { provider = azurerm.dnsmanagement - count = var.use_custom_dns_a_registration ? 0 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 0 : 0 name = lower(local.storageaccount_name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = local.resource_group_exists ? ( data.azurerm_resource_group.resource_group[0].name) : ( azurerm_resource_group.resource_group[0].name @@ -108,9 +108,9 @@ resource "azurerm_private_endpoint" "storage_bootdiag" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] } } @@ -173,10 +173,10 @@ resource "azurerm_storage_account" "witness_storage" { resource "azurerm_private_dns_a_record" "witness_storage" { provider = azurerm.dnsmanagement - count = var.use_custom_dns_a_registration ? 0 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 0 : 0 name = lower(local.witness_storageaccount_name) - zone_name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 3600 records = [data.azurerm_network_interface.witness_storage[count.index].ip_configuration[0].private_ip_address] @@ -250,9 +250,9 @@ resource "azurerm_private_endpoint" "witness_storage" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.storage[0].id] } } @@ -322,7 +322,7 @@ resource "azurerm_storage_account" "transport" { resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 + count = var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 
1 : 0 name = replace( lower( format("%s", local.landscape_shared_transport_storage_account_name) @@ -330,8 +330,8 @@ resource "azurerm_private_dns_a_record" "transport" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.transport_private_endpoint_id) > 0 ? ( @@ -351,8 +351,8 @@ data "azurerm_private_dns_a_record" "transport" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } @@ -448,9 +448,9 @@ resource "azurerm_private_endpoint" "transport" { ] } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] } } @@ -548,8 +548,8 @@ resource "azurerm_private_dns_a_record" "install" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name ttl = 10 records = [ length(var.install_private_endpoint_id) > 0 ? ( @@ -573,8 +573,8 @@ data "azurerm_private_dns_a_record" "install" { "/[^a-z0-9]/", "" ) - zone_name = var.dns_zone_names.file_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.file_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } @@ -659,9 +659,9 @@ resource "azurerm_private_endpoint" "install" { } dynamic "private_dns_zone_group" { - for_each = range(var.register_endpoints_with_dns ? 1 : 0) + for_each = range(var.dns_settings.register_endpoints_with_dns ? 1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [data.azurerm_private_dns_zone.file[0].id] } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index cf33a1823a..eefbc315ea 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -224,17 +224,6 @@ resource "azurerm_subnet_route_table_association" "web" { route_table_id = azurerm_route_table.rt[0].id } -resource "azurerm_subnet_route_table_association" "ams" { - provider = azurerm.main - count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? 1 : 0 - depends_on = [ - azurerm_route_table.rt, - azurerm_subnet.ams - ] - subnet_id = local.ams_subnet_existing ? 
var.infrastructure.vnets.sap.subnet_ams.arm_id : azurerm_subnet.ams[0].id - route_table_id = azurerm_route_table.rt[0].id -} - # Creates network security rule to allow internal traffic for SAP db subnet resource "azurerm_network_security_rule" "nsr_internal_db" { provider = azurerm.main diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf index 4451866759..e77fa7fe3b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_global.tf @@ -218,43 +218,7 @@ variable "place_delete_lock_on_resources" { description = "If def ######################################################################################### -variable "dns_label" { description = "DNS label for the system, for example azure.contoso.net" } - -variable "dns_server_list" { - description = "The list of DNS Servers to associate with the VNet" - default = [] - } - -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool - } - -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a records should be created for private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { description = "String value giving the possibility to register custom dns a records in a separate subscription" } - -variable "management_dns_resourcegroup_name" { description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } +variable "dns_settings" { description = "DNS settings for the deployment" } variable "use_private_endpoint" { description = "Boolean value indicating if private endpoint should be used for the deployment" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf index 9facd0f913..10f71190f4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/variables_local.tf @@ -802,7 +802,7 @@ locals { )] ) - use_Azure_native_DNS = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && !local.SAP_virtualnetwork_exists + use_Azure_native_DNS = length(var.dns_settings.dns_label) > 0 && !var.dns_settings.use_custom_dns_a_registration && !local.SAP_virtualnetwork_exists use_AFS_for_shared = (var.NFS_provider == "ANF" && var.use_AFS_for_shared_storage) || var.NFS_provider == "AFS" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf index 7260e24f00..ddd5224d2f 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/vm.tf @@ -67,6 +67,14 @@ 
resource "azurerm_windows_virtual_machine" "utility_vm" { admin_username = local.input_sid_username admin_password = local.input_sid_password + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + os_disk { name = format("%s%s%s%s%s", var.naming.resource_prefixes.osdisk, @@ -130,6 +138,11 @@ resource "azurerm_linux_virtual_machine" "utility_vm" { admin_password = local.input_sid_password disable_password_authentication = true + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + dynamic "admin_ssh_key" { for_each = range(1) content { @@ -180,6 +193,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_lnx" type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -193,6 +207,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_utility_win" type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -205,6 +220,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -224,6 +240,8 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_utility_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true + settings = jsonencode( { "enableGenevaUpload" = true, diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index 5eae5a3a7a..d15ebdf1d2 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -10,7 +10,7 @@ resource "azurerm_private_dns_zone" "dns" { depends_on = [ azurerm_resource_group.library ] - name = var.dns_label + name = var.dns_settings.dns_label resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -18,11 +18,11 @@ resource "azurerm_private_dns_zone" "dns" { } resource "azurerm_private_dns_zone" "blob" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = local.resource_group_exists ? 
( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -31,11 +31,11 @@ resource "azurerm_private_dns_zone" "blob" { resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.table_dns_zone_name + name = var.dns_settings.dns_zone_names.table_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -44,11 +44,11 @@ resource "azurerm_private_dns_zone" "table" { resource "azurerm_private_dns_zone" "file" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -57,11 +57,11 @@ resource "azurerm_private_dns_zone" "file" { resource "azurerm_private_dns_zone" "vault" { provider = azurerm.main - count = local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 + count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_settings.dns_zone_names.vault_dns_zone_name resource_group_name = local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name @@ -69,10 +69,10 @@ resource "azurerm_private_dns_zone" "vault" { } data "azurerm_private_dns_zone" "vault" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name } diff --git a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf index 48a6e30322..b046292896 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf @@ -31,7 +31,7 @@ data "azurerm_resource_group" "library" { resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 + count = length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 
1 : 0 depends_on = [ azurerm_private_dns_zone.dns ] @@ -42,21 +42,21 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = length(var.dns_settings.management_dns_subscription_id) == 0 ? ( local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name )) : ( - var.management_dns_resourcegroup_name + var.dns_settings.management_dns_resourcegroup_name ) - private_dns_zone_name = var.dns_label + private_dns_zone_name = var.dns_settings.dns_label virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = true } resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt_blob" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_storage_account.storage_tfstate, azurerm_private_dns_zone.blob @@ -68,14 +68,14 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt_blob" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = length(var.dns_settings.privatelink_dns_subscription_id) == 0 ? ( local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name )) : ( - var.management_dns_resourcegroup_name + var.dns_settings.privatelink_dns_resourcegroup_name ) - private_dns_zone_name = var.dns_zone_names.blob_dns_zone_name + private_dns_zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = false } diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 7470f37608..cf04ab0fb3 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -83,10 +83,13 @@ resource "azurerm_key_vault_secret" "tfstate" { resource "azurerm_private_dns_a_record" "kv_user" { provider = azurerm.deployer - count = var.use_private_endpoint && var.use_custom_dns_a_registration ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 name = lower(split("/", var.key_vault.kv_spn_id)[8]) - zone_name = var.dns_zone_names.vault_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name + ) ttl = 3600 records = [azurerm_private_endpoint.kv_user[0].private_service_connection[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf index 5b41d25b88..3c24f8c479 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf @@ -38,9 +38,9 @@ resource "azurerm_private_endpoint" "kv_user" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0) content { - name = var.dns_zone_names.vault_dns_zone_name + name = var.dns_settings.dns_zone_names.vault_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.vault[0].id : data.azurerm_private_dns_zone.vault[0].id] } } @@ -49,7 +49,7 @@ resource "azurerm_private_endpoint" "kv_user" { resource "azurerm_private_dns_zone_virtual_network_link" "vault" { provider = azurerm.dnsmanagement - count = length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && var.use_private_endpoint ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_private_endpoint ? 1 : 0 depends_on = [ azurerm_private_dns_zone.vault ] @@ -60,14 +60,14 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vault" { var.naming.separator, "vault" ) - resource_group_name = length(var.management_dns_subscription_id) == 0 ? ( + resource_group_name = length(var.dns_settings.privatelink_dns_subscription_id) == 0 ? ( local.resource_group_exists ? 
( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name )) : ( - var.management_dns_resourcegroup_name + var.dns_settings.privatelink_dns_resourcegroup_name ) - private_dns_zone_name = var.dns_zone_names.vault_dns_zone_name + private_dns_zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = false } diff --git a/deploy/terraform/terraform-units/modules/sap_library/providers.tf b/deploy/terraform/terraform-units/modules/sap_library/providers.tf index d3398211b5..e08192b874 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/providers.tf @@ -2,7 +2,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement] + configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] version = "~> 3.0" } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index f24d54e4b2..5ead7bdc78 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -106,14 +106,15 @@ resource "azurerm_role_assignment" "storage_tfstate_contributor_ssi" { resource "azurerm_private_dns_a_record" "storage_tfstate_pep_a_record_registry" { provider = azurerm.dnsmanagement - count = var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_tfstate_exists ? 1 : 0 + count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_tfstate_exists ? 1 : 0 depends_on = [ azurerm_private_dns_zone.blob ] name = lower(azurerm_storage_account.storage_tfstate[0].name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = coalesce( - var.management_dns_resourcegroup_name, + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( data.azurerm_resource_group.library[0].name ) : ( @@ -185,9 +186,9 @@ resource "azurerm_private_endpoint" "storage_tfstate" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.blob[0].id : data.azurerm_private_dns_zone.storage[0].id] } } @@ -243,9 +244,9 @@ resource "azurerm_private_endpoint" "table_tfstate" { } dynamic "private_dns_zone_group" { - for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration && var.use_webapp ? 1 : 0) + for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_webapp ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? 
azurerm_private_dns_zone.table[0].id : data.azurerm_private_dns_zone.table[0].id] } } @@ -348,9 +349,10 @@ resource "azurerm_private_dns_a_record" "storage_sapbits_pep_a_record_registry" ] name = lower(azurerm_storage_account.storage_sapbits[0].name) - zone_name = var.dns_zone_names.blob_dns_zone_name + zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name resource_group_name = coalesce( - var.management_dns_resourcegroup_name, + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( data.azurerm_resource_group.library[0].name) : ( azurerm_resource_group.library[0].name) @@ -418,7 +420,7 @@ resource "azurerm_private_endpoint" "storage_sapbits" { dynamic "private_dns_zone_group" { for_each = range(var.use_private_endpoint && !var.use_custom_dns_a_registration ? 1 : 0) content { - name = var.dns_zone_names.blob_dns_zone_name + name = var.dns_settings.dns_zone_names.blob_dns_zone_name private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.blob[0].id : data.azurerm_private_dns_zone.storage[0].id] } @@ -488,18 +490,24 @@ resource "azurerm_role_assignment" "storage_sapbits_contributor_ssi" { data "azurerm_private_dns_zone" "storage" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.blob_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_private_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + name = var.dns_settings.dns_zone_names.blob_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name + ) } data "azurerm_private_dns_zone" "table" { - provider = azurerm.dnsmanagement - count = !local.use_local_private_dns && var.use_private_endpoint ? 1 : 0 - name = var.dns_zone_names.table_dns_zone_name - resource_group_name = var.management_dns_resourcegroup_name + provider = azurerm.privatelinkdnsmanagement + count = !local.use_local_private_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 + name = var.dns_settings.dns_zone_names.table_dns_zone_name + resource_group_name = coalesce( + var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name + ) } diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf index 01b3d1aad2..11a885c230 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_global.tf @@ -19,6 +19,10 @@ variable "infrastructure" { variable "storage_account_sapbits" {} variable "storage_account_tfstate" {} +variable "dns_settings" { + description = "DNS details for the deployment" + default = {} + } variable "deployer" { description = "Details of deployer" default = {} @@ -59,24 +63,6 @@ variable "key_vault" { } -variable "dns_label" { - description = "DNS label for the deployment" - default = "" - - } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } - - variable "naming" { description = "naming convention data structure" } @@ -101,18 +87,6 @@ variable "use_custom_dns_a_registration" { type = bool } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - variable "enable_purge_control_for_keyvaults" { description = "Allow the deployment to control the purge protection" type = bool diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index 2a2940cb97..015338c91b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -56,7 +56,8 @@ locals { enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) - use_local_private_dns = (length(var.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.management_dns_resourcegroup_name)) == 0) + use_local_private_dns = (length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.management_dns_resourcegroup_name)) == 0) + use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) == 0 keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf index 091336fdcf..844f14527b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/infrastructure.tf @@ -151,9 +151,9 @@ data "azurerm_availability_set" "anydb" { resource 
"azurerm_private_dns_a_record" "db" { provider = azurerm.dnsmanagement - count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%s%sdb%scl", var.sap_sid, local.anydb_sid, "00")) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.anydb[0].frontend_ip_configuration[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf index 08de04649a..e814c7ff26 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/variables_global.tf @@ -51,29 +51,8 @@ variable "use_secondary_ips" { # # ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool +variable "dns_settings" { + description = "DNS Settings" } ######################################################################################### diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf index 8d9719394b..b5f7b8ec0d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/vm-anydb.tf @@ -161,8 +161,6 @@ resource "azurerm_linux_virtual_machine" "dbserver" { size = local.anydb_sku source_image_id = var.database.os.type == "custom" ? var.database.os.source_image_id : null license_type = length(var.license_type) > 0 ? var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode admin_username = var.sid_username admin_password = local.enable_auth_key ? null : var.sid_password @@ -170,6 +168,11 @@ resource "azurerm_linux_virtual_machine" "dbserver" { custom_data = var.deployment == "new" ? var.cloudinit_growpart_config : null + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? 
false : true + vm_agent_platform_updates_enabled = true + tags = merge(local.tags, var.tags) dynamic "admin_ssh_key" { @@ -301,9 +304,14 @@ resource "azurerm_windows_virtual_machine" "dbserver" { size = local.anydb_sku source_image_id = var.database.os.type == "custom" ? var.database.os.source_image_id : null license_type = length(var.license_type) > 0 ? var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") admin_username = var.sid_username admin_password = var.sid_password @@ -714,6 +722,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -729,6 +738,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -745,6 +755,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -767,6 +778,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf index b29f6228db..ebb7fb2eee 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/infrastructure.tf @@ -431,12 +431,12 @@ resource "azurerm_subnet_route_table_association" "subnet_sap_web" { resource "azurerm_private_dns_a_record" "scs" { provider = azurerm.dnsmanagement - count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 
1 : 0 name = lower(format("%sscs%scl1", local.sid, var.application_tier.scs_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = var.landscape_tfstate.dns_label ttl = 300 records = [azurerm_lb.scs[0].frontend_ip_configuration[0].private_ip_address] @@ -444,12 +444,12 @@ resource "azurerm_private_dns_a_record" "scs" { resource "azurerm_private_dns_a_record" "ers" { provider = azurerm.dnsmanagement - count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_scs_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%sers%scl2", local.sid, local.ers_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.scs[0].frontend_ip_configuration[1].private_ip_address] @@ -457,11 +457,11 @@ resource "azurerm_private_dns_a_record" "ers" { resource "azurerm_private_dns_a_record" "web" { provider = azurerm.dnsmanagement - count = local.enable_web_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_web_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%sweb%s", local.sid, var.application_tier.web_instance_number )) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [azurerm_lb.web[0].frontend_ip_configuration[0].private_ip_address] diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf index 656d956576..360b72eb09 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/variables_global.tf @@ -29,8 +29,6 @@ variable "network_resource_group" { description = "Resourc variable "options" { description = "Dictionary of miscallaneous parameters" } variable "order_deployment" { description = "psuedo condition for ordering deployment" } variable "ppg" { description = "Details of the proximity placement group" } -variable "register_virtual_network_to_dns" { description = "Boolean value indicating if the vnet should be registered to the dns zone" } -variable "register_endpoints_with_dns" { description = "Boolean value indicating if endpoints should be registered to the dns zone" } variable "resource_group" { description = "Details of the resource group" } variable "route_table_id" { description = "Route table (if any) id" } variable "sap_sid" { description = "The SID of the application" } @@ -56,20 +54,8 @@ variable "use_msi_for_clusters" { description = "If true # # ######################################################################################### -variable 
"use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string +variable "dns_settings" { + description = "DNS Settings" } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf index 5bf88f7ab6..826df1ec25 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-app.tf @@ -144,6 +144,12 @@ resource "azurerm_linux_virtual_machine" "app" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + //If length of zones > 1 distribute servers evenly across zones zone = var.application_tier.app_use_avset ? null : try(local.app_zones[count.index % max(local.app_zone_count, 1)], null) @@ -289,6 +295,15 @@ resource "azurerm_windows_virtual_machine" "app" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + //If length of zones > 1 distribute servers evenly across zones zone = var.application_tier.app_use_avset ? null : try(local.app_zones[count.index % max(local.app_zone_count, 1)], null) @@ -305,10 +320,7 @@ resource "azurerm_windows_virtual_machine" "app" { admin_username = var.sid_username admin_password = var.sid_password - #ToDo: Remove once feature is GA patch_mode = "Manual" license_type = length(var.license_type) > 0 ? 
var.license_type : null - # ToDo Add back later -# patch_mode = var.infrastructure.patch_mode tags = merge(var.application_tier.app_tags, var.tags) @@ -511,6 +523,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -526,6 +539,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_app_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -540,6 +554,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -561,6 +576,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_app_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( {
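Both VM flavours in vm-app.tf above repeat the same patch-orchestration block: Linux passes `var.infrastructure.patch_mode` through unchanged, while Windows has no `ImageDefault` orchestration mode, so the code maps it to `Manual` and derives `enable_automatic_updates` from the same setting. A sketch of the repeated expressions hoisted into locals; the local names are invented here for illustration and do not appear in the patch:

    locals {
      # Azure's Windows patch orchestration uses "Manual" where Linux uses "ImageDefault".
      windows_patch_mode        = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode
      automatic_updates_enabled = var.infrastructure.patch_mode != "ImageDefault"

      # Safety checks on customer schedules can only be bypassed when the
      # platform orchestrates patching.
      bypass_safety_checks      = var.infrastructure.patch_mode == "AutomaticByPlatform"
    }

Referencing such locals from each `azurerm_windows_virtual_machine` resource would keep the several copies of this block, stamped into the db, app, scs, web and anchor VMs in this series, from drifting apart.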
diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf index e226b5fdef..b380dcc93d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-scs.tf @@ -140,6 +140,11 @@ resource "azurerm_linux_virtual_machine" "scs" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true //If length of zones > 1 distribute servers evenly across zones zone = local.use_scs_avset ? null : try(local.scs_zones[count.index % max(local.scs_zone_count, 1)], null) network_interface_ids = var.application_tier.dual_nics ? ( @@ -327,6 +332,13 @@ resource "azurerm_windows_virtual_machine" "scs" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") //If length of zones > 1 distribute servers evenly across zones zone = local.use_scs_avset ? ( null) : ( @@ -357,9 +369,7 @@ resource "azurerm_windows_virtual_machine" "scs" { admin_username = var.sid_username admin_password = var.sid_password - #ToDo: Remove once feature is GA patch_mode = "Manual" license_type = length(var.license_type) > 0 ? var.license_type : null - patch_mode = var.infrastructure.patch_mode tags = merge(var.application_tier.scs_tags, var.tags) @@ -709,6 +719,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -723,6 +734,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_scs_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -737,6 +749,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -758,6 +771,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_scs_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf index 85d6d7e8b8..b7747a5fea 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/vm-webdisp.tf @@ -136,6 +136,11 @@ resource "azurerm_linux_virtual_machine" "web" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true //If length of zones > 1 distribute servers evenly across zones zone = local.use_web_avset ? null : try(local.web_zones[count.index % max(local.web_zone_count, 1)], null) @@ -284,6 +289,14 @@ resource "azurerm_windows_virtual_machine" "web" { virtual_machine_scale_set_id = length(var.scale_set_id) > 0 ? var.scale_set_id : null + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + enable_automatic_updates = !(var.infrastructure.patch_mode == "ImageDefault") + //If length of zones > 1 distribute servers evenly across zones zone = local.use_web_avset ? 
( null) : ( @@ -639,6 +652,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -653,6 +667,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_web_win" { type = "AzureMonitorWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { @@ -666,6 +681,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { @@ -687,6 +703,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_web_win" { type = "AzureSecurityWindowsAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 4e4e001608..3ff79311c4 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -127,9 +127,9 @@ resource "azurerm_private_endpoint" "sapmnt" { dynamic "private_dns_zone_group" { - for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 && var.register_endpoints_with_dns ? 1 : 0) + for_each = range(length(try(var.landscape_tfstate.privatelink_file_id, "")) > 0 && var.dns_settings.register_endpoints_with_dns ? 
1 : 0) content { - name = var.dns_zone_names.file_dns_zone_name + name = var.dns_settings.dns_zone_names.file_dns_zone_name private_dns_zone_ids = [var.landscape_tfstate.privatelink_file_id] } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 2388871d4d..922e45fa16 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -224,41 +224,9 @@ variable "use_private_endpoint" { ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool - } - -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating if endpoints should be registered to the dns zone" - type = bool - } - -variable "dns_zone_names" { - description = "Private DNS zone names" - type = map(string) - - default = { - "file_dns_zone_name" = "privatelink.file.core.windows.net" - "blob_dns_zone_name" = "privatelink.blob.core.windows.net" - "table_dns_zone_name" = "privatelink.table.core.windows.net" - "vault_dns_zone_name" = "privatelink.vaultcore.azure.net" - } - } - +variable "dns_settings" { + description = "DNS Settings" + } variable "sapmnt_private_endpoint_id" { description = "Azure Resource Identifier for an private endpoint connection" type = string diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf index bd705a9a9a..9c2a826350 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/vm-anchor.tf @@ -40,6 +40,13 @@ resource "azurerm_linux_virtual_machine" "anchor" { resource_group_name = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].name : azurerm_resource_group.resource_group[0].name location = local.resource_group_exists ? data.azurerm_resource_group.resource_group[0].location : azurerm_resource_group.resource_group[0].location proximity_placement_group_id = local.ppg_exists ? data.azurerm_proximity_placement_group.ppg[count.index].id : azurerm_proximity_placement_group.ppg[count.index].id + + patch_mode = var.infrastructure.patch_mode + + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + zone = local.zones[count.index] network_interface_ids = [ @@ -134,6 +141,13 @@ resource "azurerm_windows_virtual_machine" "anchor" { proximity_placement_group_id = local.ppg_exists ? 
data.azurerm_proximity_placement_group.ppg[count.index].id : azurerm_proximity_placement_group.ppg[count.index].id zone = local.zones[count.index] + // ImageDefault = Manual on Windows + // https://learn.microsoft.com/en-us/azure/virtual-machines/automatic-vm-guest-patching#patch-orchestration-modes + patch_mode = var.infrastructure.patch_mode == "ImageDefault" ? "Manual" : var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + network_interface_ids = [ azurerm_network_interface.anchor[count.index].id ] @@ -180,6 +194,5 @@ resource "azurerm_windows_virtual_machine" "anchor" { ] } - patch_mode = "Manual" license_type = length(var.license_type) > 0 ? var.license_type : null } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf index bc630e1e8c..bc2641fac9 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/infrastructure.tf @@ -145,9 +145,9 @@ resource "azurerm_lb_rule" "hdb" { resource "azurerm_private_dns_a_record" "db" { provider = azurerm.dnsmanagement - count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.register_virtual_network_to_dns ? 1 : 0 + count = local.enable_db_lb_deployment && length(local.dns_label) > 0 && var.dns_settings.register_virtual_network_to_dns ? 1 : 0 name = lower(format("%s%sdb%scl", var.sap_sid, local.database_sid, local.database_instance)) - resource_group_name = coalesce(var.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) + resource_group_name = coalesce(var.dns_settings.management_dns_resourcegroup_name, var.landscape_tfstate.dns_resource_group_name) zone_name = local.dns_label ttl = 300 records = [try(azurerm_lb.hdb[0].frontend_ip_configuration[0].private_ip_address, "")] diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf index 9093165754..360af4ee08 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/variables_global.tf @@ -72,36 +72,10 @@ variable "use_secondary_ips" { ######################################################################################### -variable "use_custom_dns_a_registration" { - description = "Boolean value indicating if a custom dns a record should be created when using private endpoints" - default = false - type = bool +variable "dns_settings" { + description = "DNS Settings" } -variable "management_dns_subscription_id" { - description = "String value giving the possibility to register custom dns a records in a separate subscription" - default = null - type = string - } - -variable "management_dns_resourcegroup_name" { - description = "String value giving the possibility to register custom dns a records in a separate resourcegroup" - default = null - type = string - } - - -variable "register_virtual_network_to_dns" { - description = "Boolean value indicating if the vnet should be registered to the dns zone" - type = bool - } - -variable "register_endpoints_with_dns" { - description = "Boolean value indicating 
if endpoints should be registered to the dns zone" - type = bool - } - - ######################################################################################### # # # ANF settings # diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf index 977f383ee2..e81afe012d 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/vm-hdb.tf @@ -187,6 +187,11 @@ resource "azurerm_linux_virtual_machine" "vm_dbnode" { disable_password_authentication = !local.enable_auth_password tags = merge(var.tags, local.tags) + patch_mode = var.infrastructure.patch_mode + patch_assessment_mode = var.infrastructure.patch_assessment_mode + bypass_platform_safety_checks_on_user_schedule_enabled = var.infrastructure.patch_mode != "AutomaticByPlatform" ? false : true + vm_agent_platform_updates_enabled = true + zone = local.use_avset ? null : try(local.zones[count.index % max(local.db_zone_count, 1)], null) size = local.hdb_vm_sku @@ -567,6 +572,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_extension_db_lnx" { type = "AzureMonitorLinuxAgent" type_handler_version = "1.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true } @@ -583,6 +589,7 @@ resource "azurerm_virtual_machine_extension" "monitoring_defender_db_lnx" { type = "AzureSecurityLinuxAgent" type_handler_version = "2.0" auto_upgrade_minor_version = true + automatic_upgrade_enabled = true settings = jsonencode( { From 1e24dd140df1597a4e212e6af135169160530e02 Mon Sep 17 00:00:00 2001 From: Nadeen Noaman <95418928+nnoaman@users.noreply.github.com> Date: Tue, 27 Aug 2024 17:26:18 +0200 Subject: [PATCH 063/164] Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL (#618) * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Set pipefail and Code Linting --- .../ansible/playbook_sapcal_integration.yaml | 21 ++++-- .../1.5.3-disk-setup-sapcal/tasks/main.yml | 75 +++++++++++++++++++ deploy/ansible/vars/ansible-input-api.yaml | 3 + 3 files changed, 91 insertions(+), 8 deletions(-) create mode 100644 deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml diff --git a/deploy/ansible/playbook_sapcal_integration.yaml b/deploy/ansible/playbook_sapcal_integration.yaml index 0fdb2923a3..7ff35a53a6 100644 --- a/deploy/ansible/playbook_sapcal_integration.yaml +++ b/deploy/ansible/playbook_sapcal_integration.yaml @@ -1,20 +1,25 @@ --- -- name: "SAP CAL Integration" - hosts: "{{ sap_sid | upper }}_DB : - {{ sap_sid | upper }}_SCS : - {{ sap_sid | upper }}_PAS : - {{ sap_sid | upper }}_APP" +- name: "SAP CAL Integration" + hosts: "{{ sap_sid | upper }}_DB : + {{ sap_sid | upper }}_SCS : + {{ sap_sid | upper }}_PAS : + {{ sap_sid | upper }}_APP" become: true gather_facts: true vars_files: vars/ansible-input-api.yaml tasks: - - name: 6.0.0-sapcal-install - Retrieve Resourced Data + - name: "SAP-CAL Integration" become: true when: - ansible_os_family | upper == "SUSE" or ansible_os_family | upper == "REDHAT" - enable_sap_cal is defined and enable_sap_cal block: + - name: "6.0.0-sapcal-install - Extend logical volumes" + when: ansible_os_family | upper == "REDHAT" + ansible.builtin.include_role: + name: roles-os/1.5.3-disk-setup-sapcal + - name: "Retrieve Resource Group Name and ResourceID" ansible.builtin.uri: url: http://169.254.169.254/metadata/instance?api-version=2021-02-01 @@ -114,9 +119,9 @@ - { 
key: 'app_physical_hostname', value: 'APP' } - { key: 'app_virtual_hostname', value: 'APP' } - - name: 6.0.0-sapcal-install - CALL SAP CAL API + - name: "6.0.0-sapcal-install - CALL SAP CAL API" when: enable_sap_cal is defined and enable_sap_cal block: - - name: Import the 6.0.0-sapcal-install role + - name: "Import the 6.0.0-sapcal-install role" ansible.builtin.import_role: name: "roles-sap/6.0.0-sapcal-install" diff --git a/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml b/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml new file mode 100644 index 0000000000..c70873d143 --- /dev/null +++ b/deploy/ansible/roles-os/1.5.3-disk-setup-sapcal/tasks/main.yml @@ -0,0 +1,75 @@ +--- + +# /*---------------------------------------------------------------------------8 +# | | +# | OS Base Disk Configuration | +# | | +# +------------------------------------4--------------------------------------*/ +# -------------------------------------+---------------------------------------8 +# +# Task: 1.5.3 - os-disk-setup SAP-CAL +# +# -------------------------------------+---------------------------------------8 + +# # Check the free size of the volume group +# Extend the logical volumes [tmplv & rootlv] to the required size and resize the FS +# + +# -------------------------------------+---------------------------------------8 +# + +- name: "Get Volume Group information" + ansible.builtin.shell: set -o pipefail && vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + register: vg_info + changed_when: false + +- name: "Extract free size of the VG" + ansible.builtin.set_fact: + vg_free_size: "{{ vg_info.stdout | float }}" + when: vg_info is defined and vg_info.stdout is defined + +- name: "Check if free size is more than 20 GB" + ansible.builtin.set_fact: + sufficient_vg_space: "{{ vg_free_size | default(0) | float > 20.0 }}" + when: vg_free_size is defined + failed_when: sufficient_vg_space is not defined or not sufficient_vg_space + +# ------------------------------------- +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ vg_info }}" + - "vg_free_size: {{ vg_free_size }}" + - "sufficient_vg_space: {{ sufficient_vg_space }}" + verbosity: 2 +# ------------------------------------ + +- name: "Extend the logical volumes and resize the FS" + community.general.lvol: + vg: "{{ item.vg }}" + lv: "{{ item.lv }}" + size: "{{ item.size }}" + active: true + state: present + shrink: false + resizefs: true + loop: + - { vg: '{{ vg_root }}', lv: 'rootlv', size: '{{ lv_root_size }}' } + - { vg: '{{ vg_root }}', lv: 'tmplv', size: '{{ lv_tmp_size }}' } + when: + - sufficient_vg_space is defined and sufficient_vg_space + +# ------------------------------------- +- name: "Print recent Volume Group size and Logical Volume information" + ansible.builtin.shell: | + set -o pipefail + vgdisplay --units g {{ vg_root }} | grep 'Free PE / Size' | awk '{print $(NF-1)}' + lvdisplay {{ vg_root }} + register: recent_info + +- name: "Print volume group details" + ansible.builtin.debug: + msg: + - "vg_info: {{ recent_info | to_nice_json }}" + verbosity: 2 +# ------------------------------------ diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index dbcc904045..44b72ed04c 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -258,6 +258,9 @@ enable_ha_monitoring: false enable_sap_cal: false calapi_kv: "" sap_cal_product_name: "" +vg_root: 
"rootvg" +lv_root_size: 10g +lv_tmp_size: 10g # ------------------- End - SAP CAL Integration variables ----------------------8 python_version: "python3" From ab9e8589f9ced8bd3c45cc967a3c315f66c3898f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Aug 2024 23:02:30 +0300 Subject: [PATCH 064/164] feat: Add ability to split out privatelink resources --- deploy/terraform/run/sap_landscape/output.tf | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/output.tf b/deploy/terraform/run/sap_landscape/output.tf index 00f1f4143c..5312417571 100644 --- a/deploy/terraform/run/sap_landscape/output.tf +++ b/deploy/terraform/run/sap_landscape/output.tf @@ -200,17 +200,16 @@ output "management_dns_resourcegroup_name" { output "management_dns_subscription_id" { description = "Subscription ID for the public Private DNS Zone" - value = var.management_dns_subscription_id + value = coalesce(var.management_dns_subscription_id, local.saplib_subscription_id) } output "privatelink_dns_resourcegroup_name" { - description = "Resource group name for the resource group containing the PrivateLink DNS Zones" value = coalesce(var.privatelink_dns_resourcegroup_name,var.management_dns_resourcegroup_name, local.saplib_resource_group_name) } output "privatelink_dns_subscription_id" { description = "Subscription ID for the PrivateLink Private DNS Zones" - value = coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id) + value = coalesce(var.privatelink_dns_subscription_id, var.management_dns_subscription_id, local.saplib_subscription_id) } From bcd9da0911747ad799fdd6686618c0429902ab3c Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Tue, 27 Aug 2024 23:18:29 +0300 Subject: [PATCH 065/164] feat: Refactor subnet configuration to enforce private link endpoint network policies Refactor the subnet configuration in the `sap_landscape` module to enforce private link endpoint network policies. This change ensures that the private link endpoints have network policies enabled, as specified by the `var.use_private_endpoint` variable. Co-authored-by: Kimmo Forss --- .../terraform-units/modules/sap_landscape/subnets.tf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index eefbc315ea..55f75813c4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -7,7 +7,7 @@ resource "azurerm_subnet" "admin" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.admin_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -34,7 +34,7 @@ resource "azurerm_subnet" "db" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.database_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? 
( ["Microsoft.Storage", "Microsoft.KeyVault"] ) : ( @@ -59,7 +59,7 @@ resource "azurerm_subnet" "app" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.application_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -86,7 +86,7 @@ resource "azurerm_subnet" "web" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.web_subnet_prefix] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] @@ -114,7 +114,7 @@ resource "azurerm_subnet" "storage" { virtual_network_name = local.SAP_virtualnetwork_exists ? data.azurerm_virtual_network.vnet_sap[0].name : azurerm_virtual_network.vnet_sap[0].name address_prefixes = [local.subnet_cidr_storage] - private_endpoint_network_policies_enabled = var.use_private_endpoint + enforce_private_link_endpoint_network_policies = var.use_private_endpoint service_endpoints = var.use_service_endpoint ? ( ["Microsoft.Storage", "Microsoft.KeyVault"] From 75625c276c59fc43f488c599f5b69a5333904f0c Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Tue, 27 Aug 2024 13:24:11 -0700 Subject: [PATCH 066/164] fix: update the management dns subscription id to saplib sub id, pin azurerm version in lanscape, deployer (#619) * pin azurerm version in deployer and landscape * Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL * chore: Extend 'root' and 'tmp' logical volumes for SAP CAL integration on RHEL --- deploy/terraform/run/sap_deployer/providers.tf | 2 +- deploy/terraform/run/sap_landscape/providers.tf | 2 +- deploy/terraform/run/sap_library/providers.tf | 2 +- .../terraform-units/modules/sap_landscape/providers.tf | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 07199ae105..4588670d4b 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -88,7 +88,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } } } diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 549a2abc0f..82b1da6926 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -116,7 +116,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } azapi = { source = "Azure/azapi" diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index 28e481b0b7..7e6a6a8edd 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -108,7 +108,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">=3.3" + version = "~> 3.3" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf 
b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index 7de1539916..35722baebe 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering] - version = ">= 3.23" + version = "~> 3.23" } azapi = { From 0df1c46ede0bbb2520f837656a298b7b8ecc0d57 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 09:29:42 +0300 Subject: [PATCH 067/164] Add iSCSI NSG rule --- .../modules/sap_landscape/iscsi.tf | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 5acf53349d..8e07e8502b 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -115,6 +115,37 @@ resource "azurerm_network_interface" "iscsi" { } } +// Add SSH network security rule +resource "azurerm_network_security_rule" "nsr_controlplane_iscsi" { + provider = azurerm.main + count = local.enable_sub_iscsi ? local.sub_iscsi_nsg_exists ? 0 : 1 : 0 + depends_on = [ + azurerm_network_security_group.iscsi + ] + name = "ConnectivityToSAPApplicationSubnetFromControlPlane-ssh-rdp-winrm" + resource_group_name = local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].resource_group_name + ) : ( + azurerm_virtual_network.vnet_sap[0].resource_group_name + ) + network_security_group_name = try(azurerm_network_security_group.iscsi[0].name, azurerm_network_security_group.app[0].name) + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = [22, 443, 3389, 5985, 5986, 2049, 111] + source_address_prefixes = compact(concat( + var.deployer_tfstate.subnet_mgmt_address_prefixes, + var.deployer_tfstate.subnet_bastion_address_prefixes, + local.SAP_virtualnetwork_exists ? ( + data.azurerm_virtual_network.vnet_sap[0].address_space) : ( + azurerm_virtual_network.vnet_sap[0].address_space + ))) + destination_address_prefixes = local.sub_iscsi_exists ? data.azurerm_subnet.iscsi[0].address_prefixes : azurerm_subnet.iscsi[0].address_prefixes +} + + // Manages the association between NIC and NSG resource "azurerm_network_interface_security_group_association" "iscsi" { provider = azurerm.main From 7651ef044f983a561bd9a304bcfde8058daef7d9 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 09:30:30 +0300 Subject: [PATCH 068/164] Change rule name --- deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index 8e07e8502b..e76d338943 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -122,7 +122,7 @@ resource "azurerm_network_security_rule" "nsr_controlplane_iscsi" { depends_on = [ azurerm_network_security_group.iscsi ] - name = "ConnectivityToSAPApplicationSubnetFromControlPlane-ssh-rdp-winrm" + name = "ConnectivityToISCSISubnetFromControlPlane-ssh-rdp-winrm" resource_group_name = local.SAP_virtualnetwork_exists ? 
( data.azurerm_virtual_network.vnet_sap[0].resource_group_name ) : ( From 6883a83fad0bb5b8b370669894d23ccb41f4a4fc Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 12:00:12 +0300 Subject: [PATCH 069/164] Add Expiry to workload zone key vault secrets --- .../run/sap_landscape/tfvar_variables.tf | 6 ++++ .../terraform/run/sap_landscape/transform.tf | 7 +++-- .../modules/sap_landscape/iscsi.tf | 17 +++++++++++ .../sap_landscape/key_vault_sap_landscape.tf | 28 +++++++++++++++++++ 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 88b306f343..475e33b72d 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -376,6 +376,12 @@ variable "soft_delete_retention_days" { default = 7 } +variable "set_secret_expiry" { + description = "Set expiry date for secrets" + default = false + type = bool + } + ######################################################################################### # # # Authentication variables # diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 9605ded6bb..cb13262968 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -200,9 +200,10 @@ locals { enable_secure_transfer = true use_spn = var.use_spn || try(var.options.use_spn, true) } - key_vault_temp = { - exists = length(var.user_keyvault_id) > 0 - } + key_vault_temp = { + exists = length(var.user_keyvault_id) > 0 + set_secret_expiry = var.set_secret_expiry + } user_keyvault_specified = length(var.user_keyvault_id) > 0 diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index e76d338943..e38ee93811 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -264,6 +264,11 @@ resource "azurerm_key_vault_secret" "iscsi_ppk" { name = local.iscsi_ppk_name value = local.iscsi_private_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) + } resource "azurerm_key_vault_secret" "iscsi_pk" { @@ -279,6 +284,10 @@ resource "azurerm_key_vault_secret" "iscsi_pk" { name = local.iscsi_pk_name value = local.iscsi_public_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_secret" "iscsi_username" { @@ -294,6 +303,10 @@ resource "azurerm_key_vault_secret" "iscsi_username" { name = local.iscsi_username_name value = local.iscsi_auth_username key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_secret" "iscsi_password" { @@ -309,6 +322,10 @@ resource "azurerm_key_vault_secret" "iscsi_password" { name = local.iscsi_pwd_name value = local.iscsi_auth_password key_vault_id = local.user_keyvault_exist ? 
local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } // Generate random password if password is set as authentication type and user doesn't specify a password, and save in KV diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index fb3f5bbccb..dc164f8334 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -150,6 +150,10 @@ resource "azurerm_key_vault_secret" "sid_ppk" { name = local.sid_ppk_name value = local.sid_private_key key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_ppk" { @@ -175,6 +179,10 @@ resource "azurerm_key_vault_secret" "sid_pk" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_pk" { @@ -202,6 +210,10 @@ resource "azurerm_key_vault_secret" "sid_username" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_username" { @@ -227,6 +239,10 @@ resource "azurerm_key_vault_secret" "sid_password" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } data "azurerm_key_vault_secret" "sid_password" { @@ -268,6 +284,10 @@ resource "azurerm_key_vault_secret" "witness_access_key" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } //Witness access key @@ -301,6 +321,10 @@ resource "azurerm_key_vault_secret" "witness_name" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } resource "azurerm_key_vault_access_policy" "kv_user_msi" { @@ -350,6 +374,10 @@ resource "azurerm_key_vault_secret" "deployer_keyvault_user_name" { local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id ) + expiration_date = var.key_vault.set_secret_expiry ? ( + time_offset.secret_expiry_date.rfc3339) : ( + null + ) } From e79ea4ab0505caff97a24053d23c41109cc99363 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 12:04:30 +0300 Subject: [PATCH 070/164] Provide a more robust way to source the deployer subscription --- deploy/terraform/run/sap_landscape/variables_local.tf | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/run/sap_landscape/variables_local.tf b/deploy/terraform/run/sap_landscape/variables_local.tf index 3e1cc00b5c..2efe0b1642 100644 --- a/deploy/terraform/run/sap_landscape/variables_local.tf +++ b/deploy/terraform/run/sap_landscape/variables_local.tf @@ -27,10 +27,12 @@ locals { "") ) - deployer_subscription_id = length(local.spn_key_vault_arm_id) > 0 ? 
( - split("/", local.spn_key_vault_arm_id)[2]) : ( - "" - ) + deployer_subscription_id = coalesce( + try(data.terraform_remote_state.deployer[0].outputs.created_resource_group_subscription_id,""), + length(local.spn_key_vault_arm_id) > 0 ? ( + split("/", local.spn_key_vault_arm_id)[2]) : ( + "" + )) spn = { subscription_id = data.azurerm_key_vault_secret.subscription_id.value, From a11ce4e9cd9d706fa1ecd6d8e370f3a7702f334e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 12:12:16 +0300 Subject: [PATCH 071/164] Add support for secret expiry --- Webapp/SDAF/Models/LandscapeModel.cs | 2 ++ Webapp/SDAF/ParameterDetails/LandscapeDetails.json | 9 +++++++++ Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt | 3 +++ Webapp/SDAF/SDAFWebApp.csproj | 6 +++--- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index fd20dc3229..674737500f 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -316,6 +316,8 @@ public bool IsValid() public int? soft_delete_retention_days { get; set; } = 14; + public bool? set_secret_expiry { get; set; } = false; + /*---------------------------------------------------------------------------8 | | | NFS information | diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 9755b9c8bd..54da7b1e33 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -871,6 +871,15 @@ "Options": [], "Overrules": "", "Display": 2 + }, + { + "Name": "set_secret_expiry", + "Required": false, + "Description": "Sets expiry date for secrets", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 } ] }, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index dee38b8200..4cf26cb4e7 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -364,6 +364,9 @@ $$additional_users_to_add_to_keyvault_policies$$ # The number of days that items should be retained in the soft delete period $$soft_delete_retention_days$$ +# Set expiry date for secrets +$$set_secret_expiry$$ + ######################################################################################### # # # Credentials # diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 6e4dfa43d2..acfa3b9393 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -18,7 +18,7 @@ - + @@ -26,8 +26,8 @@ - - + + From f7f5392ccb44793199caf9c076e4b7fca3309ba5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 12:15:50 +0300 Subject: [PATCH 072/164] chore: Update keyvault set-policy command in sap-workload-zone.yaml Co-authored-by: Kimmo Forss --- deploy/pipelines/02-sap-workload-zone.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 88f49d9177..eb8ca1518a 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -434,7 +434,7 @@ stages: --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION secrets_set=$? 
; echo -e "$cyan Set Secrets returned $secrets_set $reset" - az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --output none + az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --subscription $STATE_SUBSCRIPTION --output none fi fi @@ -562,11 +562,11 @@ stages: if [ -z ${az_var} ]; then echo "##vso[task.logissue type=warning]Variable FENCING_SPN_ID is not set. Required for highly available deployments" else - export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) + export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --subscription $STATE_SUBSCRIPTION --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) if [ -z "$fencing_id" ]; then - az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --subscription $STATE_SUBSCRIPTION --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --subscription $STATE_SUBSCRIPTION --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --subscription $STATE_SUBSCRIPTION --output none fi fi az logout --output none From e52568dcf33b4cbff94922a7b521017f469fb9aa Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 13:40:31 +0300 Subject: [PATCH 073/164] feat: Add expiry date to secrets in key vault --- .../modules/sap_landscape/key_vault_sap_landscape.tf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index dc164f8334..593fcfd9fd 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -135,6 +135,10 @@ resource "random_password" "created_password" { min_numeric = 2 } +## Add an expiry date to the secrets +resource "time_offset" "secret_expiry_date" { + offset_months = 12 +} // Key pair/password will be stored in the existing KV if specified, otherwise will be stored in a newly provisioned KV resource "azurerm_key_vault_secret" "sid_ppk" { From 71f6ad82528cc919d6cf22b52dc1ea9a3ef5d84d Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 14:50:26 +0300 Subject: [PATCH 074/164] chore: Disable cross-tenant replication for sapmnt storage account --- .../modules/sap_system/common_infrastructure/storage_accounts.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 3ff79311c4..7a6d122c34 100644 --- 
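The `time_offset` resource comes from the hashicorp/time provider rather than azurerm, so the module needs that provider available; its `rfc3339` attribute, referenced by the `expiration_date` arguments added in the earlier secret resources, renders a timestamp twelve months after the resource was created. A minimal sketch of the moving parts, assuming the time provider is not already declared elsewhere in the module:

    terraform {
      required_providers {
        time = {
          source = "hashicorp/time"
        }
      }
    }

    # Captures the creation time once and stores it in state, so the expiry
    # does not roll forward on later plans.
    resource "time_offset" "secret_expiry_date" {
      offset_months = 12
    }

    output "example_expiry" {
      # e.g. "2025-08-28T13:40:31Z"
      value = time_offset.secret_expiry_date.rfc3339
    }

One consequence of this design is that extending the expiry a year later requires replacing or tainting the `time_offset` resource, since the base timestamp is fixed at creation.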
From c19937f2354cd73a4bc5ce1cb08554dda259a6d6 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 14:50:26 +0300 Subject: [PATCH 074/164] chore: Disable cross-tenant replication for sapmnt storage account --- .../modules/sap_system/common_infrastructure/storage_accounts.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 3ff79311c4..7a6d122c34 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -37,6 +37,7 @@ resource "azurerm_storage_account" "sapmnt" { enable_https_traffic_only = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false + cross_tenant_replication_enabled = false public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) tags = var.tags From f7f5392ccb44793199caf9c076e4b7fca3309ba5 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 15:45:50 +0300 Subject: [PATCH 075/164] chore: Update DNS configuration for sap_library module --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index d15ebdf1d2..e017aaa18f 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -18,7 +18,7 @@ resource "azurerm_private_dns_zone" "dns" { } resource "azurerm_private_dns_zone" "blob" { provider = azurerm.main - count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] @@ -31,7 +31,7 @@ resource "azurerm_private_dns_zone" "table" { provider = azurerm.main - count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] @@ -44,7 +44,7 @@ resource "azurerm_private_dns_zone" "file" { provider = azurerm.main - count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 + count = local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 depends_on = [ azurerm_resource_group.library ] @@ -57,7 +57,7 @@ resource "azurerm_private_dns_zone" "vault" { provider = azurerm.main - count = local.use_local_privatelink_dns && var.use_private_endpoint && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 depends_on = [ azurerm_resource_group.library ] From 117ffd3b0e7c12b7cd0313f4aa5dc4fe9941b77e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 15:48:25 +0300 Subject: [PATCH 076/164] chore: Update DNS configuration for sap_library module --- deploy/terraform/terraform-units/modules/sap_library/dns.tf | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index e017aaa18f..2fcfa23f4a 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -72,7 +72,11 @@ data "azurerm_private_dns_zone" "vault" { provider = azurerm.privatelinkdnsmanagement count = !local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0 name = var.dns_settings.dns_zone_names.vault_dns_zone_name - resource_group_name = var.dns_settings.privatelink_dns_resourcegroup_name + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, var.dns_settings.management_dns_resourcegroup_name,local.resource_group_exists ? ( + split("/", var.infrastructure.resource_group.arm_id)[4]) : ( + azurerm_resource_group.library[0].name + ) + } From 2c77a26680dd814687637672b7d8dc16fc1ba8e1 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 16:04:13 +0300 Subject: [PATCH 077/164] chore: Update use_local_privatelink_dns condition in sap_library module --- .../terraform-units/modules/sap_library/variables_local.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index 015338c91b..7fd17df9a1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -57,7 +57,7 @@ locals { enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) use_local_private_dns = (length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.management_dns_resourcegroup_name)) == 0) - use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) == 0 + use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) > 0 keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") From 0a90f46b7091eb44dca6ba0c53ff080f6b951286 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 16:13:28 +0300 Subject: [PATCH 078/164] chore: Update DNS configuration for sap_library module --- .../terraform-units/modules/sap_library/dns.tf | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/dns.tf b/deploy/terraform/terraform-units/modules/sap_library/dns.tf index 2fcfa23f4a..8bc9cd9720 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/dns.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/dns.tf @@ -72,10 +72,12 @@ data "azurerm_private_dns_zone" "vault" { provider = azurerm.privatelinkdnsmanagement count = !local.use_local_privatelink_dns && var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 name = var.dns_settings.dns_zone_names.vault_dns_zone_name - resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, var.dns_settings.management_dns_resourcegroup_name,local.resource_group_exists ? ( - split("/", var.infrastructure.resource_group.arm_id)[4]) : ( - azurerm_resource_group.library[0].name - ) + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, + local.resource_group_exists ? ( + split("/", var.infrastructure.resource_group.arm_id)[4]) : ( + azurerm_resource_group.library[0].name + )) } From 8a2f9c0a28632c434a65a2766494952ae3e6f055 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 16:25:30 +0300 Subject: [PATCH 079/164] chore: Update private DNS configuration in sap_library module --- .../terraform-units/modules/sap_library/keyvault_endpoint.tf | 2 +- .../terraform-units/modules/sap_library/variables_local.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf index 3c24f8c479..da8de606d1 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/keyvault_endpoint.tf @@ -41,7 +41,7 @@ resource "azurerm_private_endpoint" "kv_user" { for_each = range(var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 1 : 0) content { name = var.dns_settings.dns_zone_names.vault_dns_zone_name - private_dns_zone_ids = [local.use_local_private_dns ? azurerm_private_dns_zone.vault[0].id : data.azurerm_private_dns_zone.vault[0].id] + private_dns_zone_ids = [local.use_local_privatelink_dns ? 
azurerm_private_dns_zone.vault[0].id : data.azurerm_private_dns_zone.vault[0].id] } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf index 7fd17df9a1..015338c91b 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/variables_local.tf @@ -57,7 +57,7 @@ locals { enable_firewall_for_keyvaults_and_storage = try(var.deployer_tfstate.enable_firewall_for_keyvaults_and_storage, false) use_local_private_dns = (length(var.dns_settings.dns_label) > 0 && !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.management_dns_resourcegroup_name)) == 0) - use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) > 0 + use_local_privatelink_dns = !var.use_custom_dns_a_registration && length(trimspace(var.dns_settings.privatelink_dns_resourcegroup_name)) == 0 keyvault_id = try(var.deployer_tfstate.deployer_kv_user_arm_id, "") From 18bc0fdbddd48e6d53a2800c534c6351f72d6f33 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 17:11:52 +0300 Subject: [PATCH 080/164] Don't create route table if Firewall is used --- .../terraform-units/modules/sap_landscape/ams.tf | 2 +- .../modules/sap_landscape/infrastructure.tf | 4 ++-- .../terraform-units/modules/sap_landscape/iscsi.tf | 2 +- .../terraform-units/modules/sap_landscape/subnets.tf | 9 ++++----- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf index 5f489b8e13..d123757e2c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/ams.tf @@ -9,7 +9,7 @@ data "azurerm_subnet" "ams" { resource "azurerm_subnet_route_table_association" "ams" { provider = azurerm.main - count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? 1 : 0 + count = local.create_ams_instance && local.ams_subnet_defined && !local.SAP_virtualnetwork_exists && !local.ams_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.ams diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf index c4fd4db895..0da138aa69 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/infrastructure.tf @@ -136,7 +136,7 @@ resource "azurerm_virtual_network_peering" "peering_sap_management" { //Route table resource "azurerm_route_table" "rt" { provider = azurerm.main - count = local.SAP_virtualnetwork_exists ? 0 : 1 + count = local.SAP_virtualnetwork_exists ? 0 : (local.create_nat_gateway ? 0 : 1) depends_on = [ azurerm_virtual_network.vnet_sap ] @@ -159,7 +159,7 @@ resource "azurerm_route_table" "rt" { resource "azurerm_route" "admin" { provider = azurerm.main - count = length(local.firewall_ip) > 0 ? local.SAP_virtualnetwork_exists ? 0 : 1 : 0 + count = length(local.firewall_ip) > 0 ? local.SAP_virtualnetwork_exists ? 0 : (local.create_nat_gateway ? 
0 : 1) : 0 depends_on = [ azurerm_route_table.rt ] diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf index e38ee93811..6b58678af4 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/iscsi.tf @@ -63,7 +63,7 @@ data "azurerm_network_security_group" "iscsi" { resource "azurerm_subnet_route_table_association" "iscsi" { provider = azurerm.main - count = local.enable_iscsi && !local.SAP_virtualnetwork_exists && !local.sub_iscsi_exists ? 1 : 0 + count = local.enable_iscsi && !local.SAP_virtualnetwork_exists && !local.sub_iscsi_exists ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.iscsi diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf index 55f75813c4..956b0f1b19 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/subnets.tf @@ -177,12 +177,11 @@ resource "azurerm_subnet" "ams" { } } - #Associate the subnets to the route table resource "azurerm_subnet_route_table_association" "admin" { provider = azurerm.main - count = local.admin_subnet_defined && !local.SAP_virtualnetwork_exists && !local.admin_subnet_existing ? 1 : 0 + count = local.admin_subnet_defined && !local.SAP_virtualnetwork_exists && !local.admin_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.admin @@ -193,7 +192,7 @@ resource "azurerm_subnet_route_table_association" "admin" { resource "azurerm_subnet_route_table_association" "db" { provider = azurerm.main - count = local.database_subnet_defined && !local.SAP_virtualnetwork_exists && !local.database_subnet_existing ? 1 : 0 + count = local.database_subnet_defined && !local.SAP_virtualnetwork_exists && !local.database_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.db @@ -204,7 +203,7 @@ resource "azurerm_subnet_route_table_association" "db" { resource "azurerm_subnet_route_table_association" "app" { provider = azurerm.main - count = local.application_subnet_defined && !local.SAP_virtualnetwork_exists && !local.application_subnet_existing ? 1 : 0 + count = local.application_subnet_defined && !local.SAP_virtualnetwork_exists && !local.application_subnet_existing ? (local.create_nat_gateway ? 0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.db @@ -215,7 +214,7 @@ resource "azurerm_subnet_route_table_association" "app" { resource "azurerm_subnet_route_table_association" "web" { provider = azurerm.main - count = local.web_subnet_defined && !local.SAP_virtualnetwork_exists && !local.web_subnet_existing ? 1 : 0 + count = local.web_subnet_defined && !local.SAP_virtualnetwork_exists && !local.web_subnet_existing ? (local.create_nat_gateway ? 
0 : 1) : 0 depends_on = [ azurerm_route_table.rt, azurerm_subnet.web From c5f271da7c59a759554a566f5cc64e18e8c1c68f Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 17:55:54 +0300 Subject: [PATCH 081/164] chore: Update key_vault.tf for sap_library module --- .../terraform/terraform-units/modules/sap_library/key_vault.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index cf04ab0fb3..9a402c5636 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -88,7 +88,8 @@ resource "azurerm_private_dns_a_record" "kv_user" { zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name resource_group_name = coalesce( var.dns_settings.privatelink_dns_resourcegroup_name, - var.dns_settings.management_dns_resourcegroup_name + var.dns_settings.management_dns_resourcegroup_name, + local.resource_group_name ) ttl = 3600 records = [azurerm_private_endpoint.kv_user[0].private_service_connection[0].private_ip_address] From 4beb22ae001261450a3f84469d11fc8a230a0d24 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 18:11:01 +0300 Subject: [PATCH 082/164] chore: Update private DNS configuration in sap_library module --- .../modules/sap_library/infrastructure.tf | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf index b046292896..1000ffcb62 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/infrastructure.tf @@ -42,13 +42,12 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.dns_settings.management_dns_subscription_id) == 0 ? ( + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? ( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name - )) : ( - var.dns_settings.management_dns_resourcegroup_name - ) + )) private_dns_zone_name = var.dns_settings.dns_label virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = true @@ -68,13 +67,12 @@ resource "azurerm_private_dns_zone_virtual_network_link" "vnet_mgmt_blob" { var.naming.resource_suffixes.dns_link ) - resource_group_name = length(var.dns_settings.privatelink_dns_subscription_id) == 0 ? ( + resource_group_name = coalesce(var.dns_settings.privatelink_dns_resourcegroup_name, + var.dns_settings.management_dns_resourcegroup_name, local.resource_group_exists ? 
( split("/", var.infrastructure.resource_group.arm_id)[4]) : ( azurerm_resource_group.library[0].name - )) : ( - var.dns_settings.privatelink_dns_resourcegroup_name - ) + )) private_dns_zone_name = var.dns_settings.dns_zone_names.blob_dns_zone_name virtual_network_id = var.deployer_tfstate.vnet_mgmt_id registration_enabled = false From bf5e633fb370143817e7e2d9be122279937c6334 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 19:07:39 +0300 Subject: [PATCH 083/164] chore: Update private endpoint configuration for sapmnt storage account --- .../sap_system/common_infrastructure/storage_accounts.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 7a6d122c34..3740eba135 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -78,7 +78,7 @@ data "azurerm_storage_account" "sapmnt" { resource "azurerm_private_endpoint" "sapmnt" { provider = azurerm.main - count = var.NFS_provider == "AFS" ? ( + count = var.NFS_provider == "AFS" && var.use_private_endpoint ? ( length(var.sapmnt_private_endpoint_id) > 0 ? ( 0) : ( 1 From 43f4bb074dde0d4975611f580f136220154cf4cd Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 19:15:21 +0300 Subject: [PATCH 084/164] Bump up the TF version --- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf | 2 +- deploy/terraform/run/sap_deployer/tfvar_variables.tf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index 2e277c55cd..df8a73ce86 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -541,7 +541,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.7.5" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.9.5" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 533594e903..ac31fd1c2d 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -381,7 +381,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = 
"Terraform version to install on deployer" - default = "1.7.5" + default = "1.9.5" } variable "name_override_file" { diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 46d648bfe2..59f69fc495 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -378,7 +378,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.7.5" + default = "1.9.5" } variable "name_override_file" { From fdd52fe12181e96ad377804feea35343cfbe4452 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 20:02:05 +0300 Subject: [PATCH 085/164] Also add the DB Virtual Hostname --- deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 index d6ecb7324f..619e07ed97 100644 --- a/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 +++ b/deploy/ansible/roles-sap-os/2.4-hosts-file/templates/hosts.j2 @@ -104,7 +104,7 @@ ansible_facts. {% endif %} {% elif tier in ['hana', 'oracle', 'oracle-asm', 'db2', 'sybase'] %} {% set db_virtual_host = hostvars[host]['custom_db_virtual_hostname'] if 'custom_db_virtual_hostname' in hostvars[host] else hostvars[host]['virtual_host'] %} -{% if db_virtual_host not in virtual_host_names and not database_high_availability %} +{% if db_virtual_host not in virtual_host_names %} {% set _ = virtual_host_names.append(db_virtual_host) %} {% endif %} {% endif %} From 1df34a4a4bd81728fee0de31aa490ae7a54c818e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 20:23:56 +0300 Subject: [PATCH 086/164] chore: Update private endpoint configuration for sapmnt storage account --- .../modules/sap_system/common_infrastructure/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf index 054e337f8e..71f7b0914b 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/outputs.tf @@ -238,7 +238,7 @@ output "sapmnt_path" { format("%s:/%s/%s", length(var.sapmnt_private_endpoint_id) == 0 ? 
( try(azurerm_private_endpoint.sapmnt[0].private_dns_zone_configs[0].record_sets[0].fqdn, - azurerm_private_endpoint.sapmnt[0].private_service_connection[0].private_ip_address + try(azurerm_private_endpoint.sapmnt[0].private_service_connection[0].private_ip_address,"") )) : ( data.azurerm_private_endpoint_connection.sapmnt[0].private_service_connection[0].private_ip_address ), From 8ab8a023cb957d36cc4286079fa8710edf075521 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 21:59:51 +0300 Subject: [PATCH 087/164] chore: Update default value for "use_private_endpoint" to true --- deploy/terraform/run/sap_system/tfvar_variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 325439ab55..7cb426df5c 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -119,7 +119,7 @@ variable "app_proximityplacementgroup_arm_ids" { variable "use_private_endpoint" { description = "Boolean value indicating if private endpoint should be used for the deployment" - default = false + default = true type = bool } From 79b1744af3822d1f5c9490a4710ff580570ffd47 Mon Sep 17 00:00:00 2001 From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com> Date: Wed, 28 Aug 2024 13:01:40 -0700 Subject: [PATCH 088/164] chore: remove extra line from 2.10.3.yaml --- deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml index f1c0e9f14f..82734dbc0f 100644 --- a/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml +++ b/deploy/ansible/roles-sap-os/2.10-sap-notes/tasks/2.10.3.yaml @@ -182,4 +182,3 @@ ansible.builtin.command: "saptune solution verify {{ saptune_solution_to_apply }}" changed_when: false failed_when: false - From 152e63843e9303423117508d8ce278b0887e7e08 Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 28 Aug 2024 23:01:49 +0300 Subject: [PATCH 089/164] Update github-actions-ansible-lint.yml Update lint version --- .github/workflows/github-actions-ansible-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/github-actions-ansible-lint.yml b/.github/workflows/github-actions-ansible-lint.yml index c782aa06eb..472cc3e7d2 100644 --- a/.github/workflows/github-actions-ansible-lint.yml +++ b/.github/workflows/github-actions-ansible-lint.yml @@ -16,7 +16,7 @@ jobs: - name: Install Ansible and Ansible-Lint run: | python -m pip install --upgrade pip - pip install ansible-core ansible-lint==24.2.0 jmespath netaddr + pip install ansible-core ansible-lint==24.7.0 jmespath netaddr - name: Install Ansible Collections run: | From 105e7dca248ee9007dbf20f877df1610bef35bae Mon Sep 17 00:00:00 2001 From: devanshjain Date: Wed, 28 Aug 2024 20:15:13 +0000 Subject: [PATCH 090/164] chore: Update yum to dnf for Oracle RPM package installation --- deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml | 2 +- deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml | 2 +- .../1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml | 2 +- deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml | 2 +- .../1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml | 4 ++-- deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git 
a/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml b/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml index 7153423cb5..cb577f3f5a 100644 --- a/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.0-ora-install/tasks/main.yaml @@ -65,7 +65,7 @@ register: oracle_installed - name: "ORACLE: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: "{{ target_media_location }}/downloads/compat-libcap1-1.10-7.el7.x86_64.rpm" state: present disable_gpg_check: true diff --git a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml index 66e700d7ce..660b1102e2 100644 --- a/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.1.1-ora-asm-grid/tasks/main.yaml @@ -172,7 +172,7 @@ - name: "ORACLE ASM: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: - "/oracle/GRID/{{ ora_version }}/cv/rpm/cvuqdisk-1.0.10-1.rpm" state: present diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml index 0d229f9d65..1030399880 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.2.0-cluster-RedHat.yml @@ -236,7 +236,7 @@ - name: "1.17 Generic Pacemaker - Install fence-agents-kdump package" when: - kdump_enabled | default("disabled") == "enabled" - ansible.builtin.yum: + ansible.builtin.dnf: name: fence-agents-kdump state: present register: fence_agents_kdump_package diff --git a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml index 2bcd2a8ef1..d04d9267bb 100644 --- a/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml +++ b/deploy/ansible/roles-os/1.20-prometheus/tasks/main.yml @@ -14,7 +14,7 @@ - ansible_os_family | upper == "REDHAT" block: - name: "1.20 Packages: - Install pcp and pcp-pmda-hacluster package" - ansible.builtin.yum: + ansible.builtin.dnf: name: - "pcp" - "pcp-pmda-hacluster" diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml index 844f8bce44..4cf468bac7 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.3-update-packages-RedHat.yaml @@ -9,7 +9,7 @@ - name: "1.4 Packages: - Update packages" become: true - ansible.builtin.yum: + ansible.builtin.dnf: name: '*' state: latest skip_broken: true @@ -22,7 +22,7 @@ # packages assigned to the active tier or 'all'. 
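# (ansible.builtin.dnf targets the DNF package manager that RHEL 8+ and Oracle Linux 8+ ship in place of YUM;
# the parameters these tasks rely on — name, state, skip_broken, disable_gpg_check — are accepted by both the
# yum and dnf modules, which is why the swap in this patch needs no other changes.)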
# - name: "1.4 Packages: - Upgrade all: {{ distribution_full_id }}" # noqa package-latest # become: true -# ansible.builtin.yum: +# ansible.builtin.dnf: # name: '*' # state: latest # skip_broken: true diff --git a/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml b/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml index 10a273787f..9166b8a942 100644 --- a/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml +++ b/deploy/ansible/roles-os/1.5.1-disk-setup-asm/tasks/main.yml @@ -196,7 +196,7 @@ block: - name: "ORACLE ASM: Install RPM Packages" - ansible.builtin.yum: + ansible.builtin.dnf: name: - "{{ target_media_location }}/downloads/oracleasmlib-2.0.17-1.el8.x86_64.rpm" - "{{ target_media_location }}/downloads/oracleasm-support-2.1.12-1.el8.x86_64.rpm" From 6757a8e7297fd7da029da38dbc54dfcba82e59a8 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Tue, 3 Sep 2024 18:27:58 -0400 Subject: [PATCH 091/164] added file to set_vars --- .../ansible/lookup_plugins/convert_ansible.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 deploy/ansible/lookup_plugins/convert_ansible.py diff --git a/deploy/ansible/lookup_plugins/convert_ansible.py b/deploy/ansible/lookup_plugins/convert_ansible.py new file mode 100644 index 0000000000..995abdfbb5 --- /dev/null +++ b/deploy/ansible/lookup_plugins/convert_ansible.py @@ -0,0 +1,32 @@ +def setting_vars(): + this_sid = { + 'sid': sap_id.upper(), + 'dbsid_uid': hdbadm_uid, + 'sidadm_uid': asesidadm_uid if platform == 'SYSBASE' else sidadm_uid, + 'ascs_inst_no': scs_instance_number, + 'pas_inst_no': pas_instance_number, + 'app_inst_no': app_instance_number + } + try: + all_sap_mounts = multi_sids + except: + all_sap_mounts = dict(**all_sap_mounts, **this_sid) + + for server in list_of_servers: + first_server = query(sap_id.upper()+'_'+server) + first_server_temp.append(first_server) + + afs_mnt_options = 'noresvport,vers=4,minorversion=1,sec=sys' + + print(this_sid) + print(all_sap_mounts) + print(first_server_temp) + +def query(full_hostname): + with open('/etc/ansible/hosts', 'r') as file: + lines = file.readlines() + for line in lines: + if full_hostname in line: + return full_hostname + +setting_vars() From e1de050d8ab852c701b761913e72fce64c475d32 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 12:47:55 -0400 Subject: [PATCH 092/164] pushing my changes --- .../ansible/lookup_plugins/convert_ansible.py | 32 --------- deploy/ansible/lookup_plugins/setting_vars.py | 72 +++++++++++++++++++ .../tasks/2.6.0-afs-mounts.yaml | 43 +++++------ 3 files changed, 88 insertions(+), 59 deletions(-) delete mode 100644 deploy/ansible/lookup_plugins/convert_ansible.py create mode 100644 deploy/ansible/lookup_plugins/setting_vars.py diff --git a/deploy/ansible/lookup_plugins/convert_ansible.py b/deploy/ansible/lookup_plugins/convert_ansible.py deleted file mode 100644 index 995abdfbb5..0000000000 --- a/deploy/ansible/lookup_plugins/convert_ansible.py +++ /dev/null @@ -1,32 +0,0 @@ -def setting_vars(): - this_sid = { - 'sid': sap_id.upper(), - 'dbsid_uid': hdbadm_uid, - 'sidadm_uid': asesidadm_uid if platform == 'SYSBASE' else sidadm_uid, - 'ascs_inst_no': scs_instance_number, - 'pas_inst_no': pas_instance_number, - 'app_inst_no': app_instance_number - } - try: - all_sap_mounts = multi_sids - except: - all_sap_mounts = dict(**all_sap_mounts, **this_sid) - - for server in list_of_servers: - first_server = query(sap_id.upper()+'_'+server) - first_server_temp.append(first_server) - - afs_mnt_options = 
'noresvport,vers=4,minorversion=1,sec=sys' - - print(this_sid) - print(all_sap_mounts) - print(first_server_temp) - -def query(full_hostname): - with open('/etc/ansible/hosts', 'r') as file: - lines = file.readlines() - for line in lines: - if full_hostname in line: - return full_hostname - -setting_vars() diff --git a/deploy/ansible/lookup_plugins/setting_vars.py b/deploy/ansible/lookup_plugins/setting_vars.py new file mode 100644 index 0000000000..6866389929 --- /dev/null +++ b/deploy/ansible/lookup_plugins/setting_vars.py @@ -0,0 +1,72 @@ +from ansible.module_utils.basic import AnsibleModule +#afs mount: Define this SID +#sap_id = 'rh6' +#hdbadm_uid = 'testing' +#platform = 'SYSBASE' +#sidadm_uid = 'testing2' +#asesidadm_uid = 'testing3' +#scs_instance_number = '1' +#pas_instance_number = '2' +#app_instance_number = '3' +#list_of_servers = ['SCS','DB'] +first_server_temp = [] + +def run_module(): + module_args = dict( + sap_id=dict(type="str", required=True), + hdbadm_uid=dict(type="str", required=True), + platform=dict(type="str", required=True), + sidadm_uid=dict(type="str", required=True), + multi_sids=dict(type='list', required=False), + asesidadm_uid=dict(type="str", required=False), + scs_instance_number=dict(type="str", required=True), + pas_instance_number=dict(type="str", required=True), + app_instance_number=dict(type="str", required=True), + list_of_servers=dict(type="list", required=True), + ) + + result = { + "this_sid": {}, + "all_sap_mounts": {}, + "first_server_temp": [], + "mnt_options": {} + } + + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + result['this_sid'] = { + 'sid': sap_id.upper(), + 'dbsid_uid': hdbadm_uid, + 'sidadm_uid': asesidadm_uid if platform == 'SYSBASE' else sidadm_uid, + 'ascs_inst_no': scs_instance_number, + 'pas_inst_no': pas_instance_number, + 'app_inst_no': app_instance_number + } + try: + result['all_sap_mounts'] = multi_sids + except: + result['all_sap_mounts'] = dict(**all_sap_mounts, **this_sid) + + for server in list_of_servers: + first_server = query(sap_id.upper()+'_'+server) + result['first_server_temp'].append(first_server) + + result['mnt_options'] = { + 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', + 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' + } + + print(this_sid) + print(all_sap_mounts) + print(first_server_temp) + module.exit_json(**result) + +def query(full_hostname): + with open('/etc/ansible/hosts', 'r') as file: + lines = file.readlines() + for line in lines: + if full_hostname in line: + return full_hostname + +if __name__ == "__main__": + run_module() diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 74e85a9a51..ec6dbf9832 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -5,33 +5,22 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: "AFS Mount: Define this SID" - ansible.builtin.set_fact: - this_sid: - { - 'sid': '{{ sap_sid | upper }}', - 'dbsid_uid': '{{ hdbadm_uid }}', - 'sidadm_uid': '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}', - 'ascs_inst_no': '{{ scs_instance_number }}', - 'pas_inst_no': '{{ pas_instance_number }}', - 'app_inst_no': '{{ app_instance_number }}' - } - -- name: "AFS Mount: Create 
list of all_sap_mounts to support " - ansible.builtin.set_fact: - all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" - -- name: "AFS Mount: Get the Server name list" - ansible.builtin.set_fact: - first_server_temp: "{{ first_server_temp | default([]) + [item] }}" - with_items: - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" - -- name: "AFS Mount: Set the NFSmount options" - ansible.builtin.set_fact: - afs_mnt_options: 'noresvport,vers=4,minorversion=1,sec=sys' - +- name: "calling setting variables python script" + setting_vars: + sap_id: "{{ sap_id }}" + hdbadm_uid: "{{ hdbadm_uid }}" + platform: "{{ platform }}" + sidadm_uid: "{{ sidadm_uid }}" + multi_sids: "{{ multi_sids | default(omit) }}" + asesidadm_uid: "{{ asesidadm_uid | default(omit)}}" + scs_instance_number: "{{ scs_instance_number }}" + pas_instance_number: "{{ pas_instance_number }}" + app_instance_number: "{{ app_instance_number }}" + list_of_servers: "{{ list_of_servers }}" + tags: + - test + register: check + - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: From fcf1de123998469ac92477b7db03eb670ad244c1 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:08:54 -0400 Subject: [PATCH 093/164] pushing my changes --- .../playbook_02_os_sap_specific_config.yaml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index ab66479547..9f5166e85e 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -196,6 +196,12 @@ # hosts file. We do not need to set ipaddr to ipadd. 
# vars: # ipaddr: "{{ ipadd }}" + - name: "SAP OS configuration playbook: - Mount the file systems" + ansible.builtin.include_role: + name: roles-sap-os/2.6-sap-mounts + tags: + - 2.6-sap-mounts + - name: "SAP OS configuration playbook: - Create hosts file" ansible.builtin.include_role: name: roles-sap-os/2.4-hosts-file @@ -300,11 +306,11 @@ tags: - 2.3-sap-exports - - name: "SAP OS configuration playbook: - Mount the file systems" - ansible.builtin.include_role: - name: roles-sap-os/2.6-sap-mounts - tags: - - 2.6-sap-mounts +# - name: "SAP OS configuration playbook: - Mount the file systems" +# ansible.builtin.include_role: +# name: roles-sap-os/2.6-sap-mounts +# tags: +# - 2.6-sap-mounts when: - ansible_os_family != "Windows" From c5372b0760442adc78b994ae04eadc4521ed785e Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:11:55 -0400 Subject: [PATCH 094/164] moving location of setting_vars --- deploy/ansible/{lookup_plugins => action_plugins}/setting_vars.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename deploy/ansible/{lookup_plugins => action_plugins}/setting_vars.py (100%) diff --git a/deploy/ansible/lookup_plugins/setting_vars.py b/deploy/ansible/action_plugins/setting_vars.py similarity index 100% rename from deploy/ansible/lookup_plugins/setting_vars.py rename to deploy/ansible/action_plugins/setting_vars.py From 39fb5dba956e0ffd15e97096cc5dff7cdc15aac6 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:25:17 -0400 Subject: [PATCH 095/164] referring to the variables correctly --- deploy/ansible/action_plugins/setting_vars.py | 24 ++++++++----------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/deploy/ansible/action_plugins/setting_vars.py b/deploy/ansible/action_plugins/setting_vars.py index 6866389929..a2de4d828d 100644 --- a/deploy/ansible/action_plugins/setting_vars.py +++ b/deploy/ansible/action_plugins/setting_vars.py @@ -35,30 +35,26 @@ def run_module(): module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) result['this_sid'] = { - 'sid': sap_id.upper(), - 'dbsid_uid': hdbadm_uid, - 'sidadm_uid': asesidadm_uid if platform == 'SYSBASE' else sidadm_uid, - 'ascs_inst_no': scs_instance_number, - 'pas_inst_no': pas_instance_number, - 'app_inst_no': app_instance_number + 'sid': module.params['sap_id'].upper(), + 'dbsid_uid': module.params['hdbadm_uid'], + 'sidadm_uid': module.params['asesidadm_uid'] if module.params['platform'] == 'SYSBASE' else module.params['sidadm_uid'], + 'ascs_inst_no': module.params['scs_instance_number'], + 'pas_inst_no': module.params['pas_instance_number'], + 'app_inst_no': module.params['app_instance_number'] } try: - result['all_sap_mounts'] = multi_sids + result['all_sap_mounts'] = module.params['multi_sids'] except: - result['all_sap_mounts'] = dict(**all_sap_mounts, **this_sid) + result['all_sap_mounts'] = dict(result['all_sap_mounts'], result['this_sid']) - for server in list_of_servers: - first_server = query(sap_id.upper()+'_'+server) + for server in module.params['list_of_servers']: + first_server = query(module.params['sap_id'].upper()+'_'+server) result['first_server_temp'].append(first_server) result['mnt_options'] = { 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - - print(this_sid) - print(all_sap_mounts) - print(first_server_temp) module.exit_json(**result) def query(full_hostname): From b97035b0f551d9840cc3bf2211b5a0e84551aae4 
Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:32:51 -0400 Subject: [PATCH 096/164] created action plugins folder --- .../2.6-sap-mounts}/action_plugins/setting_vars.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename deploy/ansible/{ => roles-sap-os/2.6-sap-mounts}/action_plugins/setting_vars.py (100%) diff --git a/deploy/ansible/action_plugins/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/action_plugins/setting_vars.py similarity index 100% rename from deploy/ansible/action_plugins/setting_vars.py rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/action_plugins/setting_vars.py From 198c4abcf39bb6e2980f3adffd1148c20d763d5e Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:39:44 -0400 Subject: [PATCH 097/164] created library folder --- .../2.6-sap-mounts/{action_plugins => library}/setting_vars.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename deploy/ansible/roles-sap-os/2.6-sap-mounts/{action_plugins => library}/setting_vars.py (100%) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/action_plugins/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py similarity index 100% rename from deploy/ansible/roles-sap-os/2.6-sap-mounts/action_plugins/setting_vars.py rename to deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py From 40d0738a43fe0f4ae899238cb5a1ea5de8230c2f Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:44:29 -0400 Subject: [PATCH 098/164] removed comments --- .../2.6-sap-mounts/library/setting_vars.py | 16 +++------------- .../2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 4 ++-- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index a2de4d828d..71f7fbc82a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -1,19 +1,9 @@ from ansible.module_utils.basic import AnsibleModule -#afs mount: Define this SID -#sap_id = 'rh6' -#hdbadm_uid = 'testing' -#platform = 'SYSBASE' -#sidadm_uid = 'testing2' -#asesidadm_uid = 'testing3' -#scs_instance_number = '1' -#pas_instance_number = '2' -#app_instance_number = '3' -#list_of_servers = ['SCS','DB'] first_server_temp = [] def run_module(): module_args = dict( - sap_id=dict(type="str", required=True), + sap_sid=dict(type="str", required=True), hdbadm_uid=dict(type="str", required=True), platform=dict(type="str", required=True), sidadm_uid=dict(type="str", required=True), @@ -35,7 +25,7 @@ def run_module(): module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) result['this_sid'] = { - 'sid': module.params['sap_id'].upper(), + 'sid': module.params['sap_sid'].upper(), 'dbsid_uid': module.params['hdbadm_uid'], 'sidadm_uid': module.params['asesidadm_uid'] if module.params['platform'] == 'SYSBASE' else module.params['sidadm_uid'], 'ascs_inst_no': module.params['scs_instance_number'], @@ -48,7 +38,7 @@ def run_module(): result['all_sap_mounts'] = dict(result['all_sap_mounts'], result['this_sid']) for server in module.params['list_of_servers']: - first_server = query(module.params['sap_id'].upper()+'_'+server) + first_server = query(module.params['sap_sid'].upper()+'_'+server) result['first_server_temp'].append(first_server) result['mnt_options'] = { diff --git 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index ec6dbf9832..0d1b21ba46 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -7,7 +7,7 @@ - name: "calling setting variables python script" setting_vars: - sap_id: "{{ sap_id }}" + sap_sid: "{{ sap_sid }}" hdbadm_uid: "{{ hdbadm_uid }}" platform: "{{ platform }}" sidadm_uid: "{{ sidadm_uid }}" @@ -20,7 +20,7 @@ tags: - test register: check - + - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: From af67b20ca357adb6aa58ff73f310a39a3e96711f Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 13:49:17 -0400 Subject: [PATCH 099/164] removed comments --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 0d1b21ba46..85acb8da3e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -16,7 +16,7 @@ scs_instance_number: "{{ scs_instance_number }}" pas_instance_number: "{{ pas_instance_number }}" app_instance_number: "{{ app_instance_number }}" - list_of_servers: "{{ list_of_servers }}" + list_of_servers: ['SCS','DB'] tags: - test register: check From 36e9aad98369465e249c0f81611631abb509b750 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 14:04:10 -0400 Subject: [PATCH 100/164] removed comments --- .../2.6-sap-mounts/library/setting_vars.py | 13 ++----------- .../2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 6 +++++- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 71f7fbc82a..e7d0826377 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -12,7 +12,7 @@ def run_module(): scs_instance_number=dict(type="str", required=True), pas_instance_number=dict(type="str", required=True), app_instance_number=dict(type="str", required=True), - list_of_servers=dict(type="list", required=True), + server_name=dict(type="str", required=True), ) result = { @@ -37,9 +37,7 @@ def run_module(): except: result['all_sap_mounts'] = dict(result['all_sap_mounts'], result['this_sid']) - for server in module.params['list_of_servers']: - first_server = query(module.params['sap_sid'].upper()+'_'+server) - result['first_server_temp'].append(first_server) + result['first_server_temp'].append(module.params['server_name']) result['mnt_options'] = { 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', @@ -47,12 +45,5 @@ def run_module(): } module.exit_json(**result) -def query(full_hostname): - with open('/etc/ansible/hosts', 'r') as file: - lines = file.readlines() - for line in lines: - if full_hostname in line: - return full_hostname - if __name__ == "__main__": run_module() diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 85acb8da3e..aeb5afea52 100644 --- 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -16,10 +16,14 @@ scs_instance_number: "{{ scs_instance_number }}" pas_instance_number: "{{ pas_instance_number }}" app_instance_number: "{{ app_instance_number }}" - list_of_servers: ['SCS','DB'] + server_name: "{{ item }}" tags: - test register: check + with_items: + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: From 243d30755679c0cd9d81fbda8ffda2a074d35baa Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 14:10:39 -0400 Subject: [PATCH 101/164] defining variables --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index e7d0826377..227499a7bf 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -1,7 +1,7 @@ from ansible.module_utils.basic import AnsibleModule -first_server_temp = [] - def run_module(): + first_server_temp = [] + all_sap_mounts = {} module_args = dict( sap_sid=dict(type="str", required=True), hdbadm_uid=dict(type="str", required=True), From be13e086ddb8bcd858b83456b8221d4e25581ec5 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 14:19:37 -0400 Subject: [PATCH 102/164] registering output from setting_vars --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index aeb5afea52..f8d42f14e0 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -19,7 +19,7 @@ server_name: "{{ item }}" tags: - test - register: check + register: setting_vars_output with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" @@ -28,8 +28,8 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "List of all the SAP mounts: {{ all_sap_mounts }}" - - "First server: {{ first_server_temp }}" + - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" + - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 2 # /*---------------------------------------------------------------------------8 From 9e2db4e45100c7b3dfab8b03b43e94bf57d1fb3e Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 14:23:34 -0400 Subject: [PATCH 103/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index f8d42f14e0..7f1c2cac50 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -28,8 +28,9 @@ - name: "AFS 
Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" - - "First server: {{ setting_vars_output.first_server_temp }}" + - "{{ setting_vars_output }}" +# - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" +# - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 2 # /*---------------------------------------------------------------------------8 From 192eb78b87e84c933e18a11bd92a7f79db1a42c6 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 15:12:16 -0400 Subject: [PATCH 104/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 7f1c2cac50..90cb49d730 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -28,7 +28,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "{{ setting_vars_output }}" + - " list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 2 From 948ee7231161abbec28be8ab099dbe6548e879ed Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 15:28:27 -0400 Subject: [PATCH 105/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 90cb49d730..96bffdcb23 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -31,7 +31,7 @@ - " list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" - verbosity: 2 + verbosity: 4 # /*---------------------------------------------------------------------------8 # | | From 449bfe4c5c06e5f835637d1b2216bc8521d3e6ad Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 12 Sep 2024 15:36:44 -0400 Subject: [PATCH 106/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 96bffdcb23..501f97710a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -17,12 +17,10 @@ pas_instance_number: "{{ pas_instance_number }}" app_instance_number: "{{ app_instance_number }}" server_name: "{{ item }}" - tags: - - test - register: setting_vars_output with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output - name: "AFS Mount: Create list of all_sap_mounts to support" From f2da13f1748ff7aaec6bf20429ae930231548bf6 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: 
Thu, 12 Sep 2024 23:18:35 -0400 Subject: [PATCH 107/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 227499a7bf..c31ed07754 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -43,6 +43,8 @@ def run_module(): 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } + + print(result) module.exit_json(**result) if __name__ == "__main__": From e7d7513aa48653acc37e143774586db794b62a0a Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:19:20 -0400 Subject: [PATCH 108/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index c31ed07754..04ecbc9688 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -44,7 +44,7 @@ def run_module(): 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - print(result) + print(result['this_sid']) module.exit_json(**result) if __name__ == "__main__": From 18a93aecb2adb24cfc6be74b92865997d36ab220 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:25:42 -0400 Subject: [PATCH 109/164] testing --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 4 ++-- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 04ecbc9688..ab6e8c2c7b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -44,8 +44,8 @@ def run_module(): 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - print(result['this_sid']) - module.exit_json(**result) + print(result['mnt_options']['anf_mnt_options']) + module.exit_json(result) if __name__ == "__main__": run_module() diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 501f97710a..69c9d9b9b3 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -64,7 +64,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted rescue: From e604a46d9655dce9c47197951d4d10b5503a1b63 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:33:25 -0400 Subject: [PATCH 110/164] testing --- .../2.6-sap-mounts/library/setting_vars.py | 1 - .../tasks/2.6.0-afs-mounts.yaml | 22 +++++++++---------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py 
b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index ab6e8c2c7b..109a717260 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -44,7 +44,6 @@ def run_module(): 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - print(result['mnt_options']['anf_mnt_options']) module.exit_json(result) if __name__ == "__main__": diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 69c9d9b9b3..a977d5fbbf 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -64,7 +64,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted rescue: @@ -81,7 +81,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted - name: "AFS Mount: Create SAP Directories (AFS)" @@ -133,7 +133,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: absent when: - sap_mnt is defined @@ -246,7 +246,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -256,7 +256,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: unmounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -273,7 +273,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -290,7 +290,7 @@ src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" path: "/sapmnt/{{ item.sid }}" fstype: 'nfs4' - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted loop: "{{ MULTI_SIDS }}" when: @@ -305,7 +305,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted rescue: - name: "AFS Mount: Pause for 15 seconds" @@ -316,7 +316,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted when: @@ -354,7 +354,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ afs_mnt_options }}" + opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" state: mounted loop: @@ -374,7 +374,7 @@ 'type': 'trans', 'temppath': 'saptrans', 'mount': '{{ sap_trans }}', - 'opts': '{{ afs_mnt_options }}', + 'opts': '{{ 
setting_vars_output.mnt_options.afs_mnt_options }}', 'path': '/usr/sap/trans', 'permissions': '0775', 'set_chattr_on_dir': false, From be21258e59028109685add591f466d8185a2335a Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:34:00 -0400 Subject: [PATCH 111/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index a977d5fbbf..b6dd7899ad 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - " list output: {{ setting_vars_output }}" +# - "list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From 883dbca541f0f0697cdd22bb7cdb002486a49e39 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:39:10 -0400 Subject: [PATCH 112/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 109a717260..4c69dfe06c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -44,7 +44,7 @@ def run_module(): 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - module.exit_json(result) + module.exit_json(**result) if __name__ == "__main__": run_module() From 7ea926086fb034a42bb823118aa7c2b1e144e151 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 11:43:47 -0400 Subject: [PATCH 113/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 4c69dfe06c..ac31aa50e5 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -43,6 +43,8 @@ def run_module(): 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } + print(**result['mnt_options']) + print(**result['mnt_options']['anf_mnt_options']) module.exit_json(**result) From 8d56a2f6852d3a7f80680a2ed51c6fc6a0b7799c Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 12:39:42 -0400 Subject: [PATCH 114/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 -- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index ac31aa50e5..4c69dfe06c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ 
b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -43,8 +43,6 @@ def run_module(): 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' } - print(**result['mnt_options']) - print(**result['mnt_options']['anf_mnt_options']) module.exit_json(**result) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index b6dd7899ad..47859cbcef 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: -# - "list output: {{ setting_vars_output }}" + - "list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From c7a9fd268b0e9f23000a3d4e241ffc7a894a9ef2 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 12:45:37 -0400 Subject: [PATCH 115/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 47859cbcef..d9c6f8ea64 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "list output: {{ setting_vars_output }}" + - "list output: {{ setting_vars_output.mnt_options }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From 21290407e62d191ba7e1f1ba5d6221734d82e437 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 13:58:55 -0400 Subject: [PATCH 116/164] testing --- .../tasks/2.6.0-afs-mounts.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index d9c6f8ea64..0a3da88b2f 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "list output: {{ setting_vars_output.mnt_options }}" + - "list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 @@ -64,7 +64,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted rescue: @@ -81,7 +81,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted - name: "AFS Mount: Create SAP Directories (AFS)" @@ -133,7 +133,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: 
"nfs4" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: absent when: - sap_mnt is defined @@ -246,7 +246,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -256,7 +256,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: unmounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -273,7 +273,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -290,7 +290,7 @@ src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" path: "/sapmnt/{{ item.sid }}" fstype: 'nfs4' - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted loop: "{{ MULTI_SIDS }}" when: @@ -305,7 +305,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted rescue: - name: "AFS Mount: Pause for 15 seconds" @@ -316,7 +316,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted when: @@ -354,7 +354,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output.mnt_options.afs_mnt_options }}" + opts: "{{ setting_vars_output }}" state: mounted loop: @@ -374,7 +374,7 @@ 'type': 'trans', 'temppath': 'saptrans', 'mount': '{{ sap_trans }}', - 'opts': '{{ setting_vars_output.mnt_options.afs_mnt_options }}', + 'opts': '{{ setting_vars_output }}', 'path': '/usr/sap/trans', 'permissions': '0775', 'set_chattr_on_dir': false, From 84f6edfa13ff7fd2cb8f9e17b9fec349d9e568ac Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 14:04:42 -0400 Subject: [PATCH 117/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 0a3da88b2f..e92cc7f2e9 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "list output: {{ setting_vars_output }}" + - "list output: {{ setting_vars_output['mnt_options'] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From 252955acda7aefbfd6f3ce972906410bcf4865da Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 14:11:37 -0400 Subject: [PATCH 118/164] testing --- 
.../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index e92cc7f2e9..4cecd23783 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "list output: {{ setting_vars_output['mnt_options'] }}" + - "TESTING list output: {{ setting_vars_output[3] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From a4bf71791a4a714a6360906c0eb5ca7f92878534 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:08:25 -0400 Subject: [PATCH 119/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 4cecd23783..3e5cd6e196 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -26,7 +26,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: msg: - - "TESTING list output: {{ setting_vars_output[3] }}" + - "TESTING list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From 7561f3eba604f38639ae2f71d8cf87ba5f6186e1 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:14:59 -0400 Subject: [PATCH 120/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 3e5cd6e196..92343b09fa 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,8 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: - - "TESTING list output: {{ setting_vars_output }}" + msg: "TESTING list output: {{ setting_vars_output.mnt_options }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" verbosity: 4 From 00bf9ffc223c5f6e4d5a4745b3b3b4ea648eb81e Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:15:23 -0400 Subject: [PATCH 121/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 92343b09fa..5a60b0d7ee 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -28,7 +28,6 @@ msg: "TESTING list output: {{ setting_vars_output.mnt_options }}" # - "List of all 
the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" - verbosity: 4 # /*---------------------------------------------------------------------------8 # | | From 618b38c3a6655531724d5e3a4b0fa31488a2b188 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:19:12 -0400 Subject: [PATCH 122/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 5a60b0d7ee..795a3052fa 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.mnt_options }}" + msg: "TESTING list output: {{ setting_vars_output }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From 2a537e963be52bdf8a2e7a21bdd069646613bb3a Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:23:41 -0400 Subject: [PATCH 123/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 795a3052fa..741d0099c6 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output }}" + msg: "TESTING list output: {{ setting_vars_output.results }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From e1a46786e6bddcec1c2ecabf41b5d4568d062566 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:27:14 -0400 Subject: [PATCH 124/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 741d0099c6..3310f97e95 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.results }}" + msg: "TESTING list output: {{ setting_vars_output.results['mnt_options'] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From eb31942d95bc5e4c0881ac1e037558f520a07150 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 17:53:14 -0400 Subject: [PATCH 125/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 3310f97e95..b1f5e45551 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.results['mnt_options'] }}" + msg: "TESTING list output: {{ setting_vars_output.results.mnt_options }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From 85208cd57a77e5764ea8aad32dc81c456c9a6408 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:05:08 -0400 Subject: [PATCH 126/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index b1f5e45551..da36046cb5 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.results.mnt_options }}" + msg: "TESTING list output: {{ setting_vars_output.results[0]['mnt_options'] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From 0f06096452f3ba9d4895a2e56196e91ee086e3ed Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:10:00 -0400 Subject: [PATCH 127/164] testing --- .../tasks/2.6.0-afs-mounts.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index da36046cb5..c86f85106e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.results[0]['mnt_options'] }}" + msg: "TESTING list output: {{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" @@ -62,7 +62,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted rescue: @@ -79,7 +79,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted - name: "AFS Mount: Create SAP Directories (AFS)" @@ -131,7 +131,7 @@ src: "{{ sap_mnt }}" path: "/saptmp" fstype: "nfs4" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: absent when: - sap_mnt is defined @@ -244,7 +244,7 @@ src: "{{ item.src }}" path: "{{ 
item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -254,7 +254,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: unmounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -271,7 +271,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted loop: - { type: 'nfs4', src: '{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}', path: '/sapmnt/{{ sap_sid | upper }}' } @@ -288,7 +288,7 @@ src: "{{ sap_mnt }}/sapmnt{{ item.sid }}" path: "/sapmnt/{{ item.sid }}" fstype: 'nfs4' - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted loop: "{{ MULTI_SIDS }}" when: @@ -303,7 +303,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted rescue: - name: "AFS Mount: Pause for 15 seconds" @@ -314,7 +314,7 @@ src: "{{ sap_mnt }}/sapmnt{{ sap_sid | upper }}" path: "/sapmnt/{{ sap_sid | upper }}" fstype: nfs4 - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted when: @@ -352,7 +352,7 @@ src: "{{ item.src }}" path: "{{ item.path }}" fstype: "{{ item.type }}" - opts: "{{ setting_vars_output }}" + opts: "{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" state: mounted loop: @@ -372,7 +372,7 @@ 'type': 'trans', 'temppath': 'saptrans', 'mount': '{{ sap_trans }}', - 'opts': '{{ setting_vars_output }}', + 'opts': '{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}', 'path': '/usr/sap/trans', 'permissions': '0775', 'set_chattr_on_dir': false, From 253615340ef0cde8506d9d60f6f03d147b4e9b01 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:13:57 -0400 Subject: [PATCH 128/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index c86f85106e..005331858c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -372,7 +372,7 @@ 'type': 'trans', 'temppath': 'saptrans', 'mount': '{{ sap_trans }}', - 'opts': '{{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}', + 'opts': '{{ setting_vars_output.results[0]["mnt_options"]["afs_mnt_options"] }}', 'path': '/usr/sap/trans', 'permissions': '0775', 'set_chattr_on_dir': false, From f6b6493cc5e1c997a6229706d9b448c7d5e45054 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:29:14 -0400 Subject: [PATCH 129/164] testing --- .../2.6-sap-mounts/library/setting_vars.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff 
--git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 4c69dfe06c..105bda1a3b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -33,9 +33,14 @@ def run_module(): 'app_inst_no': module.params['app_instance_number'] } try: - result['all_sap_mounts'] = module.params['multi_sids'] - except: - result['all_sap_mounts'] = dict(result['all_sap_mounts'], result['this_sid']) + if module.params['multi_sids'] is not None: + result['all_sap_mounts'] = module.params['multi_sids'] + + else: + result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] + + except Exception as e: + return(e) result['first_server_temp'].append(module.params['server_name']) From 585b9f5d247ea8f5ee9fba0e8ca8d37fbc31bd4e Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:33:29 -0400 Subject: [PATCH 130/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 105bda1a3b..a43cba3a69 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -40,7 +40,7 @@ def run_module(): result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] except Exception as e: - return(e) + module.exit_json(e) result['first_server_temp'].append(module.params['server_name']) From 4cfcfabe305ae19ab30223a20c5f6b7acc5c7e2f Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:36:37 -0400 Subject: [PATCH 131/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index a43cba3a69..49ea1acdf9 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -40,7 +40,7 @@ def run_module(): result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] except Exception as e: - module.exit_json(e) + module.exit_json(**str(e)) result['first_server_temp'].append(module.params['server_name']) From e55c761e379e2e906815a6b5fe010ea9b519e865 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 18:41:51 -0400 Subject: [PATCH 132/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 49ea1acdf9..ec6ee40b18 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -40,7 +40,7 @@ def run_module(): result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] except Exception as e: - module.exit_json(**str(e)) + module.fail_json(**e) result['first_server_temp'].append(module.params['server_name']) From c160fbfc4941e376c4f23cef9e0a3ca97548bcaa Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal 
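
The exception paths tried in the last few commits run into the AnsibleModule exit contract: exit_json and fail_json take keyword arguments only, so passing an exception object positionally, or splatting a string with **, raises a TypeError inside the module itself. A standalone sketch of the contract, assuming only ansible-core is installed:

    from ansible.module_utils.basic import AnsibleModule

    def run_module():
        module = AnsibleModule(argument_spec=dict(name=dict(type="str", required=True)))
        result = {"changed": False}
        try:
            result["greeting"] = "hello " + module.params["name"]
        except Exception as e:
            # fail_json wants kwargs; msg is the conventional key for the error text
            module.fail_json(msg=str(e), **result)
        # module.exit_json(result) would itself raise TypeError; unpack instead:
        module.exit_json(**result)

    if __name__ == "__main__":
        run_module()

The form the series lands on below, fail_json(msg=str(e), **result), matches this contract.
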
Date: Fri, 13 Sep 2024 18:46:35 -0400 Subject: [PATCH 133/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index ec6ee40b18..79f2af299b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -40,7 +40,7 @@ def run_module(): result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] except Exception as e: - module.fail_json(**e) + module.fail_json(msg=str(e),**result) result['first_server_temp'].append(module.params['server_name']) From 495529236447a9c0a9ba52721505a051c5fbb5a7 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 19:18:29 -0400 Subject: [PATCH 134/164] testing --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 79f2af299b..d690494747 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,8 +37,8 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - result['all_sap_mounts'] = result['all_sap_mounts'] + module.params['this_sid'] - + result['all_sap_mounts'] = result['all_sap_mounts'] + result['this_sid'] + except Exception as e: module.fail_json(msg=str(e),**result) From 7eea804b9a4ff9a0fb6f79ec1979a20470623198 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 19:34:45 -0400 Subject: [PATCH 135/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index d690494747..c25b865453 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,7 +37,7 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - result['all_sap_mounts'] = result['all_sap_mounts'] + result['this_sid'] + result['all_sap_mounts'].update(result['this_sid']) except Exception as e: module.fail_json(msg=str(e),**result) From 4f14d682b6fe317d09dc2e99de53582e7fb62972 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 19:40:26 -0400 Subject: [PATCH 136/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index c25b865453..24f03e5ed5 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -33,7 +33,7 @@ def run_module(): 'app_inst_no': module.params['app_instance_number'] } try: - if module.params['multi_sids'] is not None: + if module.params['multi_sids'] in locals(): result['all_sap_mounts'] = module.params['multi_sids'] else: From b15753ce7b2e72bf4a1cb04b8a9028c9a04b844f Mon Sep 17 00:00:00 
2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 19:45:24 -0400 Subject: [PATCH 137/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 24f03e5ed5..c25b865453 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -33,7 +33,7 @@ def run_module(): 'app_inst_no': module.params['app_instance_number'] } try: - if module.params['multi_sids'] in locals(): + if module.params['multi_sids'] is not None: result['all_sap_mounts'] = module.params['multi_sids'] else: From b5cfe305029a1274ce56e9e25d6289308fe6ba4d Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 19:48:25 -0400 Subject: [PATCH 138/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 005331858c..2d0ec4170b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: @@ -124,7 +124,7 @@ ansible.builtin.file: path: "/saptmp/sapmnt{{ item.sid | upper }}" state: absent - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Cleanup fstab and directory (sapmnt)" ansible.posix.mount: @@ -172,7 +172,7 @@ path: "/sapmnt/{{ item.sid }}" state: directory register: is_created_now - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Change attribute only when we create SAP Directories (sapmnt)" ansible.builtin.file: From 1ce5a072ef0ad65a487bc300725b0fa0baab361f Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 20:03:07 -0400 Subject: [PATCH 139/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 2d0ec4170b..3fa7ac85d5 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | dict2items }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From 0a79b9106fafe9f96466a8a754315c3e5772bf8f Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 20:07:17 -0400 Subject: [PATCH 140/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml 
b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 3fa7ac85d5..1893402a59 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | dict2items }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | wantlist }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From a266106c578feb68c07d2780720bbf0da035ae60 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 20:20:25 -0400 Subject: [PATCH 141/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index c25b865453..cbba564481 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,7 +37,7 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - result['all_sap_mounts'].update(result['this_sid']) + result['all_sap_mounts'] = result['this_sid'] except Exception as e: module.fail_json(msg=str(e),**result) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 1893402a59..b22aa92c49 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | wantlist }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts']}}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From dc53ff65d10eae32f99c5c02535c58e6a7029405 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 20:29:23 -0400 Subject: [PATCH 142/164] testing --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 4 ++-- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index cbba564481..aa8031611e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -1,7 +1,7 @@ from ansible.module_utils.basic import AnsibleModule def run_module(): first_server_temp = [] - all_sap_mounts = {} + all_sap_mounts = [] module_args = dict( sap_sid=dict(type="str", required=True), hdbadm_uid=dict(type="str", required=True), @@ -37,7 +37,7 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - result['all_sap_mounts'] = result['this_sid'] + result['all_sap_mounts'].update(result['this_sid']) except Exception as e: module.fail_json(msg=str(e),**result) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index b22aa92c49..2d0ec4170b 100644 --- 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts']}}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From 930cfe7ada63e8ebaf5f93885801885ebbb3fc34 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Fri, 13 Sep 2024 20:36:18 -0400 Subject: [PATCH 143/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 2d0ec4170b..1893402a59 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | wantlist }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From b0160ffb390a4676019b0c2758f6c3d3d5cbda97 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 15:30:18 -0400 Subject: [PATCH 144/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index aa8031611e..7e3c43d6a4 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -17,7 +17,7 @@ def run_module(): result = { "this_sid": {}, - "all_sap_mounts": {}, + "all_sap_mounts": [], "first_server_temp": [], "mnt_options": {} } diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 1893402a59..2d0ec4170b 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -102,7 +102,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] | wantlist }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: From a42fade0773e0b4c02d8f9b133ce15e23da08114 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 15:36:00 -0400 Subject: [PATCH 145/164] testing --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 7e3c43d6a4..475c98e5b0 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,11 +37,14 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: + print("HELLLOOOOOOOO") 
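
The back-and-forth over all_sap_mounts in these commits is a container-type question: update() exists on dicts, append() on lists, and += on a list expects another iterable rather than a single dict. The consuming tasks loop over entries and read item.sid, so a list of per-SID dicts is the shape that works; a minimal sketch with illustrative values:

    # all_sap_mounts needs to be a list of per-SID dicts for the loops that consume it.
    this_sid = {"sid": "X00", "sidadm_uid": "2000", "dbsid_uid": "2100"}
    multi_sids = None  # an MCOD deployment would pass a list of such dicts instead

    all_sap_mounts = []
    if multi_sids is not None:
        all_sap_mounts = multi_sids
    else:
        all_sap_mounts.append(this_sid)  # .update() is dict-only; += wants an iterable

    assert all_sap_mounts[0]["sid"] == "X00"
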
result['all_sap_mounts'].update(result['this_sid']) except Exception as e: module.fail_json(msg=str(e),**result) + print(result['all_sap_mounts']) + result['first_server_temp'].append(module.params['server_name']) result['mnt_options'] = { From 45c6b6f8cd9807a3e26002ac098014d97de36457 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 15:40:54 -0400 Subject: [PATCH 146/164] testing --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 475c98e5b0..273a9ea682 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,14 +37,11 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - print("HELLLOOOOOOOO") - result['all_sap_mounts'].update(result['this_sid']) + result['all_sap_mounts'] += (result['this_sid']) except Exception as e: module.fail_json(msg=str(e),**result) - print(result['all_sap_mounts']) - result['first_server_temp'].append(module.params['server_name']) result['mnt_options'] = { From caf7c5fd89d453729988b6191ebe592a688af3cf Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 15:53:20 -0400 Subject: [PATCH 147/164] testing --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index 2d0ec4170b..b4929f661d 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -25,7 +25,7 @@ - name: "AFS Mount: Create list of all_sap_mounts to support" ansible.builtin.debug: - msg: "TESTING list output: {{ setting_vars_output.results[0]['mnt_options']['afs_mnt_options'] }}" + msg: "TESTING list output: {{ setting_vars_output.results[0]['all_sap_mounts'] }}" # - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}" # - "First server: {{ setting_vars_output.first_server_temp }}" From 9ff6baf5304250ddd8481736f7de3cce2146cf52 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 16:01:33 -0400 Subject: [PATCH 148/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 273a9ea682..7e3c43d6a4 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -37,7 +37,7 @@ def run_module(): result['all_sap_mounts'] = module.params['multi_sids'] else: - result['all_sap_mounts'] += (result['this_sid']) + result['all_sap_mounts'].update(result['this_sid']) except Exception as e: module.fail_json(msg=str(e),**result) From b15b23d2581c1bbef0adfd6cc2ec83ffc419df59 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Sun, 15 Sep 2024 16:06:13 -0400 Subject: [PATCH 149/164] testing --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
index 7e3c43d6a4..fdaea81cf0 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
@@ -37,7 +37,7 @@ def run_module():
             result['all_sap_mounts'] = module.params['multi_sids']
 
         else:
-            result['all_sap_mounts'].update(result['this_sid'])
+            result['all_sap_mounts'].append(result['this_sid'])
 
     except Exception as e:
         module.fail_json(msg=str(e),**result)

From a3dfcde87d3eeafa35c1ea8267f75bbcaa4e3e49 Mon Sep 17 00:00:00 2001
From: Dhruv Aggarwal
Date: Sun, 15 Sep 2024 16:23:46 -0400
Subject: [PATCH 150/164] testing

---
 .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
index b4929f661d..d4150bbf0c 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml
@@ -29,6 +29,10 @@
 #      - "List of all the SAP mounts: {{ setting_vars_output.all_sap_mounts }}"
 #      - "First server: {{ setting_vars_output.first_server_temp }}"
 
+- name: Set first_server_temp to the value from setting_vars_output
+  ansible.builtin.set_fact:
+    first_server_temp: "{{ setting_vars_output.results[0]['first_server_temp'] }}"
+
 # /*---------------------------------------------------------------------------8
 # |                                                                            |
 # |                     Prepare for the sap_mnt mounts                         |
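
Worth noting about the fact being set here (an observation on the pattern, not something the commit states): each with_items iteration is an independent module run, so every results[N].first_server_temp is a one-element list for that host, and results[0] carries only the first SCS host rather than the accumulated SCS-plus-DB list the old set_fact built. A sketch of the difference, with illustrative host names:

    # Each loop iteration is a fresh module invocation (illustrative).
    results = [
        {"first_server_temp": ["scs-host"]},  # run for the SCS host
        {"first_server_temp": ["db-host"]},   # run for the DB host
    ]

    only_first = results[0]["first_server_temp"]                     # ['scs-host']
    combined = [h for r in results for h in r["first_server_temp"]]  # ['scs-host', 'db-host']

Consumers that only need the first server are unaffected; anything relying on the full host list would need the combined form.
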
From d8242a66a9d2dd0c2d1818d70b62ffff37813fe6 Mon Sep 17 00:00:00 2001
From: Dhruv Aggarwal
Date: Mon, 16 Sep 2024 15:57:47 -0400
Subject: [PATCH 151/164] testing anf-mounts

---
 .../2.6-sap-mounts/library/setting_vars.py  | 28 ++++++-
 .../tasks/2.6.1-anf-mounts.yaml             | 80 ++++++++-----------
 2 files changed, 58 insertions(+), 50 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
index fdaea81cf0..7a14ebd3a8 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
@@ -2,6 +2,7 @@ def run_module():
 def run_module():
     first_server_temp = []
     all_sap_mounts = []
+    distro_versions = ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5']
     module_args = dict(
         sap_sid=dict(type="str", required=True),
         hdbadm_uid=dict(type="str", required=True),
@@ -13,17 +14,21 @@ def run_module():
         pas_instance_number=dict(type="str", required=True),
         app_instance_number=dict(type="str", required=True),
         server_name=dict(type="str", required=True),
+        distribution_full_id=dict(type="str", required=False),
     )
 
     result = {
         "this_sid": {},
         "all_sap_mounts": [],
         "first_server_temp": [],
-        "mnt_options": {}
+        "mnt_options": {},
+        "nfs_service": "",
     }
 
     module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
-
+
+    distribution_full_id = module.params['distribution_full_id']
+
     result['this_sid'] = {
         'sid': module.params['sap_sid'].upper(),
         'dbsid_uid': module.params['hdbadm_uid'],
@@ -46,9 +51,26 @@ def run_module():
 
     result['mnt_options'] = {
         'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys',
-        'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'
     }
 
+    if distribution_full_id in distro_versions:
+        result['mnt_options'] = {
+            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8'
+        }
+    else:
+        result['mnt_options'] = {
+            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'
+        }
+
+    if distribution_full_id in ['redhat8', 'redhat9']:
+        result['nfs_service'] = 'nfs-server'
+    elif distribution_full_id == 'redhat7':
+        result['nfs_service'] = 'nfs'
+    elif distribution_full_id == 'oraclelinux8':
+        result['nfs_service'] = 'rpcbind'
+    else:
+        result['nfs_service'] = 'nfsserver'
+
     module.exit_json(**result)
 
 if __name__ == "__main__":
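
The deleted Jinja chain and the paired when: conditions from the YAML reappear above as plain Python branches. Condensed into one standalone helper for readability (same literals as the patch; the function name and shape are ours, not the module's):

    # Condensed restatement of the distro mapping added above (helper is illustrative).
    NCONNECT_DISTROS = {'redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2',
                        'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'}

    def nfs_settings(distribution_full_id):
        anf = 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'
        if distribution_full_id in NCONNECT_DISTROS:
            anf += ',nconnect=8'
        service = {'redhat8': 'nfs-server', 'redhat9': 'nfs-server',
                   'redhat7': 'nfs', 'oraclelinux8': 'rpcbind'}.get(distribution_full_id, 'nfsserver')
        return {'anf_mnt_options': anf, 'nfs_service': service}

    print(nfs_settings('redhat9.2'))  # nconnect=8 options, but service falls back to 'nfsserver'

One detail visible in the diff itself: the removed YAML keyed the service name off distribution_id (a major-version id such as 'redhat9'), while the module tests distribution_full_id against those same major-version strings, and the task shown next does not pass distribution_full_id at all, so such a call would always take the 'nfsserver' fallback.
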
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index e012dcfb88..bd6d07b619 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -4,41 +4,34 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/
 ---
-
-- name: "ANF Mount: Set the NFS Service name"
-  ansible.builtin.set_fact:
-    nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}"
-
-- name: "ANF Mount: Set the NFSmount options"
-  ansible.builtin.set_fact:
-    mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'
-  when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5']
-
-- name: "ANF Mount: Set the NFSmount options"
-  ansible.builtin.set_fact:
-    mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8'
-  when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5']
-
-- name: "ANF Mount: Define this SID"
-  ansible.builtin.set_fact:
-    this_sid: { 'sid': '{{ sap_sid | upper }}', 'dbsid_uid': '{{ hdbadm_uid }}', 'sidadm_uid': '{{ sidadm_uid }}', 'ascs_inst_no': '{{ scs_instance_number }}', 'pas_inst_no': '{{ pas_instance_number }}', 'app_inst_no': '{{ app_instance_number }}' }
+- name: "calling setting variables python script"
+  setting_vars:
+    sap_sid: "{{ sap_sid }}"
+    hdbadm_uid: "{{ hdbadm_uid }}"
+    platform: "{{ platform }}"
+    sidadm_uid: "{{ sidadm_uid }}"
+    multi_sids: "{{ multi_sids | default(omit) }}"
+    asesidadm_uid: "{{ asesidadm_uid | default(omit)}}"
+    scs_instance_number: "{{ scs_instance_number }}"
+    pas_instance_number: "{{ pas_instance_number }}"
+    app_instance_number: "{{ app_instance_number }}"
+    server_name: "{{ item }}"
+  with_items:
+    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}"
+    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
+  register: setting_vars_output
 
 - name: "ANF Mount: Create list of all_sap_mounts to support "
   ansible.builtin.set_fact:
-    all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}"
     db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
 
+- name: Set first_server_temp to the value from setting_vars_output
+  ansible.builtin.set_fact:
+    first_server_temp: "{{ setting_vars_output.results[0]['first_server_temp'] }}"
+
 - name: "ANF Mount: Ensure the NFS service is stopped"
   ansible.builtin.systemd:
-    name: "{{ nfs_service }}"
+    name: "{{ setting_vars_output.results[0]['nfs_service'] }}"
     state: stopped
   when:
     - "'scs' in supported_tiers"
@@ -118,7 +111,7 @@
       seconds: 5
 - name: "ANF Mount: Ensure the NFS service is restarted"
   ansible.builtin.systemd:
-    name: "{{ nfs_service }}"
+    name: "{{ setting_vars_output.results[0]['nfs_service'] }}"
     state: restarted
   when:
     - id_mapping_changed is changed
@@ -271,7 +264,7 @@
     path: "/sapmnt/{{ item.sid }}"
     state: directory
   register: is_created_now
-  loop: "{{ all_sap_mounts }}"
+  loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}"
   when:
     - tier == 'sapos'
    - node_tier in ['app','scs','ers', 'pas'] or 'scs' in supported_tiers
@@ -411,13 +404,6 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/
 
-- name: "ANF Mount: install:Get the Server name list"
-  ansible.builtin.set_fact:
-    first_server_temp: "{{ first_server_temp | default([]) + [item] }}"
-  with_items:
-    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}"
-    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
-
 - name: "ANF Mount: sap_trans"
   ansible.builtin.include_tasks: 2.6.1.1-anf-mount.yaml
   loop:
@@ -589,7 +575,7 @@
         'temppath': 'hanadata',
         'folder': 'hanadata',
         'mount': '{{ hana_data_mountpoint[0] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/hana/data',
         'permissions': '0755',
         'set_chattr_on_dir': false,
@@ -613,7 +599,7 @@
         'temppath': 'hanalog',
         'folder': 'hanalog',
         'mount' : '{{ hana_log_mountpoint[0] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
        'path' : '/hana/log',
         'permissions': '0755',
         'set_chattr_on_dir': false,
@@ -637,7 +623,7 @@
         'temppath': 'hanashared',
         'folder': 'hanashared',
         'mount': '{{ hana_shared_mountpoint[0] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/hana/shared',
         'permissions': '0775',
         'set_chattr_on_dir': false,
@@ -661,7 +647,7 @@
         'temppath': 'hanadata',
         'folder': 'hanadata',
         'mount': '{{ hana_data_mountpoint[1] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/hana/data',
         'permissions': '0755',
         'set_chattr_on_dir': false,
@@ -686,7 +672,7 @@
         'temppath': 'hanalog',
         'folder': 'hanalog',
         'mount' : '{{ hana_log_mountpoint[1] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
        'path' : '/hana/log',
         'permissions': '0755',
         'set_chattr_on_dir': false,
@@ -711,7 +697,7 @@
         'temppath': 'hanashared',
         'folder': 'hanashared',
         'mount': '{{ hana_shared_mountpoint[1] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/hana/shared',
         'permissions': '0775',
         'set_chattr_on_dir': false,
@@ -792,7 +778,7 @@
         # change folder to match the mount folder within the share
         'folder': 'shared',
         'mount': '{{ hana_shared_mountpoint[0] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/hana/shared',
         'permissions': '0775',
         'set_chattr_on_dir': false,
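
The remaining hunks in this file repeat the same nested lookup verbatim. The patch does not centralize it, but hoisting it once, under the result structure sketched earlier, is the equivalent of this pattern:

    # Hoisting the repeated nested access into one name (pattern sketch, stub data).
    setting_vars_output = {"results": [{"mnt_options": {"anf_mnt_options": "rw,nfsvers=4.1,hard,sec=sys"}}]}

    anf_opts = setting_vars_output["results"][0]["mnt_options"]["anf_mnt_options"]  # read once
    mounts = [{"path": p, "opts": anf_opts} for p in ("/hana/data", "/hana/log", "/hana/shared")]

In Ansible terms that would be a single set_fact consumed by every mount entry.
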
@@ -815,7 +801,7 @@
         'temppath': 'usrsap',
         'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}",
         'mount': '{{ hana_shared_mountpoint[0] }}',
-        'opts': '{{ mnt_options }}',
+        'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
         'path': '/usr/sap/{{ db_sid | upper }}',
         'permissions': '0775',
         'set_chattr_on_dir': false,
@@ -851,7 +837,7 @@
       temppath: 'hanadata',
       folder: 'hanadata',
       mount: "{{ item }}",
-      opts: "{{ mnt_options }}",
+      opts: "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
       path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}",
       permissions: '0775',
       set_chattr_on_dir: false,
@@ -896,7 +882,7 @@
       temppath: 'hanalog',
       folder: 'hanalog',
       mount: "{{ item }}",
-      opts: "{{ mnt_options }}",
+      opts: "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}",
       path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}",
       permissions: '0775',
       set_chattr_on_dir: false,

From 23fc02b2de3322ed90cc773243d0af64351fd6f9 Mon Sep 17 00:00:00 2001
From: Dhruv Aggarwal
Date: Mon, 16 Sep 2024 16:02:54 -0400
Subject: [PATCH 152/164] testing anf-mounts

---
 .../2.6-sap-mounts/library/setting_vars.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
index 7a14ebd3a8..fee88ee8df 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
@@ -49,17 +49,15 @@ def run_module():
 
     result['first_server_temp'].append(module.params['server_name'])
 
-    result['mnt_options'] = {
-        'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys',
-    }
-
     if distribution_full_id in distro_versions:
         result['mnt_options'] = {
-            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8'
+            'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys',
+            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8',
         }
     else:
         result['mnt_options'] = {
-            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'
+            'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys',
+            'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys',
         }

From dd2857880f17233c455b61b1318da19de40ffb6c Mon Sep 17 00:00:00 2001
From: Dhruv Aggarwal
Date: Mon, 16 Sep 2024 16:11:08 -0400
Subject: [PATCH 153/164] testing anf-mounts

---
 .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
index fee88ee8df..d488e7af1f 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py
@@ -1,7 +1,5 @@
 from ansible.module_utils.basic import AnsibleModule
 def run_module():
-    first_server_temp = []
-    all_sap_mounts = []
     distro_versions = ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5']
     module_args = dict(
         sap_sid=dict(type="str", required=True),
From b3c2812a078f0914012a284c96d9164a69a277ac Mon Sep 17 00:00:00 2001
From: Dhruv Aggarwal
Date: Wed, 9 Oct 2024 13:02:43 -0400
Subject: [PATCH 154/164] new pushes with oracle and simplemount

---
 .../library/setting_vars_oracle.py            | 20 ++++++
 .../tasks/2.6.1-anf-mounts.yaml               |  1 +
 .../tasks/2.6.2-oracle-mounts.yaml            | 13 ++--
 .../tasks/2.6.3-oracle-asm-mounts.yaml        |  9 +--
 .../tasks/2.6.3-oracle-asm-prereq.yaml        | 11 +--
 .../tasks/2.6.3-oracle-observer.yaml          | 11 +--
 .../tasks/2.6.7-afs-mounts-simplemount.yaml   | 49 ++++++-------
 .../tasks/2.6.8-anf-mounts-simplemount.yaml   | 68 +++++++++----------
 8 files changed, 96 insertions(+), 86 deletions(-)
 create mode 100644 deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py

diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py
new file mode 100644
index 0000000000..ce6dd8b230
--- /dev/null
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py
@@ -0,0 +1,20 @@
+from ansible.module_utils.basic import AnsibleModule
+def run_module():
+    module_args = dict(
+        nfs_server_temp=dict(type="str",required=True),
+        NFS_provider=dict(type="str",required=True),
+    )
+
+    result = {
+        "nfs_server_temp": [],
+        "nfs_server": "",
+    }
+
+    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+
+    result['nfs_server_temp'].append(module.params['nfs_server_temp'])
+
+    module.exit_json(**result)
+
+if __name__ == "__main__":
+    run_module()
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
index bd6d07b619..7f5053b90c 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml
@@ -4,6 +4,7 @@
 # |                                                                            |
 # +------------------------------------4--------------------------------------*/
 ---
+
 - name: "calling setting variables python script"
   setting_vars:
     sap_sid: "{{ sap_sid }}"
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml
index a83ba4b169..6577b863f8 100644
--- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml
+++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml
@@ -11,16 +11,17 @@
 - name: "ORACLE: Gather Logical volumes created in 1.5"
   ansible.builtin.include_vars: disks_config.yml
 
-- name: "ORACLE: Set the NFS Server name list"
-  ansible.builtin.set_fact:
-    nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}"
+- name: "calling setting variables oracle python script and setting NFS Server name list"
+  setting_vars_oracle:
+    nfs_server_temp: "{{ item }}"
   with_items:
     - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}"
-    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
-
+    - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}"
+  register: setting_vars_output
+
 - name: "ORACLE: Set the NFS Server name"
   ansible.builtin.set_fact:
-    nfs_server: "{{ nfs_server_temp | first }}"
+    nfs_server: "{{ setting_vars_output.results[0]['nfs_server_temp'] | first }}"
   when: NFS_provider == "NONE"
 
 - name: "ORACLE: Check if LVs exists."
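
Two edges of the new setting_vars_oracle module are visible in the diff itself (observations on the code as committed, not behavior the commit claims): NFS_provider is declared required=True yet the callers pass only nfs_server_temp and the module body never reads it, and the nfs_server key is initialized in result but never assigned. A sketch with those edges trimmed (hypothetical, not what the patch applies):

    from ansible.module_utils.basic import AnsibleModule

    def run_module():
        module_args = dict(
            nfs_server_temp=dict(type="str", required=True),
            NFS_provider=dict(type="str", required=False),  # optional, if kept at all
        )
        result = {"nfs_server_temp": []}
        module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

        # A single module run sees exactly one host, so this list has one element.
        result["nfs_server_temp"].append(module.params["nfs_server_temp"])
        module.exit_json(**result)

    if __name__ == "__main__":
        run_module()
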
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml index 53dc91b2f8..d58a5588e1 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-mounts.yaml @@ -2,16 +2,17 @@ - name: ORACLE ASM - Gather Logical volumes created in 1.5.1 ansible.builtin.include_vars: disks_config_asm.yml -- name: ORACLE ASM - Set the NFS Server name list - ansible.builtin.set_fact: - nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}" +- name: "calling setting variables oracle python script and setting NFS Server name list" + setting_vars_oracle: + nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output - name: ORACLE ASM - Set the NFS Server name ansible.builtin.set_fact: - nfs_server: "{{ nfs_server_temp | first }}" + nfs_server: "{{ setting_vars_output.results[0]['nfs_server_temp'] | first }}" when: NFS_provider == "NONE" - name: ORACLE ASM - Check if LVs exists. diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml index da56c24afc..2fb3a3e435 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml @@ -3,16 +3,17 @@ - name: Gather Logical volumes created in roles-os/1.5.1.1 ansible.builtin.include_vars: disks_config_asm.yml -- name: Set the NFS Server name list - ansible.builtin.set_fact: - nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}" +- name: "calling setting variables oracle python script and setting NFS Server name list" + setting_vars_oracle: + nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output - name: Set the NFS Server name ansible.builtin.set_fact: - nfs_server: "{{ nfs_server_temp | first }}" + nfs_server: "{{ setting_vars_output.results[0]['nfs_server_temp'] | first }}" when: NFS_provider == "NONE" - name: Check if LVs exists. 
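The anf_mnt_options string that these tasks now read from setting_vars_output is computed in setting_vars.py (patch 152 above) from distribution_full_id: releases on a fixed allow-list of RHEL and SLES versions get nconnect=8 appended, everything else gets the base option set. A condensed, runnable sketch of that selection (the function name is illustrative; the option strings and version list are the ones from the module):

NCONNECT_DISTROS = ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2',
                    'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5']
ANF_BASE = 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys'

def anf_mount_options(distribution_full_id):
    # Same rule as setting_vars.py: append nconnect=8 only for allow-listed releases.
    if distribution_full_id in NCONNECT_DISTROS:
        return ANF_BASE + ',nconnect=8'
    return ANF_BASE

print(anf_mount_options('redhat8.6'))  # base options + ,nconnect=8
print(anf_mount_options('redhat7.9'))  # base options only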
diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml index 6fc2d14e2a..44e4fb8af1 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml @@ -8,16 +8,17 @@ # Mount Filesystems -- name: "2.6 SAP Mounts: - Set the NFS Server name list" - ansible.builtin.set_fact: - nfs_server_temp: "{{ nfs_server_temp | default([]) + [item] }}" +- name: "calling setting variables oracle python script and setting NFS Server name list" + setting_vars_oracle: + nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output - name: "2.6 SAP Mounts: - Set the NFS Server name" ansible.builtin.set_fact: - nfs_server: "{{ nfs_server_temp | first }}" + nfs_server: "{{ setting_vars_output.results[0]['nfs_server_temp'] | first }}" - name: "2.6 SAP Mounts: - Set the usr/sap/install path" ansible.builtin.set_fact: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml index 95c99e84c2..2e73c106f3 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml @@ -5,35 +5,26 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: "AFS Mount: Define this SID" - ansible.builtin.set_fact: - this_sid: - { - 'sid': '{{ sap_sid | upper }}', - 'dbsid_uid': '{{ hdbadm_uid }}', - 'sidadm_uid': '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}', - 'ascs_inst_no': '{{ scs_instance_number }}', - 'pas_inst_no': '{{ pas_instance_number }}', - 'app_inst_no': '{{ app_instance_number }}' - } - -- name: "AFS Mount: Create list of all_sap_mounts to support " - ansible.builtin.set_fact: - all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" - -- name: "AFS Mount: Get the Server name list" - ansible.builtin.set_fact: - first_server_temp: "{{ first_server_temp | default([]) + [item] }}" +- name: "calling setting variables python script" + setting_vars: + sap_sid: "{{ sap_sid }}" + hdbadm_uid: "{{ hdbadm_uid }}" + platform: "{{ platform }}" + sidadm_uid: "{{ sidadm_uid }}" + multi_sids: "{{ multi_sids | default(omit) }}" + asesidadm_uid: "{{ asesidadm_uid | default(omit)}}" + scs_instance_number: "{{ scs_instance_number }}" + pas_instance_number: "{{ pas_instance_number }}" + app_instance_number: "{{ app_instance_number }}" + server_name: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output -- name: "AFS Mount: Create list of all_sap_mounts to support" - ansible.builtin.debug: - msg: - - "List of all the SAP mounts: {{ all_sap_mounts }}" - - "First server: {{ first_server_temp }}" - verbosity: 2 +- name: Set first_server_temp to the value from setting_vars_output + ansible.builtin.set_fact: + first_server_temp: "{{ setting_vars_output.results[0]['first_server_temp'] }}" # 
/*---------------------------------------------------------------------------8 # | | @@ -105,7 +96,7 @@ owner: '{{ item.sidadm_uid }}' group: sapsys mode: 0755 - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Unmount file systems (sapmnt)" ansible.posix.mount: @@ -124,7 +115,7 @@ ansible.builtin.file: path: "/saptmp/sapmnt{{ item.sid | upper }}" state: absent - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Cleanup fstab and directory (sapmnt)" ansible.posix.mount: @@ -173,7 +164,7 @@ path: "/sapmnt/{{ item.sid }}" state: directory register: is_created_now - loop: "{{ all_sap_mounts }}" + loop: "{{ setting_vars_output.results[0]['all_sap_mounts'] }}" - name: "AFS Mount: Change attribute only when we create SAP Directories (sapmnt)" ansible.builtin.file: @@ -204,7 +195,7 @@ ansible.builtin.file: path: "{{ item.path }}" state: directory - owner: "{{ this_sid.sidadm_uid }}" + owner: "{{ setting_vars_output.results[0]['this_sid']['sidadm_uid'] }}" group: sapsys mode: 0755 loop: diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index b9b30f2f76..919a5efc23 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -5,37 +5,31 @@ # +------------------------------------4--------------------------------------*/ --- -- name: "ANF Mount: Set the NFS Service name" - ansible.builtin.set_fact: - nfs_service: "{% if distribution_id in ['redhat8', 'redhat9'] %}nfs-server{% else %}{% if distribution_id == 'redhat7' %}nfs{% else %}{% if distribution_id == 'oraclelinux8' %}rpcbind{% else %}nfsserver{% endif %}{% endif %}{% endif %}" - -- name: "ANF Mount: Set the NFSmount options" - ansible.builtin.set_fact: - mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys' - when: distribution_full_id not in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - -- name: "ANF Mount: Set the NFSmount options" - ansible.builtin.set_fact: - mnt_options: 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8' - when: distribution_full_id in ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5', 'sles_sap15.6'] - -- name: "ANF Mount: Define this SID" - ansible.builtin.set_fact: - this_sid: - { - 'sid': '{{ sap_sid | upper }}', - 'dbsid_uid': '{{ hdbadm_uid }}', - 'sidadm_uid': '{{ sidadm_uid }}', - 'ascs_inst_no': '{{ scs_instance_number }}', - 'pas_inst_no': '{{ pas_instance_number }}', - 'app_inst_no': '{{ app_instance_number }}' - } +- name: "calling setting variables python script" + setting_vars: + sap_sid: "{{ sap_sid }}" + hdbadm_uid: "{{ hdbadm_uid }}" + platform: "{{ platform }}" + sidadm_uid: "{{ sidadm_uid }}" + multi_sids: "{{ multi_sids | default(omit) }}" + asesidadm_uid: "{{ asesidadm_uid | default(omit)}}" + scs_instance_number: "{{ scs_instance_number }}" + pas_instance_number: "{{ pas_instance_number }}" + app_instance_number: "{{ app_instance_number }}" + server_name: "{{ item }}" + with_items: + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" + 
- "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + register: setting_vars_output - name: "ANF Mount: Create list of all_sap_mounts to support " ansible.builtin.set_fact: - all_sap_mounts: "{% if MULTI_SIDS is defined %}{{ MULTI_SIDS }}{% else %}{{ all_sap_mounts | default([]) + [this_sid] }}{% endif %}" db_hosts: "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" +- name: Set first_server_temp to the value from setting_vars_output + ansible.builtin.set_fact: + first_server_temp: "{{ setting_vars_output.results[0]['first_server_temp'] }}" + - name: "ANF Mount: Ensure the NFS service is stopped" ansible.builtin.systemd: name: "{{ nfs_service }}" @@ -118,7 +112,7 @@ seconds: 5 - name: "ANF Mount: Ensure the NFS service is restarted" ansible.builtin.systemd: - name: "{{ nfs_service }}" + name: "{{ setting_vars_output.results[0]['nfs_service'] }}" state: restarted when: - id_mapping_changed is changed @@ -524,7 +518,7 @@ 'temppath': 'hanadata', 'folder': 'hanadata', 'mount': '{{ hana_data_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/hana/data', 'permissions': '0755', 'set_chattr_on_dir': false, @@ -548,7 +542,7 @@ 'temppath': 'hanalog', 'folder': 'hanalog', 'mount' : '{{ hana_log_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path' : '/hana/log', 'permissions': '0755', 'set_chattr_on_dir': false, @@ -572,7 +566,7 @@ 'temppath': 'hanashared', 'folder': 'hanashared', 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/hana/shared', 'permissions': '0775', 'set_chattr_on_dir': false, @@ -596,7 +590,7 @@ 'temppath': 'hanadata', 'folder': 'hanadata', 'mount': '{{ hana_data_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/hana/data', 'permissions': '0755', 'set_chattr_on_dir': false, @@ -621,7 +615,7 @@ 'temppath': 'hanalog', 'folder': 'hanalog', 'mount' : '{{ hana_log_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path' : '/hana/log', 'permissions': '0755', 'set_chattr_on_dir': false, @@ -646,7 +640,7 @@ 'temppath': 'hanashared', 'folder': 'hanashared', 'mount': '{{ hana_shared_mountpoint[1] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/hana/shared', 'permissions': '0775', 'set_chattr_on_dir': false, @@ -722,7 +716,7 @@ # change folder to match the mount folder within the share 'folder': 'shared', 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/hana/shared', 'permissions': '0775', 'set_chattr_on_dir': false, @@ -746,7 +740,7 @@ 'temppath': 'usrsap', 'folder': "usr-sap-hanadb{{ lookup('ansible.utils.index_of', db_hosts, 'eq', ansible_hostname) }}", 'mount': '{{ hana_shared_mountpoint[0] }}', - 'opts': '{{ mnt_options }}', + 'opts': "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", 'path': '/usr/sap/{{ db_sid | upper }}', 'permissions': '0775', 'set_chattr_on_dir': false, @@ -782,7 +776,7 @@ temppath: 'hanadata', folder: 'hanadata', mount: "{{ item }}", - opts: "{{ 
mnt_options }}", + opts: "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", path: "{{ '/hana/data/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", permissions: '0775', set_chattr_on_dir: false, @@ -827,7 +821,7 @@ temppath: 'hanalog', folder: 'hanalog', mount: "{{ item }}", - opts: "{{ mnt_options }}", + opts: "{{ setting_vars_output.results[0]['mnt_options']['anf_mnt_options'] }}", path: "{{ '/hana/log/' + (db_sid | upper ) + '/mnt0000' + ( my_index + 1 )| string }}", permissions: '0775', set_chattr_on_dir: false, From e5629e067ac47992da7ffe637ce25bdd5378ea3c Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Thu, 10 Oct 2024 13:30:00 -0400 Subject: [PATCH 155/164] fixed main playbook that calls SAP Mounts --- .../playbook_02_os_sap_specific_config.yaml | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 168ac702a2..4206db6cdd 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -195,12 +195,6 @@ # hosts file. We do not need to set ipaddr to ipadd. # vars: # ipaddr: "{{ ipadd }}" - - name: "SAP OS configuration playbook: - Mount the file systems" - ansible.builtin.include_role: - name: roles-sap-os/2.6-sap-mounts - tags: - - 2.6-sap-mounts - - name: "SAP OS configuration playbook: - Create hosts file" ansible.builtin.include_role: name: roles-sap-os/2.4-hosts-file @@ -305,11 +299,11 @@ tags: - 2.3-sap-exports -# - name: "SAP OS configuration playbook: - Mount the file systems" -# ansible.builtin.include_role: -# name: roles-sap-os/2.6-sap-mounts -# tags: -# - 2.6-sap-mounts + - name: "SAP OS configuration playbook: - Mount the file systems" + ansible.builtin.include_role: + name: roles-sap-os/2.6-sap-mounts + tags: + - 2.6-sap-mounts when: - ansible_os_family != "Windows" From 199e933ef752c93862c57e9334b917ffa0fe8989 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Wed, 16 Oct 2024 14:37:34 -0400 Subject: [PATCH 156/164] testing anf mounts --- .../ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index d488e7af1f..4e2ab7ff08 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -59,6 +59,7 @@ def run_module(): } if distribution_full_id in ['redhat8', 'redhat9']: + print("HELLO") result['nfs_service'] = 'nfs-server' elif distribution_full_id == 'redhat7': result['nfs_service'] = 'nfs' From b64d7808d3e1f2064f8d9dfac54715f7b03e1ff7 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Wed, 16 Oct 2024 14:39:47 -0400 Subject: [PATCH 157/164] testing anf mounts --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 4e2ab7ff08..4c1ae9ca2e 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -62,10 +62,13 @@ def run_module(): print("HELLO") result['nfs_service'] = 'nfs-server' elif distribution_full_id == 'redhat7': + print("HELLO2") 
result['nfs_service'] = 'nfs' elif distribution_full_id == 'oraclelinux8': + print("HELLO3") result['nfs_service'] = 'rpcbind' else: + print("HELLO4") result['nfs_service'] = 'nfsserver' module.exit_json(**result) From c8aedbde8df6e016929bde33fb505673e91135af Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Wed, 16 Oct 2024 22:00:08 -0400 Subject: [PATCH 158/164] testing anf mounting --- .../roles-sap-os/2.6-sap-mounts/library/setting_vars.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index 4c1ae9ca2e..d488e7af1f 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -59,16 +59,12 @@ def run_module(): } if distribution_full_id in ['redhat8', 'redhat9']: - print("HELLO") result['nfs_service'] = 'nfs-server' elif distribution_full_id == 'redhat7': - print("HELLO2") result['nfs_service'] = 'nfs' elif distribution_full_id == 'oraclelinux8': - print("HELLO3") result['nfs_service'] = 'rpcbind' else: - print("HELLO4") result['nfs_service'] = 'nfsserver' module.exit_json(**result) From c6dec82049da84fc2b59c8ce19c3c8627228e75e Mon Sep 17 00:00:00 2001 From: Kimmo Forss Date: Wed, 23 Oct 2024 15:03:44 +0300 Subject: [PATCH 159/164] Bring in HotFix repairs (#651) * Refactor deploy control plane script to remove unnecessary Terraform installation and Azure CLI installation * Refactor deploy control plane script to include sourcing deploy_server.sh and fixing Terraform ownership * Refactor deploy control plane script to include sourcing deploy_server.sh and fixing Terraform ownership * Refactor deploy control plane script to include azurerm_role_assignment for deployer and storage_sapbits_contributor * Refactor deploy control plane script to remove unnecessary Terraform installation and Azure CLI installation * Refactor deploy control plane script to include sourcing deploy_server.sh and fixing Terraform ownership * Refactor deploy control plane script to include dynamic role assignment based on VM count * Refactor deploy scripts to simplify checkIfCloudShell function * Refactor deploy control plane script to include dynamic role assignment based on VM count and use managed service identity (MSI) for authentication * Refactor deploy scripts to include sourcing deploy_server.sh and fixing Terraform ownership * Refactor deploy control plane script to include dynamic role assignment based on VM count and use managed service identity (MSI) for authentication * Refactor deploy control plane script to include dynamic role assignment based on VM count and use managed service identity (MSI) for authentication * handle the realfilepath and the scriptdir variables as they might be replaced with other values in child scripts * Refactor deploy_controlplane.sh to use managed service identity (MSI) for authentication * Refactor deploy_controlplane.sh to remove ARM_USE_MSI variable and use managed service identity (MSI) for authentication * Refactor deploy_utils.sh to remove ARM_USE_MSI variable and use managed service identity (MSI) for authentication * Refactor deploy_controlplane.sh to remove ARM_USE_MSI variable and use managed service identity (MSI) for authentication * Refactor deploy_controlplane.sh to use managed service identity (MSI) for authentication * Refactor installer.sh to include terraform output command * Refactor installer.sh to comment out unnecessary 
terraform output command * Refactor variables_local.tf to use client_id instead of id for service principal object_id * Refactor deploy_controlplane.sh to use managed service identity (MSI) for authentication and remove unnecessary ARM_USE_MSI variable * Refactor deploy_controlplane.sh to use managed service identity (MSI) for authentication * chore: include OpenSSF Scorecard badge * Refactor variables_local.tf to use client_id instead of id for service principal object_id Refactor installer.sh to comment out unnecessary terraform output command * Refactor pipeline script to use correct variable for workload ARM subscription ID * Refactor pipeline script to use correct variable for workload ARM subscription ID * Refactor pipeline script to include ARM_OBJECT_ID variable * Refactor pipeline script to use correct variable for workload ARM_CLIENT_ID * Refactor pipeline script to use correct variable for workload ARM_CLIENT_ID * check terraform when running in cloudshell * Refactor pipeline script to include missing variable checks * Refactor pipeline script to remove unnecessary variable checks * Refactor pipeline script to use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to include missing variable checks * Refactor pipeline script to update echo statements for installation method * Refactor pipeline script to update echo statements for installation method * Refactor pipeline script to use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements for installation method * Refactor pipeline script to update echo statements for installation method * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script * Refactor pipeline script to update echo statements for installation method and use correct variables for workload ARM_CLIENT_ID * Refactor pipeline script to update echo statements and export variables for installation method and workload ARM_CLIENT_ID * Refactor pipeline script to update PATH variable in deploy_controlplane.sh * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statement for displaying the key vault information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update usage of Azure CLI command in installer.sh * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update 
echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor provider configuration to use Azure Key Vault for subscription ID retrieval * Fixes #1: Added a new line to the installer script * Refactor pipeline script to fix unzip command in deploy control plane stage * Refactor pipeline script to update echo statements and export variables for installation method, workload ARM_CLIENT_ID, and Terraform state information * Refactor deploy_controlplane.sh script to save deployer_tfstate_key in config information * remove the deployer provider * Refactor deploy_controlplane.sh script to use the azurerm.deployer provider for retrieving key vault secrets * Refactor tfvar_variables.tf to add "tags" variable for providing tags to all resources * Refactor providers.tf to use local variable for subscription_id in deployer provider * Refactor providers.tf to remove subscription_id from deployer provider * Refactor deploy_controlplane.sh script to use local variables for deployer and library state file keys * Refactor deploy_controlplane.sh script to use local variables for deployer and library state file keys * Refactor azurerm provider versions to 4.6.0 * Refactor deploy_controlplane.sh script to use local variables for deployer and library state file keys * Refactor deploy_controlplane.sh script to use local variables for deployer and library state file keys * Refactor deploy_controlplane.sh script to use local variables for deployer and library state file keys * Refactor az keyvault set-policy command in deploy_controlplane.sh script * Refactor az keyvault set-policy command in deploy_controlplane.sh script * Add SPN to workload zone key vault * Remove the permission setting from the pipeline * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in script_helpers.sh and installer.sh * Refactor echo statements in deploy_utils.sh for better readability * Refactor echo statement in 03-sap-system-deployment.yaml * Refactor echo statement in 03-sap-system-deployment.yaml for better readability * Refactor echo statement in 03-sap-system-deployment.yaml for better readability * Refactor echo statements for better readability and consistency * Refactor echo statements for better readability and consistency * Refactor echo statements for better readability and consistency * Refactor echo statements for better readability 
and consistency * Refactor echo statements for better readability and consistency * Refactor echo statements for better readability and consistency * Refactor echo statements for better readability and consistency * Refactor echo statements to use variable for workload TFvars * Refactor echo statements to use variable for workload TFvars * Refactor echo statements to use variable for workload TFvars * Refactor echo statement to use variable for Terraform Storage Account Id * Refactor echo statements to use variables for Terraform details * Refactor echo statements to use variables for Terraform details * Refactor echo statements to use variables for Terraform details * Refactor echo statements to use variables consistently * Refactor echo statements to use variables consistently * Refactor echo statements to use variables consistently and for Terraform details * Refactor echo statements to consistently use variables * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Debugging * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * trimming * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve formatting * Refactor echo statements to consistently use variables and improve formatting * terraform * Refactor echo statements to consistently use variables and improve Terraform details * Refactor echo statements to consistently use variables and improve formatting * Refactor echo statements to consistently use variables and improve formatting * Refactor echo statements to consistently use variables and improve Terraform details * Refactor providers.tf to use Managed Service Identity (MSI) for authentication * Refactor echo statements to consistently use variables and improve formatting * Refactor echo statement to improve parameter file formatting * Refactor echo statements to improve formatting and use variables consistently * Refactor echo statements to consistently use variables and improve formatting * Refactor deploy control plane pipeline to improve configuration and extension installation * Refactor providers.tf to use remote state for subscription ID * Refactor echo statements to consistently use variables and improve formatting * Refactor echo statements to consistently use variables and improve formatting Refactor providers.tf to use remote state for subscription ID Refactor deploy control plane pipeline to improve configuration and extension installation Fix validation issue in 
script_helpers.sh Update providers.tf to handle null subscription ID Remove unused variable in variables_local.tf * Refactor echo statement to improve formatting in deploy_controlplane.sh * Refactor echo statements to improve formatting in deploy_controlplane.sh and script_helpers.sh * Refactor deploy_controlplane.sh and script_helpers.sh echo statements for improved formatting * Refactor key vault secrets to include service principal access * Refactor key vault secrets to include service principal access * Refactor key vault secrets to include service principal access * Refactor key vault secrets to include service principal access * Refactor echo statements for improved formatting in deploy_controlplane.sh and script_helpers.sh * Refactor permissions assignment in deploy_controlplane.sh * Refactor echo statements for improved formatting and include deployer subscription * Refactor echo statements for improved formatting and include deployer subscription * Refactor echo statements for improved formatting and include deployer subscription * Change to use ARM CLIENT ID * Refactor echo statement to include deployer subscription in 02-sap-workload-zone.yaml * Refactor echo statements for improved formatting and include deployer subscription in 02-sap-workload-zone.yaml * Refactor echo statements for improved formatting and include deployer subscription in 02-sap-workload-zone.yaml * Refactor echo statements for improved formatting and include deployer subscription in 02-sap-workload-zone.yaml * Refactor echo statements and include deployer subscription in 02-sap-workload-zone.yaml Change to use WL_ARM_CLIENT_ID instead of ARM_CLIENT_ID Update variable group and variable names in New-SDAFDevopsWorkloadZone.ps1 Update echo statements in script_helpers.sh for improved formatting * Refactor echo statements for improved formatting and include deployer subscription in 02-sap-workload-zone.yaml * Refactor echo statements for improved formatting and include deployer subscription in 03-sap-system-deployment.yaml * Refactor echo statements for improved formatting and include deployer subscription in 03-sap-system-deployment.yaml * indentation * Refactor echo statements for improved formatting and include deployer subscription in 03-sap-system-deployment.yaml * Refactor echo statements for improved formatting and include deployer subscription in 03-sap-system-deployment.yaml * Refactor echo statements for improved formatting and include deployer subscription in 03-sap-system-deployment.yaml * Refactor echo statements for improved formatting and fix indentation in 10-remover-terraform.yaml * Refactor echo statement for improved formatting in remover.sh * Refactor echo statements for improved formatting and remove unnecessary output in remover.sh and 10-remover-terraform.yaml * Refactor echo statements for improved formatting and remove unnecessary output in remover.sh and 10-remover-terraform.yaml * Refactor echo statements for improved formatting and remove unnecessary output in remover.sh and 10-remover-terraform.yaml * Refactor echo statements for improved formatting and remove unnecessary output in 01-deploy-control-plane.yaml * Refactor validate_dependencies function to check for the existence of the terraform binary file instead of the terraform directory. 
* Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting and remove unnecessary output in set_secrets.sh * Refactor azuread_service_principal data source to conditionally include object_id in locals * Update SDAF version to 3.13.1.0 in ansible-input-api.yaml and version.txt * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor key_vault_sap_landscape.tf to conditionally include object_id in azurerm_key_vault_access_policy * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor key_vault_sap_landscape.tf to conditionally include object_id in azurerm_key_vault_access_policy * Refactor key_vault_sap_landscape.tf to conditionally include object_id in azurerm_key_vault_access_policy * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting and remove unnecessary output in deploy and remove pipelines * Refactor echo statements for improved formatting in installer.sh * Refactor echo statements for improved formatting and remove unnecessary output in installer.sh * Refactor echo statements for improved formatting and remove unnecessary output in set_secrets.sh * Refactor echo statements for improved formatting and remove unnecessary output in installer.sh and providers.tf * Refactor echo statements for improved formatting and remove unnecessary output in installer.sh * Refactor installer.sh to fix path issue and pass parameters correctly * Refactor deploy_controlplane.sh to include state subscription parameter in installer.sh call * Refactor deploy_controlplane.sh to include correct subscription parameter in installer.sh call * Refactor deploy_controlplane.sh to include deployer subscription parameter and persist parameters * Refactor deploy_controlplane.sh to include state subscription parameter in installer.sh call * Refactor deploy_controlplane.sh to include state subscription parameter in installer.sh call * Refactor deploy_controlplane.sh to include correct subscription parameter in installer.sh call * Refactor deploy_controlplane.sh to include correct subscription parameter in installer.sh call and handle storage account authentication * Refactor deploy_controlplane.sh to remove unnecessary echo statement * Refactor deploy_controlplane.sh to remove unnecessary echo statements and improve parameter handling * Refactor deploy_controlplane.sh to improve parameter handling * Refactor deploy_controlplane.sh to improve storage account authentication handling * Refactor deploy_controlplane.sh to improve parameter handling and remove unnecessary echo statements * Refactor deploy_utils.sh to fix variable value retrieval from config file * Refactor parameter handling in script_helpers.sh and install_workloadzone.sh * Refactor install_workloadzone.sh to handle unknown region codes * Refactor install_workloadzone.sh to handle unknown region codes * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter 
handling * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter handling * Refactor region code handling in deploy_utils.sh and script_helpers.sh * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter handling * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter handling * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter handling * Refactor install_workloadzone.sh to handle unknown region codes and improve parameter handling * Refactor install_workloadzone.sh to improve parameter handling and region code handling in deploy_utils.sh and script_helpers.sh * Refactor install_workloadzone.sh to improve parameter handling and region code handling * Refactor install_workloadzone.sh to improve parameter handling and region code handling * Refactor install_workloadzone.sh to improve parameter handling and region code handling * Refactor install_workloadzone.sh to improve parameter handling and region code handling * Refactor install_workloadzone.sh to improve parameter handling and region code handling * Refactor install_workloadzone.sh to remove unnecessary code * Refactor echo statements to improve readability and consistency * Refactor installer.sh to improve parameter handling and region code handling * Refactor installer.sh to improve parameter handling and region code handling * Refactor installer.sh to remove unnecessary echo statements * Refactor installer.sh to improve parameter handling and region code handling * Refactor storage_accounts.tf to include var.use_private_endpoint in the count condition * Refactor storage_accounts.tf to include var.use_private_endpoint in the count condition * Keyvault network rules * Refactor key_vault_sap_landscape.tf to include var.enable_firewall_for_keyvaults_and_storage in the default_action condition * Refactor installer.sh to handle empty SPN secret in set_executing_user_environment_variables * Refactor installer.sh to handle empty SPN secret in set_executing_user_environment_variables * Refactor module.tf to include enable_firewall_for_keyvaults_and_storage variable * Refactor installer.sh to handle empty SPN secret in set_executing_user_environment_variables and remove error file * Add Terraform output details * Refactor Terraform plugin cache directory handling * Refactor Terraform destroy command in remover.sh * Refactor Terraform destroy command in remover.sh * Refactor echo statement in deploy pipeline to include return code from deployment * Refactor echo statement in deploy pipeline to include return code from deployment --------- Co-authored-by: Kimmo Forss Co-authored-by: hdamecharla --- README.md | 2 +- Webapp/SDAF/SDAFWebApp.csproj | 12 +- .../roles-os/1.1-swap/defaults/main.yaml | 8 + .../ansible/roles-os/1.1-swap/tasks/main.yaml | 3 +- deploy/ansible/vars/ansible-input-api.yaml | 2 +- deploy/configs/version.txt | 2 +- deploy/pipelines/01-deploy-control-plane.yaml | 1106 +++++++++-------- deploy/pipelines/02-sap-workload-zone.yaml | 623 +++++----- .../pipelines/03-sap-system-deployment.yaml | 87 +- .../pipelines/04-sap-software-download.yaml | 2 +- deploy/pipelines/10-remover-terraform.yaml | 957 +++++++------- deploy/pipelines/11-remover-arm-fallback.yaml | 18 +- deploy/pipelines/12-remove-control-plane.yaml | 104 +- deploy/scripts/New-SDAFDevopsProject.ps1 | 2 +- deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 | 10 +- deploy/scripts/advanced_state_management.sh | 16 +-
deploy/scripts/deploy_controlplane.sh | 244 ++-- deploy/scripts/deploy_utils.sh | 46 +- deploy/scripts/helpers/script_helpers.sh | 101 +- deploy/scripts/install_deployer.sh | 28 +- deploy/scripts/install_library.sh | 27 +- deploy/scripts/install_workloadzone.sh | 183 ++- deploy/scripts/installer.sh | 245 ++-- deploy/scripts/remove_controlplane.sh | 64 +- deploy/scripts/remove_deployer.sh | 2 +- deploy/scripts/remover.sh | 80 +- deploy/scripts/set_secrets.sh | 435 +++---- deploy/scripts/validate.sh | 117 +- .../bootstrap/sap_deployer/providers.tf | 4 +- .../bootstrap/sap_deployer/tfvar_variables.tf | 25 +- .../bootstrap/sap_deployer/transform.tf | 2 - .../bootstrap/sap_library/imports.tf | 8 +- .../bootstrap/sap_library/providers.tf | 9 +- .../bootstrap/sap_library/tfvar_variables.tf | 5 + .../bootstrap/sap_library/transform.tf | 122 +- .../terraform/run/sap_deployer/providers.tf | 10 +- .../run/sap_deployer/tfvar_variables.tf | 24 +- .../terraform/run/sap_deployer/transform.tf | 2 - .../run/sap_deployer/variables_local.tf | 8 +- deploy/terraform/run/sap_landscape/imports.tf | 9 +- .../terraform/run/sap_landscape/providers.tf | 4 +- .../run/sap_landscape/tfvar_variables.tf | 5 + .../run/sap_landscape/variables_local.tf | 9 +- deploy/terraform/run/sap_library/imports.tf | 8 +- deploy/terraform/run/sap_library/providers.tf | 7 +- .../run/sap_library/tfvar_variables.tf | 5 + deploy/terraform/run/sap_library/transform.tf | 119 +- .../run/sap_library/variables_local.tf | 3 +- deploy/terraform/run/sap_system/module.tf | 1 + deploy/terraform/run/sap_system/providers.tf | 2 +- .../run/sap_system/tfvar_variables.tf | 5 + .../modules/sap_deployer/infrastructure.tf | 22 +- .../modules/sap_deployer/providers.tf | 2 +- .../templates/configure_deployer.sh.tmpl | 19 +- .../modules/sap_deployer/vm-deployer.tf | 7 +- .../sap_landscape/key_vault_sap_landscape.tf | 56 +- .../modules/sap_landscape/providers.tf | 2 +- .../modules/sap_landscape/storage_accounts.tf | 4 +- .../modules/sap_library/key_vault.tf | 2 +- .../modules/sap_library/providers.tf | 2 +- .../modules/sap_library/storage_accounts.tf | 8 +- .../sap_system/anydb_node/providers.tf | 2 +- .../modules/sap_system/app_tier/providers.tf | 2 +- .../common_infrastructure/providers.tf | 2 +- .../common_infrastructure/storage_accounts.tf | 39 +- .../common_infrastructure/variables_global.tf | 4 + .../modules/sap_system/hdb_node/providers.tf | 2 +- .../sap_system/output_files/providers.tf | 2 +- 68 files changed, 2752 insertions(+), 2347 deletions(-) create mode 100644 deploy/ansible/roles-os/1.1-swap/defaults/main.yaml diff --git a/README.md b/README.md index b00b803c9e..4293ee7693 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The framework uses Terraform for infrastructure deployment, and Ansible for the ## Enterprise-scale - Reference Implementation -![Ansible Lint](https://github.com/Azure/sap-automation/workflows/Ansible%20Lint/badge.svg) [![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/azure/sap-automation.svg)](http://isitmaintained.com/project/azure/sap-automation "Average time to resolve an issue") [![Percentage of issues still open](http://isitmaintained.com/badge/open/azure/sap-automation.svg)](http://isitmaintained.com/project/azure/sap-automation "Percentage of issues still open") +![Ansible Lint](https://github.com/Azure/sap-automation/workflows/Ansible%20Lint/badge.svg) [![Average time to resolve an 
issue](http://isitmaintained.com/badge/resolution/azure/sap-automation.svg)](http://isitmaintained.com/project/azure/sap-automation "Average time to resolve an issue") [![Percentage of issues still open](http://isitmaintained.com/badge/open/azure/sap-automation.svg)](http://isitmaintained.com/project/azure/sap-automation "Percentage of issues still open") [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/Azure/sap-automation/badge)](https://scorecard.dev/viewer/?uri=github.com/Azure/sap-automation) ## Partnership diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 295391fb10..4f15f4fee8 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -16,18 +16,18 @@ - + - - - - - + + + + + diff --git a/deploy/ansible/roles-os/1.1-swap/defaults/main.yaml b/deploy/ansible/roles-os/1.1-swap/defaults/main.yaml new file mode 100644 index 0000000000..199a68459e --- /dev/null +++ b/deploy/ansible/roles-os/1.1-swap/defaults/main.yaml @@ -0,0 +1,8 @@ +--- +# TODO: Maybe move these to a group_vars/all/distro file so that they +# can be shared by all playbooks/tasks automatically, and extend with +# standardised versions of all similar patterns used in the playbooks. +# Changed from ansible_os_family to ansible_distribution to adopt Oracle Linux. os_family returns the value RedHat by default. +distro_name: "{{ ansible_distribution | upper }}-{{ ansible_distribution_major_version }}" +distribution_id: "{{ ansible_distribution | lower ~ ansible_distribution_major_version }}" +distribution_full_id: "{{ ansible_distribution | lower ~ ansible_distribution_version }}" diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index 44b71b00e7..016f87a6e2 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -35,9 +35,8 @@ state: latest environment: ZYPP_LOCK_TIMEOUT: "20" - when: - - ansible_os_family == 'Suse' + - distribution_id == 'sles_sap15' tags: - skip_ansible_lint diff --git a/deploy/ansible/vars/ansible-input-api.yaml b/deploy/ansible/vars/ansible-input-api.yaml index 18b4ae7722..b03529cb8d 100644 --- a/deploy/ansible/vars/ansible-input-api.yaml +++ b/deploy/ansible/vars/ansible-input-api.yaml @@ -5,7 +5,7 @@ become_user_name: root oracle_user_name: oracle orchestration_ansible_user: azureadm # ------------------- Begin - SDAF Ansible Version ---------------------------8 -SDAF_Version: "3.13.0.0" +SDAF_Version: "3.13.1.0" # ------------------- End - SDAF Ansible Version ---------------------------8 diff --git a/deploy/configs/version.txt b/deploy/configs/version.txt index c21c6f6867..bc8db301f8 100644 --- a/deploy/configs/version.txt +++ b/deploy/configs/version.txt @@ -1 +1 @@ -3.13.0.0 +3.13.1.0 diff --git a/deploy/pipelines/01-deploy-control-plane.yaml b/deploy/pipelines/01-deploy-control-plane.yaml index f96089caf3..27475bed1b 100644 --- a/deploy/pipelines/01-deploy-control-plane.yaml +++ b/deploy/pipelines/01-deploy-control-plane.yaml @@ -89,47 +89,45 @@ stages: boldred="\e[1;31m" cyan="\e[1;36m" - export ARM_CLIENT_ID=$servicePrincipalId - - if [ -n "$(servicePrincipalKey)" ]; then - export ARM_CLIENT_SECRET=$servicePrincipalKey - else - export ARM_USE_OIDC=true - export ARM_USE_AZUREAD=true - export ARM_OIDC_TOKEN=$idToken - fi + export ARM_CLIENT_ID=$servicePrincipalId; echo 'ARM_CLIENT_ID' $ARM_CLIENT_ID export ARM_TENANT_ID=$tenantId set -eu file_deployer_tfstate_key=$(deployerfolder).tfstate -
ENVIRONMENT=$(echo $(deployerfolder) | awk -F'-' '{print $1}' | xargs) ; echo Environment ${ENVIRONMENT} - LOCATION=$(echo $(deployerfolder) | awk -F'-' '{print $2}' | xargs) ; echo Location ${LOCATION} + ENVIRONMENT=$(echo $(deployerfolder) | awk -F'-' '{print $1}' | xargs) + LOCATION=$(echo $(deployerfolder) | awk -F'-' '{print $2}' | xargs) + deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION} echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" cd $CONFIG_REPO_PATH git checkout -q $(Build.SourceBranchName) echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt - az --version + az config set extension.use_dynamic_install=yes_without_prompt --only-show-errors - az extension add --name azure-devops --output none + az extension add --name azure-devops --output none --only-show-errors - echo "Agent: " $(this_agent) - echo "Organization: " $(System.CollectionUri) - echo "Project: " $(System.TeamProject) + echo "Environment: $ENVIRONMENT" + echo "Location: $LOCATION" + + echo "" + echo "Agent: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "" + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none --only-show-errors - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo "$(variable_group) id: ${VARIABLE_GROUP_ID}" + printf -v tempval '%s id:' $(variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $VARIABLE_GROUP_ID" - echo "${{ parameters.force_reset }}" if [ "${{ parameters.force_reset }}" = "True" ]; then echo "##vso[task.logissue type=warning]Forcing a re-install" - echo "running on $(this_agent)" + echo "Running on: $(this_agent)" sed -i 's/step=1/step=0/' $deployer_environment_file_name sed -i 's/step=2/step=0/' $deployer_environment_file_name sed -i 's/step=3/step=0/' $deployer_environment_file_name @@ -140,7 +138,7 @@ stages: key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} else echo "Reading key vault from environment file" - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(grep -m1 "^keyvault=" ${deployer_environment_file_name} |awk -F'=' '{print $2}' | xargs) fi key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) @@ -169,7 +167,6 @@ stages: fi fi fi - echo "Agent: " $(this_agent) if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 @@ -198,19 +195,22 @@ stages: fi # Check if running on deployer if [ ! -f /etc/profile.d/deploy_server.sh ]; then - echo -e "$green --- Install dos2unix ---$reset" + echo -e "$green--- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix - sudo apt -qq install zip - echo -e "$green --- Install terraform ---$reset" + sudo apt-get -qq install zip + echo -e "$green--- Install terraform ---$reset" wget -q $(tf_url) return_code=$? if [ 0 != $return_code ]; then echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." 
exit 2 fi - unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ + sudo mkdir -p /opt/terraform/bin/ + unzip -qq terraform_$(tf_version)_linux_amd64.zip + sudo mv terraform /opt/terraform/bin/terraform + sudo chmod +x /opt/terraform/bin/terraform rm -f terraform_$(tf_version)_linux_amd64.zip - az extension add --name storage-blob-preview >/dev/null + az extension add --name storage-blob-preview --allow-preview true --only-show-errors >/dev/null fi echo -e "$green--- Configure parameters ---$reset" echo -e "$green--- Convert config files to UX format ---$reset" @@ -220,23 +220,23 @@ deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}$LOCATION echo -e "$green--- Deploy the Control Plane ---$reset" if [ -n "$(PAT)" ]; then - echo 'Deployer Agent PAT is defined' + echo "Deployer Agent PAT: IsDefined" fi if [ -n "$(POOL)" ]; then - echo 'Deployer Agent Pool' $(POOL) + echo " Deployer Agent Pool: $(POOL)" fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then pass=$(echo $(System.CollectionId) | sed 's/-//g') + echo "Unzipping state.zip" unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) fi - ls -lart ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) - if [ $(use_webapp) = "true" ]; then - echo "Use WebApp is selected" + echo "Deploy Web App: true" + else - echo "No WebApp" + echo "Deploy Web App: false" fi export TF_LOG_PATH=$CONFIG_REPO_PATH/.sap_deployment_automation/terraform.log set +eu # Set the variables for the deployment if [ "$USE_MSI" = "true" ]; then export ARM_CLIENT_SECRET=$servicePrincipalKey export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + echo "Deployment credentials: Managed Identity" + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ --subscription $ARM_SUBSCRIPTION_ID --auto-approve --ado --only_deployer --msi else - export ARM_CLIENT_ID="$CP_ARM_CLIENT_ID" + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_USE_OIDC=false + export ARM_USE_AZUREAD=true + + echo "Deployment credentials: Service Principal" + echo "Deployment credential ID (SPN): $ARM_CLIENT_ID" + + az login --service-principal -u $ARM_CLIENT_ID -p=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ --deployer_parameter_file ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) \ --library_parameter_file ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) \ @@ -262,7 +274,7 @@ fi return_code=$? - echo "Return code from deploy_controlplane $return_code." + echo "Deploy_controlplane returned $return_code." 
set -eu @@ -275,12 +287,13 @@ stages: if [ -z "$file_deployer_tfstate_key" ]; then file_deployer_tfstate_key=$DEPLOYER_TFSTATE_KEY fi - echo 'Deployer State File' $file_deployer_tfstate_key + echo "Deployer State File $file_deployer_tfstate_key" + file_key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) - echo 'Deployer Key Vault' ${file_key_vault} + echo "Deployer Key Vault: ${file_key_vault}" + deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) library_random_id=$(cat ${deployer_environment_file_name} | grep library_random_id= | awk -F'=' '{print $2}' | xargs) - echo 'Deployer Random ID' ${deployer_random_id} fi echo -e "$green--- Update repo ---$reset" @@ -293,9 +306,9 @@ stages: added=1 fi if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then - sudo apt install zip + sudo apt-get install zip -y pass=$(echo $(System.CollectionId) | sed 's/-//g') - zip -j -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate + zip -q -j -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip added=1 fi @@ -310,26 +323,26 @@ stages: fi echo -e "$green--- Adding variables to the variable group:" $(variable_group) "---$reset" if [ 0 = $return_code ]; then - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" --out tsv) if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value ${file_deployer_tfstate_key} --output none --only-show-errors else az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value ${file_deployer_tfstate_key} --output none --only-show-errors fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${file_key_vault} --output none --only-show-errors else az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${file_key_vault} --output none --only-show-errors fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneEnvironment.value") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneEnvironment.value" --out tsv) if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneEnvironment --value ${ENVIRONMENT} --output none --only-show-errors else az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneEnvironment --value ${ENVIRONMENT} --output none --only-show-errors fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneLocation.value") + az_var=$(az pipelines 
variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneLocation.value" --out tsv) if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneLocation --value ${LOCATION} --output none --only-show-errors else @@ -337,7 +350,7 @@ stages: fi if [ -n "${deployer_random_id}" ] ; then - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" --out tsv) if [ -z ${az_var} ]; then az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name DEPLOYER_RANDOM_ID_SEED --value ${deployer_random_id} --output none --only-show-errors else @@ -407,491 +420,548 @@ stages: parameters: getLatestFromBranch: true - bash: | - #!/bin/bash - set -u - - echo "##vso[build.updatebuildnumber]Deploying the control plane defined in $(deployerfolder) $(libraryfolder)" - green="\e[1;32m" - reset="\e[0m" - boldred="\e[1;31m" - cyan="\e[1;36m" - - ENVIRONMENT=$(echo $(deployerfolder) | awk -F'-' '{print $1}' | xargs) - LOCATION=$(echo $(deployerfolder) | awk -F'-' '{print $2}' | xargs) - deployer_environment_file_name=${CONFIG_REPO_PATH}/.sap_deployment_automation/${ENVIRONMENT}${LOCATION} - file_deployer_tfstate_key=$(deployerfolder).tfstate - file_key_vault="" - file_REMOTE_STATE_SA="" - file_REMOTE_STATE_RG=$(deployerfolder) - - echo -e "$green--- Information ---$reset" - echo "Environment: ${ENVIRONMENT}" - echo "Location: ${LOCATION}" - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: $(System.TeamProject)" - echo "Deployer Folder $(deployerfolder)" - echo "Deployer TFvars $(deployerconfig)" - echo "Library Folder $(libraryfolder)" - echo "Library TFvars $(libraryconfig)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version - echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" - cd $CONFIG_REPO_PATH - git checkout -q $(Build.SourceBranchName) - - deployer_configfile="${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" - library_configfile="${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" - - deployer_configfile="${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" - library_configfile="${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" - - echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt - az extension add --name azure-devops --output none - - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' - - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo VARIABLE_GROUP_ID ${VARIABLE_GROUP_ID} - if [ -z ${VARIABLE_GROUP_ID} ]; then - echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." 
- exit 2 - fi - echo -e "$green--- Variables ---$reset" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --output tsv) - if [ -n "${az_var}" ]; then - key_vault="${az_var}" - echo -e "$cyan 'Deployer Key Vault' ${key_vault} $reset" - else - if [ -f ${deployer_environment_file_name} ] ; then - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) - echo -e "$cyan 'Deployer Key Vault' ${key_vault} $reset" - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors - fi - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --output tsv) - if [ -n "${az_var}" ]; then - STATE_SUBSCRIPTION="${az_var}" ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - else - if [ -f ${deployer_environment_file_name} ] ; then - STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value ${STATE_SUBSCRIPTION} --output none --only-show-errors - fi - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" --output tsv) - if [ -n "${az_var}" ]; then - deployer_random_id="${az_var}" - else - deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs) - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name DEPLOYER_RANDOM_ID_SEED --value ${deployer_random_id} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --output tsv) - if [ -n "${az_var}" ]; then - REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA - else - if [ -f ${deployer_environment_file_name} ] ; then - REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value ${REMOTE_STATE_SA} --output none --only-show-errors - fi - fi - - storage_account_parameter="" - if [ -n "${REMOTE_STATE_SA}" ]; then - storage_account_parameter="--storageaccountname ${REMOTE_STATE_SA}" - else - sed -i 's/step=2/step=1/' $deployer_environment_file_name - sed -i 's/step=3/step=1/' $deployer_environment_file_name - fi - - keyvault_parameter="" - if [ -n "${key_vault}" ]; then - keyvault_parameter=" --vault ${key_vault} " - fi - - echo -e "$green--- Validations ---$reset" - - if [ -z ${TF_VAR_ansible_core_version} ]; then - export TF_VAR_ansible_core_version=2.15 - fi - - if [ "$USE_WEBAPP" = "true" ]; then - echo "Use WebApp is selected" - - if [ -z ${APP_REGISTRATION_APP_ID} ]; then - echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." 
- exit 2 - fi + #!/bin/bash + set -u + + echo "##vso[build.updatebuildnumber]Deploying the control plane defined in $(deployerfolder) $(libraryfolder)" + green="\e[1;32m" + reset="\e[0m" + boldred="\e[1;31m" + cyan="\e[1;36m" + + ENVIRONMENT=$(echo $(deployerfolder) | awk -F'-' '{print $1}' | xargs) + LOCATION=$(echo $(deployerfolder) | awk -F'-' '{print $2}' | xargs) + deployer_environment_file_name=${CONFIG_REPO_PATH}/.sap_deployment_automation/"${ENVIRONMENT}${LOCATION}" + file_deployer_tfstate_key=$(deployerfolder).tfstate + file_key_vault="" + file_REMOTE_STATE_SA="" + file_REMOTE_STATE_RG=$(deployerfolder) + REMOTE_STATE_SA="" + REMOTE_STATE_RG=$(deployerfolder) + + if [[ -f /etc/profile.d/deploy_server.sh ]]; then + path=$(grep -m 1 "export PATH=" /etc/profile.d/deploy_server.sh | awk -F'=' '{print $2}' | xargs) + export PATH=$path + fi + + echo -e "$green--- Information ---$reset" + echo "Environment: ${ENVIRONMENT}" + echo "Location: ${LOCATION}" + echo "Agent: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "Deployer Folder: $(deployerfolder)" + echo "Deployer TFvars: $(deployerconfig)" + echo "Library Folder: $(libraryfolder)" + echo "Library TFvars: $(libraryconfig)" + + echo "" + echo "Azure CLI version:" + echo "-------------------------------------------------" + az --version + echo "" + echo "Terraform version:" + echo "-------------------------------------------------" + if [ -f /opt/terraform/bin/terraform ]; then + tfPath="/opt/terraform/bin/terraform" + else + tfPath=$(which terraform) + fi + + "${tfPath}" --version + echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" + cd "$CONFIG_REPO_PATH" || exit + git checkout -q $(Build.SourceBranchName) + + deployer_configfile="${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" + library_configfile="${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" + + echo -e "$green--- Configure devops CLI extension ---$reset" + az config set extension.use_dynamic_install=yes_without_prompt --only-show-errors + az extension add --name azure-devops --output none --only-show-errors + + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' + + VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") + export VARIABLE_GROUP_ID + if [ -z "${VARIABLE_GROUP_ID}" ]; then + echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found."
+ exit 2 + fi + + printf -v tempval '%s id:' $(variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $VARIABLE_GROUP_ID" + + echo -e "$green--- Variables ---$reset" + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Deployer_Key_Vault.value" --output tsv) + if [ -n "${az_var}" ]; then + export key_vault="${az_var}" + else + if [ -f "${deployer_environment_file_name}" ] ; then + + key_vault=$(grep "^keyvault=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Deployer_Key_Vault --value "${key_vault}" --output none --only-show-errors + fi + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Subscription.value" --output tsv) + if [ -n "${az_var}" ]; then + export STATE_SUBSCRIPTION="${az_var}" + else + if [ -f "${deployer_environment_file_name}" ] ; then + export STATE_SUBSCRIPTION=$(grep "^STATE_SUBSCRIPTION=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + fi + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "DEPLOYER_RANDOM_ID_SEED.value" --output tsv) + if [ -n "${az_var}" ]; then + deployer_random_id="${az_var}" + else + deployer_random_id=$(grep "^deployer_random_id=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name DEPLOYER_RANDOM_ID_SEED --value "${deployer_random_id}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Account_Name.value" --output tsv) + if [ -n "${az_var}" ]; then + export REMOTE_STATE_SA="${az_var}" + else + if [ -f "${deployer_environment_file_name}" ] ; then + REMOTE_STATE_SA=$(grep "^REMOTE_STATE_SA=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + fi + fi + + echo "Terraform state subscription: $STATE_SUBSCRIPTION" + if [ -n "${REMOTE_STATE_RG}" ]; then + echo "Terraform state rg name: $REMOTE_STATE_RG" + fi + if [ -n "${REMOTE_STATE_SA}" ]; then + echo "Terraform storage account: $REMOTE_STATE_SA" + fi + + echo "Deployer Key Vault: ${key_vault}" + echo "Deployer TFvars: $(deployerconfig)" + + storage_account_parameter="" + if [ -n "${REMOTE_STATE_SA}" ]; then + storage_account_parameter=" --storageaccountname ${REMOTE_STATE_SA} " + else + sed -i 's/step=2/step=1/' "$deployer_environment_file_name" + sed -i 's/step=3/step=1/' "$deployer_environment_file_name" + fi + + keyvault_parameter="" + if [ -n "${key_vault}" ]; then + keyvault_parameter=" --vault ${key_vault} " + fi + + echo -e "$green--- Validations ---$reset" + + if [ -z "${TF_VAR_ansible_core_version}" ]; then + export TF_VAR_ansible_core_version=2.15 + fi + + if [ "$USE_WEBAPP" = "true" ]; then + echo "Deploy Web Application: true" + + if [ -z "${APP_REGISTRATION_APP_ID}" ]; then + echo "##vso[task.logissue type=error]Variable APP_REGISTRATION_APP_ID was not defined." + exit 2 + fi - if [ -z ${WEB_APP_CLIENT_SECRET} ]; then - echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." 
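+ # The Web App deployment additionally requires WEB_APP_CLIENT_SECRET, which is
+ # validated next; both values reach Terraform via TF_VAR_* environment variables.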
- exit 2 - fi - export TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); echo 'App Registration App ID' ${TF_VAR_app_registration_app_id} - export TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET) - export TF_VAR_use_webapp=true - - fi - - bootstrapped=0 - - if [ ! -f $deployer_environment_file_name ]; then - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value") - if [[ ${#az_var} -ne 0 ]]; then - echo "REMOTE_STATE_SA="${az_var} - echo "REMOTE_STATE_SA="${az_var} | tee -a $deployer_environment_file_name > /dev/null - echo "STATE_SUBSCRIPTION="$ARM_SUBSCRIPTION_ID | tee -a $deployer_environment_file_name > /dev/null - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value") - if [[ ${#az_var} -ne 0 ]]; then - echo "REMOTE_STATE_RG="${az_var} - echo "REMOTE_STATE_RG="${az_var} | tee -a $deployer_environment_file_name > /dev/null - echo "step=3" | tee -a $deployer_environment_file_name > /dev/null - - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value") - if [[ ${#az_var} -ne 0 ]]; then - echo "deployer_tfstate_key="${az_var} | tee -a $deployer_environment_file_name > /dev/null - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value") - if [[ ${#az_var} -ne 0 ]]; then - echo "keyvault="${az_var} | tee -a $deployer_environment_file_name > /dev/null - bootstrapped=1 - fi - - fi - - echo -e "$green--- Update .sap_deployment_automation/config as SAP_AUTOMATION_REPO_PATH can change on devops agent ---$reset" - cd ${CONFIG_REPO_PATH} - mkdir -p .sap_deployment_automation - echo SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH >.sap_deployment_automation/config - export SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH - - echo -e "$green--- File Validations ---$reset" - if [ ! -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) ]; then - echo -e "$boldred--- File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found ---$reset" - echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found." - exit 2 - fi - - if [ ! -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) ]; then - echo -e "$boldred--- File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found ---$reset" - echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found." - exit 2 - fi - - # Check if running on deployer - if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then - echo -e "$green --- Install dos2unix ---$reset" - sudo apt-get -qq install dos2unix - - sudo apt -qq install zip - - echo -e "$green --- Install terraform ---$reset" - - wget -q $(tf_url) - return_code=$? - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 - fi - unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ - rm -f terraform_$(tf_version)_linux_amd64.zip - - az extension add --name storage-blob-preview >/dev/null - echo -e "$green--- az login ---$reset" - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - return_code=$? 
- if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - - az account set --subscription $ARM_SUBSCRIPTION_ID - - else - if [ $USE_MSI != "true" ]; then - echo -e "$cyan--- Using SPN ---$reset" - export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$CP_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none - - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - az account set --subscription $ARM_SUBSCRIPTION_ID - else - echo -e "$cyan--- Using MSI ---$reset" - source /etc/profile.d/deploy_server.sh - cat /etc/profile.d/deploy_server.sh - # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=true - export ARM_USE_AZUREAD=true - fi - fi + if [ -z "${WEB_APP_CLIENT_SECRET}" ]; then + echo "##vso[task.logissue type=error]Variable WEB_APP_CLIENT_SECRET was not defined." + exit 2 + fi + TF_VAR_app_registration_app_id=$(APP_REGISTRATION_APP_ID); + echo "App Registration ID: ${TF_VAR_app_registration_app_id}" + export TF_VAR_app_registration_app_id + TF_VAR_webapp_client_secret=$(WEB_APP_CLIENT_SECRET) + export TF_VAR_webapp_client_secret + export TF_VAR_use_webapp=true + else + echo "Deploy Web Application: false" + fi + + bootstrapped=0 + + if [ ! -f "$deployer_environment_file_name" ]; then + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Account_Name.value" --out tsv) + if [[ ${#az_var} -ne 0 ]]; then + echo "REMOTE_STATE_SA="${az_var} + echo "REMOTE_STATE_SA="${az_var} | tee -a "$deployer_environment_file_name" > /dev/null + echo "STATE_SUBSCRIPTION="$ARM_SUBSCRIPTION_ID | tee -a "$deployer_environment_file_name" > /dev/null + fi - echo -e "$green--- Configure parameters ---$reset" + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Resource_Group_Name.value" --out tsv) + if [[ ${#az_var} -ne 0 ]]; then + echo "REMOTE_STATE_RG="${az_var} + echo "REMOTE_STATE_RG="${az_var} | tee -a "$deployer_environment_file_name" > /dev/null + echo "step=3" | tee -a "$deployer_environment_file_name" > /dev/null - echo -e "$green--- Convert config files to UX format ---$reset" - dos2unix -q ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) - dos2unix -q ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) + fi - echo -e "$green--- Configuring variables ---$reset" + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Deployer_State_FileName.value" --out tsv) + if [[ ${#az_var} -ne 0 ]]; then + echo "deployer_tfstate_key="${az_var} | tee -a "$deployer_environment_file_name" > /dev/null + fi - deployer_environment_file_name=${CONFIG_REPO_PATH}/.sap_deployment_automation/${ENVIRONMENT}$LOCATION + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Deployer_Key_Vault.value" --out tsv) + if [[ ${#az_var} -ne 0 ]]; then + echo "keyvault="${az_var} | tee -a "$deployer_environment_file_name" > /dev/null + bootstrapped=1 + fi - export key_vault="" - ip_added=0 + fi + + echo -e "$green--- Update 
.sap_deployment_automation/config as SAP_AUTOMATION_REPO_PATH can change on devops agent ---$reset" + cd ${CONFIG_REPO_PATH} + mkdir -p .sap_deployment_automation + echo SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH >.sap_deployment_automation/config + export SAP_AUTOMATION_REPO_PATH=$SAP_AUTOMATION_REPO_PATH + + echo -e "$green--- File Validations ---$reset" + if [ ! -f "${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" ]; then + echo -e "$boldred--- File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found ---$reset" + echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig) was not found." + exit 2 + fi + + if [ ! -f "${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" ]; then + echo -e "$boldred--- File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found ---$reset" + echo "##vso[task.logissue type=error]File ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig) was not found." + exit 2 + fi + + # Check if running on deployer + if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then + echo -e "$green--- Install dos2unix ---$reset" + sudo apt-get -qq install dos2unix + + sudo apt-get -qq install zip + + echo -e "$green--- Install terraform ---$reset" + + wget -q $(tf_url) + return_code=$? + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." + exit 2 + fi + unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ + rm -f terraform_$(tf_version)_linux_amd64.zip + + az extension add --name storage-blob-preview --allow-preview true --only-show-errors >/dev/null + echo -e "$green--- az login ---$reset" + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$CP_ARM_SUBSCRIPTION_ID + az login --service-principal --username "$ARM_CLIENT_ID" --password="$ARM_CLIENT_SECRET" --tenant "$ARM_TENANT_ID" --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed."
+ exit $return_code + fi - if [ -f ${deployer_environment_file_name} ]; then - if [ 0 = $bootstrapped ]; then - export key_vault=$(cat ${deployer_environment_file_name} | grep key_vault | awk -F'=' '{print $2}' | xargs) ; echo "Key Vault: $key_vault" - if [ -n "${key_vault}" ]; then - echo 'Deployer Key Vault' ${key_vault} - key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) - if [ -n "${key_vault_id}" ]; then + az account set --subscription "$ARM_SUBSCRIPTION_ID" - if [ "azure pipelines" = "$(this_agent)" ]; then - this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 - az keyvault network-rule add --name ${key_vault} --ip-address ${this_ip} --only-show-errors --output none - ip_added=1 - fi - fi - fi - fi - fi - - echo -e "$green--- Deploy the Control Plane ---$reset" - - if [ -n $(POOL) ]; then - echo 'Deployer Agent Pool' $(POOL) - fi - - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then - pass=$(echo $(System.CollectionId) | sed 's/-//g') - - echo "Unzipping the library state file" - unzip -o -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip -d ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder) - fi - - # ls -lart ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder) - - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - pass=$(echo $(System.CollectionId) | sed 's/-//g') - - echo "Unzipping the deployer state file" - unzip -o -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip -d ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) - fi - - # ls -lart ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder) - - export TF_LOG_PATH=${CONFIG_REPO_PATH}/.sap_deployment_automation/terraform.log - - sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh - - if [ "$USE_MSI" = "true" ]; then - echo -e "$cyan--- Using MSI ---$reset" - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ - --deployer_parameter_file "${deployer_configfile}" \ - --library_parameter_file "${library_configfile}" \ - --subscription $STATE_SUBSCRIPTION \ - --auto-approve --ado --msi \ - ${storage_account_parameter} ${keyvault_parameter} - else - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ - --deployer_parameter_file "${deployer_configfile}" \ - --library_parameter_file "${library_configfile}" \ - --subscription $STATE_SUBSCRIPTION \ - --spn_secret $ARM_CLIENT_SECRET --tenant_id $ARM_TENANT_ID \ - --auto-approve --ado \ - ${storage_account_parameter} ${keyvault_parameter} - fi - - return_code=$? - - if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Return code from deploy_controlplane $return_code." - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.err ]; then - error_message=$(cat .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.err) - echo "##vso[task.logissue type=error]Error message: $error_message." 
- fi - fi - - echo -e "$green--- Adding deployment automation configuration to devops repository ---$reset" - added=0 - cd ${CONFIG_REPO_PATH} - git fetch -q --all - git pull -q - - if [ -f ${deployer_environment_file_name} ]; then - - file_deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) - echo 'Deployer State File' $file_deployer_tfstate_key - - file_key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) - echo '(File) Deployer Key Vault' ${file_key_vault} - - file_REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) - echo '(File) Terraform state file storage account' $file_REMOTE_STATE_SA - - file_REMOTE_STATE_RG=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_RG | awk -F'=' '{print $2}' | xargs) - echo '(File) Terraform state file resource group' $file_REMOTE_STATE_RG - fi - - echo -e "$green--- Update repo ---$reset" - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION} ]; then - git add .sap_deployment_automation/${ENVIRONMENT}${LOCATION} - added=1 - fi - - if [ -f .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md ]; then - git add .sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md - added=1 - fi - - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate ]; then - git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate - added=1 - fi - # || true suppresses the exitcode of grep. To not trigger the strict exit on error - backend=$(grep "local" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate || true) - if [ -n "${backend}" ]; then - echo "Local Terraform state" - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then - sudo apt install zip - echo "Compressing the deployer state file" - pass=$(echo $(System.CollectionId) | sed 's/-//g') - zip -j -P "${pass}" ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate - git add -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip - added=1 - fi - else - echo "Remote Terraform state" - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then - git rm -q --ignore-unmatch -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/terraform.tfstate - added=1 - fi - if [ -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip ]; then - git rm -q --ignore-unmatch -f ${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip - added=1 - fi - fi - - # || true suppresses the exitcode of grep. 
To not trigger the strict exit on error - backend=$(grep "local" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate || true) - if [ -n "${backend}" ]; then - echo "Local Terraform state" - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then - sudo apt install zip - echo "Compressing the library state file" - pass=$(echo $(System.CollectionId) | sed 's/-//g') - zip -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate - git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip - added=1 - fi - else - echo "Remote Terraform state" - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then - git rm -q -f --ignore-unmatch ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate - added=1 - fi - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then - git rm -q --ignore-unmatch -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip - added=1 - fi - fi - - if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate ]; then - git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate - added=1 - fi - - if [ 1 = $added ]; then - git config --global user.email "$(Build.RequestedForEmail)" - git config --global user.name "$(Build.RequestedFor)" - git commit -m "Added updates from control plane deployment $(Build.DefinitionName) [skip ci]" - - git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) - fi - - if [ -f ${CONFIG_REPO_PATH}/.sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md ]; then - echo "##vso[task.uploadsummary]${CONFIG_REPO_PATH}/.sap_deployment_automation/${ENVIRONMENT}${LOCATION}.md" - fi - - echo -e "$green--- Adding variables to the variable group:" $(variable_group) "---$reset" - if [ 0 = $return_code ]; then - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value ${file_REMOTE_STATE_SA} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value ${file_REMOTE_STATE_SA} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --value ${file_REMOTE_STATE_RG} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --value ${file_REMOTE_STATE_RG} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value $ARM_SUBSCRIPTION_ID --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name 
Terraform_Remote_Storage_Subscription --value $ARM_SUBSCRIPTION_ID --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value ${file_deployer_tfstate_key} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value ${file_deployer_tfstate_key} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${file_key_vault} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${file_key_vault} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneEnvironment.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneEnvironment --value ${ENVIRONMENT} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneEnvironment --value ${ENVIRONMENT} --output none --only-show-errors - fi - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ControlPlaneLocation.value") - if [ -z ${az_var} ]; then - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneLocation --value ${LOCATION} --output none --only-show-errors - else - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name ControlPlaneLocation --value ${LOCATION} --output none --only-show-errors - fi - - fi - exit $return_code + else + echo "Sourcing the deploy_server.sh" + . /etc/profile.d/deploy_server.sh ; /opt/terraform/bin/terraform --version + + if [ $USE_MSI != "true" ]; then + echo "Deployment credentials: Service Principal" + echo "Deployment credential ID (SPN): $CP_ARM_CLIENT_ID" + echo "Deployer subscription: $CP_ARM_SUBSCRIPTION_ID" + + export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$CP_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$CP_ARM_SUBSCRIPTION_ID + unset ARM_USE_MSI + az login --service-principal --username "${ARM_CLIENT_ID}" --password="${ARM_CLIENT_SECRET}" --tenant "${ARM_TENANT_ID}" --output none + + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed."
+ exit $return_code + fi + az account set --subscription $ARM_SUBSCRIPTION_ID + else + echo "Deployment credentials: Managed Identity" + + # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=true + export ARM_USE_AZUREAD=true + unset ARM_CLIENT_SECRET + az account set --subscription $ARM_SUBSCRIPTION_ID + fi + fi + + echo -e "$green--- Configure parameters ---$reset" + + echo -e "$green--- Convert config files to UX format ---$reset" + dos2unix -q "${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/$(deployerconfig)" + dos2unix -q "${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/$(libraryconfig)" + + echo -e "$green--- Configuring variables ---$reset" + + deployer_environment_file_name=${CONFIG_REPO_PATH}/.sap_deployment_automation/${ENVIRONMENT}$LOCATION + + export key_vault="" + ip_added=0 + + if [ -f "${deployer_environment_file_name}" ]; then + if [ 0 = $bootstrapped ]; then + key_vault=$(grep "^keyvault=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + export key_vault + if [ -n "${key_vault}" ]; then + + key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) + if [ -n "${key_vault_id}" ]; then + + if [ "azure pipelines" = "$(this_agent)" ]; then + this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 + az keyvault network-rule add --name ${key_vault} --ip-address ${this_ip} --only-show-errors --output none + ip_added=1 + fi + fi + fi + fi + fi + + echo -e "$green--- Deploy the Control Plane ---$reset" + + if [ -n "$(POOL)" ]; then + echo "Deployer Agent Pool: $(POOL)" + fi + + if [ -f "${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip" ]; then + pass=$(echo $(System.CollectionId) | sed 's/-//g') + + echo "Unzipping the library state file" + unzip -o -qq -P "${pass}" "${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip" -d "${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)" + fi + + # ls -lart ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder) + + if [ -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/state.zip ]; then + pass=$(echo $(System.CollectionId) | sed 's/-//g') + + echo "Unzipping the deployer state file" + unzip -o -qq -P "${pass}" "${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)/state.zip" -d "${CONFIG_REPO_PATH}/DEPLOYER/$(deployerfolder)" + fi + + # ls -lart "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder) + + export TF_LOG_PATH=${CONFIG_REPO_PATH}/.sap_deployment_automation/terraform.log + + sudo chmod +x $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh + if [ $USE_MSI != "true" ]; then + echo "Deployment credentials: Service Principal" + echo "Deployment credential ID (SPN): $CP_ARM_CLIENT_ID" + echo "Deployer subscription: $CP_ARM_SUBSCRIPTION_ID" + + export TF_VAR_use_spn=true + + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file "${deployer_configfile}" \ + --library_parameter_file "${library_configfile}" \ + --subscription $STATE_SUBSCRIPTION \ + --spn_secret "${ARM_CLIENT_SECRET}" \ + --tenant_id "${ARM_TENANT_ID}" \ + --auto-approve --ado \ + ${storage_account_parameter} ${keyvault_parameter} + else + echo "Deployment credentials: Managed Identity" + export TF_VAR_use_spn=false + + ${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/deploy_controlplane.sh \ + --deployer_parameter_file "${deployer_configfile}" \ + --library_parameter_file "${library_configfile}" \ + --subscription "${STATE_SUBSCRIPTION}" --ado --msi \ + ${storage_account_parameter} ${keyvault_parameter} \ + --auto-approve + + fi + + return_code=$?
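+ # On failure deploy_controlplane.sh leaves an error summary in
+ # .sap_deployment_automation/<ENVIRONMENT><LOCATION>.err; the block below
+ # surfaces that message in the pipeline log as a task error.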
+ + if [ 0 != $return_code ]; then + echo "##vso[task.logissue type=error]Return code from deploy_controlplane $return_code." + if [ -f .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}".err ]; then + error_message=$(cat .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}".err) + echo "##vso[task.logissue type=error]Error message: $error_message." + fi + fi + + echo -e "$green--- Adding deployment automation configuration to devops repository ---$reset" + added=0 + cd "${CONFIG_REPO_PATH}" || exit + git fetch -q --all + git pull -q + + if [ -f "${deployer_environment_file_name}" ]; then + + file_deployer_tfstate_key=$(grep "^deployer_tfstate_key=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + echo "Deployer State: ${file_deployer_tfstate_key}" + + file_key_vault=$(grep "^keyvault=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + echo "Deployer Keyvault: ${file_key_vault}" + + file_REMOTE_STATE_SA=$(grep "^REMOTE_STATE_SA=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + echo "Terraform account: ${file_REMOTE_STATE_SA}" + + file_REMOTE_STATE_RG=$(grep "^REMOTE_STATE_RG=" "${deployer_environment_file_name}" | awk -F'=' '{print $2}' | xargs) + echo "Terraform rgname: ${file_REMOTE_STATE_RG}" + fi + + echo -e "$green--- Update repo ---$reset" + if [ -f .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}" ]; then + git add .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}" + added=1 + fi + + if [ -f .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}".md ]; then + git add .sap_deployment_automation/"${ENVIRONMENT}${LOCATION}".md + added=1 + fi + + if [ -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate ]; then + git add -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate + added=1 + fi + # || true suppresses the exitcode of grep. To not trigger the strict exit on error + backend=$(grep "local" "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/.terraform/terraform.tfstate || true) + if [ -n "${backend}" ]; then + echo "Local Terraform state" + if [ -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then + sudo apt-get -qq install zip + echo "Compressing the deployer state file" + pass=$(echo $(System.CollectionId) | sed 's/-//g') + zip -q -j -P "${pass}" "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/state "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/terraform.tfstate + git add -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/state.zip + added=1 + fi + else + echo "Remote Terraform state" + if [ -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/terraform.tfstate ]; then + git rm -q --ignore-unmatch -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/terraform.tfstate + added=1 + fi + if [ -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/state.zip ]; then + git rm -q --ignore-unmatch -f "${CONFIG_REPO_PATH}"/DEPLOYER/$(deployerfolder)/state.zip + added=1 + fi + fi + + # || true suppresses the exitcode of grep. 
To not trigger the strict exit on error + backend=$(grep "local" "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate || true) + if [ -n "${backend}" ]; then + echo "Local Terraform state" + if [ -f "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then + sudo apt-get -qq install zip + echo "Compressing the library state file" + pass=$(echo $(System.CollectionId) | sed 's/-//g') + zip -q -j -P "${pass}" "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate + git add -f "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/state.zip + added=1 + fi + else + echo "Remote Terraform state" + if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/terraform.tfstate ]; then + git rm -q -f --ignore-unmatch "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/terraform.tfstate + added=1 + fi + if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/state.zip ]; then + git rm -q --ignore-unmatch -f "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/state.zip + added=1 + fi + fi + + if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate ]; then + git add -f "${CONFIG_REPO_PATH}"/LIBRARY/$(libraryfolder)/.terraform/terraform.tfstate + added=1 + fi + + if [ 1 = $added ]; then + git config --global user.email "$(Build.RequestedForEmail)" + git config --global user.name "$(Build.RequestedFor)" + git commit -m "Added updates from control plane deployment $(Build.DefinitionName) [skip ci]" + + git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) + fi + + if [ -f "${CONFIG_REPO_PATH}"/.sap_deployment_automation/"${ENVIRONMENT}""${LOCATION}".md ]; then + echo "##vso[task.uploadsummary]${CONFIG_REPO_PATH}/.sap_deployment_automation/"${ENVIRONMENT}${LOCATION}".md" + fi + + echo -e "$green--- Adding variables to the variable group:" $(variable_group) "---$reset" + if [ 0 = $return_code ]; then + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Account_Name.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Account_Name --value "${file_REMOTE_STATE_SA}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Account_Name --value "${file_REMOTE_STATE_SA}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Resource_Group_Name.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Resource_Group_Name --value "${file_REMOTE_STATE_RG}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Resource_Group_Name --value "${file_REMOTE_STATE_RG}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Terraform_Remote_Storage_Subscription.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Subscription --value "${ARM_SUBSCRIPTION_ID}" --output none --only-show-errors + else + az pipelines 
variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name Terraform_Remote_Storage_Subscription --value "${ARM_SUBSCRIPTION_ID}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Deployer_State_FileName.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Deployer_State_FileName --value "${file_deployer_tfstate_key}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name Deployer_State_FileName --value "${file_deployer_tfstate_key}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "Deployer_Key_Vault.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name Deployer_Key_Vault --value "${file_key_vault}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name Deployer_Key_Vault --value "${file_key_vault}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "ControlPlaneEnvironment.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name ControlPlaneEnvironment --value "${ENVIRONMENT}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name ControlPlaneEnvironment --value "${ENVIRONMENT}" --output none --only-show-errors + fi + + az_var=$(az pipelines variable-group variable list --group-id "${VARIABLE_GROUP_ID}" --query "ControlPlaneLocation.value" --out tsv) + if [ -z ${az_var} ]; then + az pipelines variable-group variable create --group-id "${VARIABLE_GROUP_ID}" --name ControlPlaneLocation --value "${LOCATION}" --output none --only-show-errors + else + az pipelines variable-group variable update --group-id "${VARIABLE_GROUP_ID}" --name ControlPlaneLocation --value "${LOCATION}" --output none --only-show-errors + fi + + fi + exit $return_code displayName: Deploy control plane env: SYSTEM_ACCESSTOKEN: $(System.AccessToken) - ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID) + CP_ARM_SUBSCRIPTION_ID: $(CP_ARM_SUBSCRIPTION_ID) CP_ARM_CLIENT_ID: $(CP_ARM_CLIENT_ID) CP_ARM_CLIENT_SECRET: $(CP_ARM_CLIENT_SECRET) CP_ARM_TENANT_ID: $(CP_ARM_TENANT_ID) diff --git a/deploy/pipelines/02-sap-workload-zone.yaml b/deploy/pipelines/02-sap-workload-zone.yaml index 169e5fa5e8..9320120961 100644 --- a/deploy/pipelines/02-sap-workload-zone.yaml +++ b/deploy/pipelines/02-sap-workload-zone.yaml @@ -119,8 +119,8 @@ stages: echo "##vso[build.updatebuildnumber]Deploying the SAP Workload zone defined in $(workload_zone_folder)" - # Check if running on deployer - if [ ! -f /etc/profile.d/deploy_server.sh ]; then + # Check if running on deployer + if [ ! -f /etc/profile.d/deploy_server.sh ]; then echo -e "$green --- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix echo -e "$green --- Install terraform ---$reset" @@ -128,94 +128,101 @@ stages: wget -q $(tf_url) return_code=$? if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." 
+ exit 2 fi unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ rm -f terraform_$(tf_version)_linux_amd64.zip - else + else source /etc/profile.d/deploy_server.sh - fi + fi - if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then + if [ ! -f $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then echo -e "$boldred--- $(workload_zone_configuration_file) was not found ---$reset" echo "##vso[task.logissue type=error]File $(workload_zone_configuration_file) was not found." exit 2 - fi + fi + + echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" - echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" + cd "${CONFIG_REPO_PATH}" || exit + mkdir -p .sap_deployment_automation + git checkout -q $(Build.SourceBranchName) - cd $CONFIG_REPO_PATH - mkdir -p .sap_deployment_automation - git checkout -q $(Build.SourceBranchName) + echo -e "$green--- Validations ---$reset" + if [ $USE_MSI != "true" ]; then - echo -e "$green--- Validations ---$reset" + if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." + exit 2 + fi - if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." - exit 2 - fi - if [ $USE_MSI != "true" ]; then + if [ $WL_ARM_SUBSCRIPTION_ID == '$$(ARM_SUBSCRIPTION_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." + exit 2 + fi if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_CLIENT_ID == '$$(ARM_CLIENT_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_CLIENT_SECRET == '$$(ARM_CLIENT_SECRET)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_TENANT_ID == '$$(ARM_TENANT_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_SUBSCRIPTION_ID was not defined in the $(parent_variable_group) variable group." 
+ exit 2 fi if [ -z $CP_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_CLIENT_SECRET was not defined in the $(parent_variable_group) variable group." + exit 2 fi if [ -z $CP_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." - exit 2 + echo "##vso[task.logissue type=error]Variable CP_ARM_TENANT_ID was not defined in the $(parent_variable_group) variable group." + exit 2 fi - fi - - echo -e "$green--- Convert config file to UX format ---$reset" - dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) - echo -e "$green--- Read details ---$reset" - - ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} - echo Network: ${NETWORK} - echo "TFvars $workload_zone_configuration_file" - echo "" - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: $(System.TeamProject)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version - - ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) - LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) - case "$LOCATION_CODE" in + fi + + dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) + echo -e "$green--- Read deployment details ---$reset" + + ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') + NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs) + + ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs ) + LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs ) + case "$LOCATION_CODE" in "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; @@ -272,392 +279,410 @@ stages: "WUS2") LOCATION_IN_FILENAME="westus2" ;; "WUS3") LOCATION_IN_FILENAME="westus3" ;; *) LOCATION_IN_FILENAME="westeurope" ;; - esac - - NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - echo "Network(filename): $NETWORK_IN_FILENAME" - echo 
"Deployer Environment $(deployer_environment)" - echo "Deployer Region $(deployer_region)" - echo "Workload TFvars $workload_zone_configuration_file" - echo "" - - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: $(System.TeamProject)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version - - - if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then + esac + + NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs ) + + echo "Environment: $ENVIRONMENT" + echo "Location: $LOCATION" + echo "Network: $NETWORK" + + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Network(filename): $NETWORK_IN_FILENAME" + + echo "Deployer Environment $(deployer_environment)" + echo "Deployer Region $(deployer_region)" + echo "Workload TFvars $(workload_zone_configuration_file)" + echo "" + + echo "Agent pool: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "" + echo "Azure CLI version:" + echo "-------------------------------------------------" + az --version + + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi - if [ $LOCATION != $LOCATION_IN_FILENAME ]; then + if [ $LOCATION != $LOCATION_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi - if [ $NETWORK != $NETWORK_IN_FILENAME ]; then + if [ $NETWORK != $NETWORK_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The network_logical_name setting in $(workload_zone_configuration_file) '$NETWORK' does not match the $(workload_zone_configuration_file) file name '$NETWORK_IN_FILENAME-. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" exit 2 - fi + fi echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt --output none + az config set extension.use_dynamic_install=yes_without_prompt --output none - az extension add --name azure-devops --output none + az extension add --name azure-devops --output none --only-show-errors - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none - export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") - echo '$(parent_variable_group) id: ' $PARENT_VARIABLE_GROUP_ID - if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then + export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") + + if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." 
exit 2 - fi + fi + + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID - if [ -z ${VARIABLE_GROUP_ID} ]; then + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 - fi + fi + printf -v tempval '%s id:' $(variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $VARIABLE_GROUP_ID" + + printf -v tempval '%s id:' $(parent_variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $PARENT_VARIABLE_GROUP_ID" - echo "Agent Pool: " $(this_agent) + deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) + echo "Deployer Environment File: $deployer_environment_file_name" - echo -e "$green--- Set CONFIG_REPO_PATH variable ---$reset" + workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} + echo "Workload Zone Environment File: $workload_environment_file_name" - deployer_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/$(deployer_environment)$(deployer_region) ; echo 'Deployer Environment File' $deployer_environment_file_name - workload_environment_file_name=$CONFIG_REPO_PATH/.sap_deployment_automation/${ENVIRONMENT}${LOCATION_CODE}${NETWORK} ; echo 'Workload Environment File' $workload_environment_file_name - dos2unix -q ${deployer_environment_file_name} - dos2unix -q ${workload_environment_file_name} + dos2unix -q ${deployer_environment_file_name} + dos2unix -q ${workload_environment_file_name} - if [ ! -f ${deployer_environment_file_name} ]; then + if [ ! -f ${deployer_environment_file_name} ]; then echo -e "$boldred--- $(deployer_environment)$(deployer_region) was not found ---$reset" echo "##vso[task.logissue type=error]Control plane configuration file $(deployer_environment)$(deployer_region) was not found." 
exit 2 - fi - + fi echo -e "$green--- Read parameter values ---$reset" - if [ "true" == $(inherit) ]; then + if [ "true" == $(inherit) ]; then - az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" --out tsv) if [ -z ${az_var} ]; then - deployer_tfstate_key=$(cat ${deployer_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=$(grep "^deployer_tfstate_key=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs) else - deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key + deployer_tfstate_key=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ -z ${az_var} ]; then - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(grep "^keyvault=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs) else - key_vault=${az_var}; echo 'Deployer Key Vault' ${key_vault} + key_vault=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv) if [ -z ${az_var} ]; then - REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=$(grep "^REMOTE_STATE_SA" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs) ; else - REMOTE_STATE_SA=${az_var}; echo 'Terraform state file storage account' $REMOTE_STATE_SA + REMOTE_STATE_SA=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --out tsv) if [ -z ${az_var} ]; then - STATE_SUBSCRIPTION=$(cat ${deployer_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=$(grep "^STATE_SUBSCRIPTION" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs) else - STATE_SUBSCRIPTION=${az_var}; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + STATE_SUBSCRIPTION=${az_var} + fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "ARM_SUBSCRIPTION_ID.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WL_ARM_SUBSCRIPTION_ID.value" --out tsv) if [ -z ${az_var} ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." 
- exit 2 - else - echo 'Target subscription' $WL_ARM_SUBSCRIPTION_ID + echo "##vso[task.logissue type=error]Variable WL_ARM_SUBSCRIPTION_ID was not defined." + exit 2 fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Workload_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Workload_Key_Vault.value" --out tsv) if [ -z ${az_var} ]; then - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - fi + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(grep "^workloadkeyvault" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs) + fi else - export workload_key_vault=$(Workload_Key_Vault) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_key_vault=$(Workload_Key_Vault) + fi - else - deployer_tfstate_key=$(cat ${workload_environment_file_name} | grep deployer_tfstate_key | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key - key_vault=$(cat ${workload_environment_file_name} | grep workload_key_vault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} - REMOTE_STATE_SA=$(cat ${workload_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA - STATE_SUBSCRIPTION=$(cat ${workload_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION - fi + else + deployer_tfstate_key=$(grep "^deployer_tfstate_key=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs) - secrets_set=1 - echo -e "$green--- az login ---$reset" + key_vault=$(grep -m1 "^workload_key_vault=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs) ; - if [ $LOGON_USING_SPN == "true" ]; then - echo "Using SPN" - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none - else - echo "Using MSI" - export ARM_USE_MSI=true - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - unset ARM_TENANT_ID + REMOTE_STATE_SA=$(grep "^REMOTE_STATE_SA=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs) - az login --identity --allow-no-subscriptions --output none - fi + STATE_SUBSCRIPTION=$(grep "^STATE_SUBSCRIPTION=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs) + fi + + echo "Deployer statefile: $deployer_tfstate_key" + echo "Deployer Key vault: $key_vault" + echo "Workload Key vault: ${workload_key_vault}" + echo "Target subscription: $WL_ARM_SUBSCRIPTION_ID" + + echo "Terraform state file subscription: $STATE_SUBSCRIPTION" + echo "Terraform state file storage account: $REMOTE_STATE_SA" + + secrets_set=1 + echo -e "$green--- az login ---$reset" - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + echo -e "$cyan--- Sourcing the deploy_server.sh file ---$reset" + . 
/etc/profile.d/deploy_server.sh ; /opt/bin/terraform/terraform --version if [ $USE_MSI != "true" ]; then - echo -e "$green --- Set secrets ---$reset" - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ - --region "${LOCATION}" --subscription $WL_ARM_SUBSCRIPTION_ID --spn_id $WL_ARM_CLIENT_ID --spn_secret "${WL_ARM_CLIENT_SECRET}" \ - --tenant_id $WL_ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION - secrets_set=$? ; echo -e "$cyan Set Secrets returned $secrets_set $reset" - az keyvault set-policy --name "${key_vault}" --object-id $WL_ARM_OBJECT_ID --secret-permissions get list --subscription $STATE_SUBSCRIPTION --output none + echo "Deployment credentials: Service Principal" + echo "Deployment credential ID (SPN): $WL_ARM_CLIENT_ID" + echo "Deployer subscription: $STATE_SUBSCRIPTION" + + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_OBJECT_ID=$WL_ARM_OBJECT_ID + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_AZUREAD=true + unset ARM_USE_MSI + az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none + + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." + exit $return_code + fi + az account set --subscription $STATE_SUBSCRIPTION + echo -e "$green --- Set secrets ---$reset" + + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/set_secrets.sh --workload --vault "${key_vault}" --environment "${ENVIRONMENT}" \ + --region "${LOCATION}" --subscription $ARM_SUBSCRIPTION_ID --spn_id $ARM_CLIENT_ID --spn_secret "${ARM_CLIENT_SECRET}" \ + --tenant_id $ARM_TENANT_ID --keyvault_subscription $STATE_SUBSCRIPTION + secrets_set=$? 
; + echo "Set Secrets returned: $secrets_set" + + else + echo "Deployment credentials: Managed Identity" + # export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=true + export ARM_USE_AZUREAD=true + unset ARM_CLIENT_SECRET fi debug_variable='--output none' debug_variable='' if [ $USE_MSI != "true" ]; then - az login --service-principal --username $CP_ARM_CLIENT_ID --password=$CP_ARM_CLIENT_SECRET --tenant $CP_ARM_TENANT_ID --output none + echo "Deployment credentials: Service Principal" + echo "Service Principal: $WL_ARM_CLIENT_ID" + echo "Service Principal (OID): $WL_ARM_OBJECT_ID" - isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --query "[?principalType=='ServicePrincipal'].principalId | [0] " --assignee $CP_ARM_CLIENT_ID) + isUserAccessAdmin=$(az role assignment list --role "User Access Administrator" --subscription $STATE_SUBSCRIPTION --assignee $WL_ARM_OBJECT_ID --query "[].principalName | [0]" --output tsv) - tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) + tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" -o tsv) - if [ -n "${isUserAccessAdmin}" ]; then + if [ -n "${isUserAccessAdmin}" ]; then - echo -e "$green--- Set permissions ---$reset" - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --query "[?principalId=='$WL_ARM_CLIENT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo -e "$green --- Assign subscription permissions to $perms ---$reset" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none - fi + echo -e "$green--- Set permissions ---$reset" + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Reader" --assignee $WL_ARM_OBJECT_ID --query "[].principalName | [0]" --output tsv --only-show-errors) + if [ -z "$perms" ]; then + echo -e "$green --- Assign subscription permissions to $ARM_OBJECT_ID ---$reset" + az role assignment create --assignee $ARM_OBJECT_ID --role "Reader" --scope "/subscriptions/${STATE_SUBSCRIPTION}" --output none + fi - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalName | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Storage Account Contributor permissions for $WL_ARM_OBJECT_ID to ${tfstate_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Storage Account Contributor" --scope "${tfstate_resource_id}" --output none - fi + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Storage Blob Data Contributor" --scope "${tfstate_resource_id}" --assignee $WL_ARM_OBJECT_ID --query "[].principalName | [0]" --output tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning Storage Blob Data Contributor permissions for $ARM_OBJECT_ID to ${tfstate_resource_id}" + az role assignment create --assignee $ARM_OBJECT_ID --role "Storage Blob Data Contributor" --scope "${tfstate_resource_id}" --output none + fi - resource_group_name=$(az resource 
show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) + resource_group_name=$(az resource show --id "${tfstate_resource_id}" --query resourceGroup -o tsv) - if [ -n ${resource_group_name} ]; then - for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --query "[?principalId=='$WL_ARM_OBJECT_ID'].principalId | [0]" -o tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Private DNS Zone Contributor" --scope $scope --output none + if [ -n "${resource_group_name}" ]; then + for scope in $(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/privateDnsZones --query "[].id" --output tsv); do + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Private DNS Zone Contributor" --scope $scope --assignee $WL_ARM_OBJECT_ID --query "[].principalName | [0]" --output tsv --only-show-errors) + if [ -z "$perms" ]; then + echo "Assigning DNS Zone Contributor permissions for $WL_ARM_OBJECT_ID to ${scope}" + az role assignment create --assignee $ARM_OBJECT_ID --role "Private DNS Zone Contributor" --scope $scope --output none + fi + done fi - done - fi - resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) + resource_group_name=$(az keyvault show --name "${key_vault}" --query resourceGroup --subscription ${STATE_SUBSCRIPTION} -o tsv) - if [ -n ${resource_group_name} ]; then - resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) + if [ -n "${resource_group_name}" ]; then + resource_group_id=$(az group show --name ${resource_group_name} --subscription ${STATE_SUBSCRIPTION} --query id -o tsv) - vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") - if [ -n "${vnet_resource_id}" ]; then - perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --only-show-errors --query "[].principalId | [0]" --assignee $WL_ARM_OBJECT_ID -o tsv --only-show-errors) + vnet_resource_id=$(az resource list --resource-group "${resource_group_name}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.Network/virtualNetworks -o tsv --query "[].id | [0]") + if [ -n "${vnet_resource_id}" ]; then + perms=$(az role assignment list --subscription ${STATE_SUBSCRIPTION} --role "Network Contributor" --scope $vnet_resource_id --query "[].principalName | [0]" --assignee $ARM_OBJECT_ID --output tsv --only-show-errors) - if [ -z "$perms" ]; then - echo "Assigning Network Contributor rights for $WL_ARM_OBJECT_ID to ${vnet_resource_id}" - az role assignment create --assignee-object-id $WL_ARM_OBJECT_ID --assignee-principal-type ServicePrincipal --role "Network Contributor" --scope $vnet_resource_id --output none - fi + if [ -z "$perms" ]; then + echo "Assigning Network Contributor rights for $ARM_OBJECT_ID to 
${vnet_resource_id}" + az role assignment create --assignee $ARM_OBJECT_ID --role "Network Contributor" --scope $vnet_resource_id --output none + fi + fi fi + else + echo "##vso[task.logissue type=warning]Service Principal $WL_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" fi - else - echo "##vso[task.logissue type=warning]Service Principal $CP_ARM_CLIENT_ID does not have 'User Access Administrator' permissions. Please ensure that the service principal $WL_ARM_CLIENT_ID has permissions on the Terrafrom state storage account and if needed on the Private DNS zone and the source management network resource" - fi fi echo -e "$green--- Deploy the workload zone ---$reset" - cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) - if [ -f /etc/profile.d/deploy_server.sh ]; then - if [ $LOGON_USING_SPN == "true" ]; then - echo "Logon Using SPN" - - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi - else - export ARM_USE_MSI=true - az login --identity --allow-no-subscriptions --output none - fi - else + cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder) + if [ -f /etc/profile.d/deploy_server.sh ]; then if [ $USE_MSI != "true" ]; then - az logout --output none - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code - fi + az logout --output none + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_USE_MSI + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." 
+ exit $return_code + fi + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ + --deployer_environment $(deployer_environment) --subscription $ARM_SUBSCRIPTION_ID \ + --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado + else + $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ + --deployer_environment $(deployer_environment) --subscription $ARM_SUBSCRIPTION_ID \ + --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ + --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi + fi - fi - - if [ $USE_MSI != "true" ]; then - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --spn_id $WL_ARM_CLIENT_ID --spn_secret $WL_ARM_CLIENT_SECRET --tenant_id $WL_ARM_TENANT_ID \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado - else - $SAP_AUTOMATION_REPO_PATH/deploy/scripts/install_workloadzone.sh --parameterfile $(workload_zone_configuration_file) \ - --deployer_environment $(deployer_environment) --subscription $(ARM_SUBSCRIPTION_ID) \ - --deployer_tfstate_key "${deployer_tfstate_key}" --keyvault "${key_vault}" --storageaccountname "${REMOTE_STATE_SA}" \ - --state_subscription "${STATE_SUBSCRIPTION}" --auto-approve --ado --msi - fi - return_code=$? - - echo "Return code: ${return_code}" - if [ -f ${workload_environment_file_name} ]; then - export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} - export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Prefix' ${workload_prefix} - export landscape_tfstate_key=$(cat ${workload_environment_file_name} | grep landscape_tfstate_key= | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Zone State File' $landscape_tfstate_key - fi - - expiry_date=$(date -d "+365 days" +%Y-%m-%d) - - - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") - if [ -z ${az_var} ]; then + fi + return_code=$? 
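+ # Note: the exit status of install_workloadzone.sh is captured in return_code and re-raised at the end of this task (exit $return_code), so a failed workload zone deployment still fails the pipeline stage after the variable-group updates below have run.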
+ + echo "Return code from deployment: ${return_code}" + + if [ -f ${workload_environment_file_name} ]; then + export workload_key_vault=$(cat ${workload_environment_file_name} | grep workloadkeyvault= | awk -F'=' '{print $2}' | xargs) + echo "Workload zone key vault: ${workload_key_vault}" + + export workload_prefix=$(cat ${workload_environment_file_name} | grep workload_zone_prefix= | awk -F'=' '{print $2}' | xargs) + echo "Workload zone prefix: ${workload_prefix}" + + export landscape_tfstate_key=$(cat ${workload_environment_file_name} | grep landscape_tfstate_key= | awk -F'=' '{print $2}' | xargs) + echo "Workload zone state file: ${landscape_tfstate_key}" + fi + + expiry_date=$(date -d "+365 days" +%Y-%m-%d) + + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "FENCING_SPN_ID.value") + if [ -z ${az_var} ]; then echo "##vso[task.logissue type=warning]Variable FENCING_SPN_ID is not set. Required for highly available deployments" - else + else export fencing_id=$(az keyvault secret list --vault-name $workload_key_vault --subscription $STATE_SUBSCRIPTION --query [].name -o tsv | grep ${workload_prefix}-fencing-spn-id | xargs) if [ -z "$fencing_id" ]; then - az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none - az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-id --vault-name $workload_key_vault --value $(FENCING_SPN_ID) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-pwd --vault-name $workload_key_vault --value=$FENCING_SPN_PWD --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none + az keyvault secret set --name ${workload_prefix}-fencing-spn-tenant --vault-name $workload_key_vault --value $(FENCING_SPN_TENANT) --subscription $STATE_SUBSCRIPTION --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --output none fi - fi - az logout --output none + fi + az logout --output none echo -e "$green--- Add & update files in the DevOps Repository ---$reset" - cd $(Build.Repository.LocalPath) - git pull + cd $(Build.Repository.LocalPath) + git pull - echo -e "$green--- Pull latest ---$reset" - cd $CONFIG_REPO_PATH - git pull + echo -e "$green--- Pull latest ---$reset" + cd $CONFIG_REPO_PATH + git pull - added=0 - if [ -f ${workload_environment_file_name} ]; then + added=0 + if [ -f ${workload_environment_file_name} ]; then git add ${workload_environment_file_name} added=1 - fi - if [ -f ${workload_environment_file_name}.md ]; then + fi + if [ -f ${workload_environment_file_name}.md ]; then git add ${workload_environment_file_name}.md added=1 - fi - if [ -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then + fi + if [ -f 
$(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate ]; then git add -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform/terraform.tfstate added=1 - fi - if [ 1 == $added ]; then + fi + if [ 1 == $added ]; then git config --global user.email "$(Build.RequestedForEmail)" git config --global user.name "$(Build.RequestedFor)" git commit -m "Added updates from devops deployment $(Build.DefinitionName) [skip ci]" git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) - fi + fi - if [ -f ${workload_environment_file_name}.md ]; then + if [ -f ${workload_environment_file_name}.md ]; then echo "##vso[task.uploadsummary]${workload_environment_file_name}.md" - fi + fi echo -e "$green--- Adding variables to the variable group" $(variable_group) "---$reset" - if [ -n $VARIABLE_GROUP_ID ]; then + if [ -n "${VARIABLE_GROUP_ID}" ]; then az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Account_Name.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --value "${REMOTE_STATE_SA}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Terraform_Remote_Storage_Subscription.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --value "${STATE_SUBSCRIPTION}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors + az pipelines 
variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --value "${deployer_tfstate_key}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query Deployer_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --value ${key_vault} --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Key_Vault --value $workload_key_vault --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors + az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Secret_Prefix --value "${workload_prefix}" --output none --only-show-errors fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Zone_State_FileName.value --output table) if [ -n "${az_var}" ]; then - az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines variable-group variable update --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors else - az pipelines variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors + az pipelines 
variable-group variable create --group-id ${VARIABLE_GROUP_ID} --name "${NETWORK}"Workload_Zone_State_FileName --value "${landscape_tfstate_key}" --output none --only-show-errors fi - fi + fi - if [ 0 != $return_code ]; then + if [ 0 != $return_code ]; then echo "##vso[task.logissue type=error]Return code from install_workloadzone $return_code." if [ -f ${workload_environment_file_name}.err ]; then - error_message=$(cat ${workload_environment_file_name}.err) - echo "##vso[task.logissue type=error]Error message: $error_message." + error_message=$(cat ${workload_environment_file_name}.err) + echo "##vso[task.logissue type=error]Error message: $error_message." fi - fi + fi exit $return_code diff --git a/deploy/pipelines/03-sap-system-deployment.yaml b/deploy/pipelines/03-sap-system-deployment.yaml index 4a8dbc626e..954c3499cf 100644 --- a/deploy/pipelines/03-sap-system-deployment.yaml +++ b/deploy/pipelines/03-sap-system-deployment.yaml @@ -114,19 +114,11 @@ stages: NETWORK=$(grep "^network_logical_name" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) SID=$(grep "^sid" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) - echo "Environment: $ENVIRONMENT" - echo "Location: $LOCATION" - echo "Network: $NETWORK" - echo "SID: $SID" - echo "System TFvars $sap_system_configuration" - echo "" - echo "Agent: $(this_agent)" - echo "Organization: $(System.CollectionUri)" - echo "Project: $(System.TeamProject)" - echo "" - echo "Azure CLI version:" - echo "-------------------------------------------------" - az --version + echo "Environment: $ENVIRONMENT" + echo "Location: $LOCATION" + echo "Network: $NETWORK" + echo "SID: $SID" + echo "System TFvars: $(sap_system_configuration)" ENVIRONMENT_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $1}' | xargs) ; LOCATION_CODE=$(echo $(sap_system_folder) | awk -F'-' '{print $2}' | xargs) ; @@ -191,10 +183,20 @@ stages: *) LOCATION_IN_FILENAME="westeurope" ;; esac - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - echo "Network(filename): $NETWORK_IN_FILENAME" - echo "SID(filename): $SID_IN_FILENAME" + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Network(filename): $NETWORK_IN_FILENAME" + echo "SID(filename): $SID_IN_FILENAME" + + echo "" + echo "Agent: $(this_agent)" + echo "Organization: $(System.CollectionUri)" + echo "Project: $(System.TeamProject)" + echo "" + echo "Azure CLI version:" + echo "-------------------------------------------------" + az --version + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(sap_system_configuration) '$ENVIRONMENT' does not match the $(sap_system_configuration) file name '$ENVIRONMENT_IN_FILENAME'. 
Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-[SID]" @@ -231,18 +233,20 @@ stages: fi echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt --output none + az config set extension.use_dynamic_install=yes_without_prompt --output none --only-show-errors - az extension add --name azure-devops --output none + az extension add --name azure-devops --output none --only-show-errors - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none --only-show-errors export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 fi + printf -v val '%-15s' "$(variable_group) id:" + echo "$val $VARIABLE_GROUP_ID" echo -e "$green--- Login ---$reset" if [ -z $USE_MSI ]; then @@ -256,7 +260,8 @@ stages: fi if [ $USE_MSI != "true" ]; then - echo "Using SPN" + echo "Deployment credentials: Service Principal" + echo "Deployment credential ID (SPN): $WL_ARM_CLIENT_ID" export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET @@ -271,6 +276,7 @@ stages: exit $return_code fi else + echo "Deployment credentials: Managed Identity" export ARM_USE_MSI=true export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID unset ARM_TENANT_ID @@ -280,48 +286,53 @@ stages: echo -e "$green--- Define variables ---$reset" cd $HOME_CONFIG/SYSTEM/$(sap_system_folder) - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --out tsv) if [ -z ${az_var} ]; then - export STATE_SUBSCRIPTION=$(grep STATE_SUBSCRIPTION ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + export STATE_SUBSCRIPTION=$(grep STATE_SUBSCRIPTION ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export STATE_SUBSCRIPTION=${az_var} ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION + export STATE_SUBSCRIPTION=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv) if [ -z ${az_var} ]; then - export REMOTE_STATE_SA=$(grep REMOTE_STATE_SA ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA + export REMOTE_STATE_SA=$(grep REMOTE_STATE_SA ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export REMOTE_STATE_SA=${az_var} ; echo 'Terraform state file storage account' $REMOTE_STATE_SA + export REMOTE_STATE_SA=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" --out 
tsv) if [ -z ${az_var} ]; then - export deployer_tfstate_key=$(grep deployer_tfstate_key ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer State File' $deployer_tfstate_key + export deployer_tfstate_key=$(grep deployer_tfstate_key ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export deployer_tfstate_key=${az_var} ; echo 'Deployer State File' $deployer_tfstate_key + export deployer_tfstate_key=${az_var} fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Zone_State_FileName.value | tr -d \") if [ -z ${az_var} ]; then - export landscape_tfstate_key=$(grep landscape_tfstate_key= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'landscape_tfstate_key' $landscape_tfstate_key + export landscape_tfstate_key=$(grep landscape_tfstate_key= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export landscape_tfstate_key=${az_var} ; echo 'landscape_tfstate_key' $landscape_tfstate_key + export landscape_tfstate_key=${az_var} fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ -z ${az_var} ]; then - export key_vault=$(grep keyvault= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' $key_vault + export key_vault=$(grep keyvault= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export key_vault=${az_var} ; echo 'Deployer Key Vault' $key_vault + export key_vault=${az_var} fi az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Key_Vault.value | tr -d \") if [ -z ${az_var} ]; then - export workload_key_vault=$(grep workloadkeyvault= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_key_vault=$(grep workloadkeyvault= ${environment_file_name} | awk -F'=' '{print $2}' | xargs) else - export workload_key_vault=${az_var} ; echo 'Workload Key Vault' ${workload_key_vault} + export workload_key_vault=${az_var} fi + echo "Deployer state file: $deployer_tfstate_key" + echo "Deployer Key Vault: $key_vault" + echo "Workload Zone state file: $landscape_tfstate_key" + echo "Workload Zone Key Vault: $workload_key_vault" + echo -e "$green--- Run the installer script that deploys the SAP System ---$reset" $SAP_AUTOMATION_REPO_PATH/deploy/scripts/installer.sh --parameterfile $(sap_system_configuration) --type sap_system \ diff --git a/deploy/pipelines/04-sap-software-download.yaml b/deploy/pipelines/04-sap-software-download.yaml index 0966999505..fc13d25f86 100644 --- a/deploy/pipelines/04-sap-software-download.yaml +++ b/deploy/pipelines/04-sap-software-download.yaml @@ -117,7 +117,7 @@ stages: echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." 
exit 2 fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ -n ${az_var} ]; then kv_name=${az_var}; echo "Key Vault="$kv_name else diff --git a/deploy/pipelines/10-remover-terraform.yaml b/deploy/pipelines/10-remover-terraform.yaml index cd4c80dded..e9ed4969e1 100644 --- a/deploy/pipelines/10-remover-terraform.yaml +++ b/deploy/pipelines/10-remover-terraform.yaml @@ -7,193 +7,218 @@ # +------------------------------------4--------------------------------------*/ parameters: - - name: cleanup_sap - displayName: Remove the SAP system - type: boolean - default: true - - - name: sap_system - displayName: "SAP System configuration name, use this format: ENV-LOCA-VNET-SID" - type: string - default: DEV-WEEU-SAP01-X00 - - - name: cleanup_zone - displayName: Remove the SAP workload zone - type: boolean - default: true - - - name: workload_zone - displayName: "SAP workload zone configuration name, use this format: ENV-LOCA-VNET-INFRASTRUCTURE" - type: string - default: DEV-WEEU-SAP01-INFRASTRUCTURE - - - name: cleanup_region - displayName: Remove the control plane - type: boolean - default: true - - - name: deployer - displayName: "Deployer configuration name, use this format: ENV-LOCA-VNET-INFRASTRUCTURE" - type: string - default: MGMT-WEEU-DEP00-INFRASTRUCTURE - - - name: library - displayName: "Library configuration name, use this format: ENV-LOCA-SAP_LIBRARY" - type: string - default: MGMT-WEEU-SAP_LIBRARY - - - name: workload_environment - displayName: Environment (DEV, QUA, PRD) - type: string - default: DEV - - - name: deployer_environment - displayName: Environment (MGMT) - type: string - default: MMGMT - - - name: use_deployer - displayName: Run removal on self hosted agent - type: boolean - default: true - - - name: sap_automation_repo_path - displayName: The local path on the agent where the sap_automation repo can be found - type: string - - - name: config_repo_path - displayName: The local path on the agent where the config repo can be found - type: string + - name: cleanup_sap + displayName: Remove the SAP system + type: boolean + default: true + + - name: sap_system + displayName: "SAP System configuration name, use this format: ENV-LOCA-VNET-SID" + type: string + default: DEV-WEEU-SAP01-X00 + + - name: cleanup_zone + displayName: Remove the SAP workload zone + type: boolean + default: true + + - name: workload_zone + displayName: "SAP workload zone configuration name, use this format: ENV-LOCA-VNET-INFRASTRUCTURE" + type: string + default: DEV-WEEU-SAP01-INFRASTRUCTURE + + - name: cleanup_region + displayName: Remove the control plane + type: boolean + default: true + + - name: deployer + displayName: "Deployer configuration name, use this format: ENV-LOCA-VNET-INFRASTRUCTURE" + type: string + default: MGMT-WEEU-DEP00-INFRASTRUCTURE + + - name: library + displayName: "Library configuration name, use this format: ENV-LOCA-SAP_LIBRARY" + type: string + default: MGMT-WEEU-SAP_LIBRARY + + - name: workload_environment + displayName: Environment (DEV, QUA, PRD) + type: string + default: DEV + + - name: deployer_environment + displayName: Environment (MGMT) + type: string + default: MMGMT + + - name: use_deployer + displayName: Run removal on self hosted agent + type: boolean + default: true + + - name: sap_automation_repo_path + displayName: The local path on the agent where 
the sap_automation repo can be found + type: string + + - name: config_repo_path + displayName: The local path on the agent where the config repo can be found + type: string stages: - stage: Remove_SAP_systems displayName: "Removing the SAP System" condition: and(not(failed()), not(canceled()), eq(${{ parameters.cleanup_sap }}, true)) variables: - - template: variables/10-remover-terraform-variables.yaml + - template: variables/10-remover-terraform-variables.yaml parameters: - deployer_environment: ${{ parameters.deployer_environment }} - workload_environment: ${{ parameters.workload_environment }} - workload_zone: ${{ parameters.workload_zone }} - sap_system: ${{ parameters.sap_system }} + deployer_environment: ${{ parameters.deployer_environment }} + workload_environment: ${{ parameters.workload_environment }} + workload_zone: ${{ parameters.workload_zone }} + sap_system: ${{ parameters.sap_system }} jobs: - - job: Remove_SAP_systems - displayName: "Removing the SAP System" + - job: Remove_SAP_systems + displayName: "Removing the SAP System" variables: - - group: SDAF-${{ parameters.workload_environment }} + - group: SDAF-${{ parameters.workload_environment }} workspace: clean: all steps: - - template: templates\download.yaml + - template: templates\download.yaml - bash: | #!/bin/bash - green="\e[1;32m" ; reset="\e[0m" + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" + echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" echo "##vso[build.updatebuildnumber]Removing the SAP System defined in $(sap_system_folder)" echo -e "$green--- Validations ---$reset" - HOME_CONFIG=${CONFIG_REPO_PATH} - cd $HOME_CONFIG; mkdir -p .sap_deployment_automation - if [ ! -f SYSTEM/$(sap_system_folder)/$(sap_system_configuration) ]; then + HOME_CONFIG=${CONFIG_REPO_PATH} + cd $HOME_CONFIG; mkdir -p .sap_deployment_automation + if [ ! -f SYSTEM/$(sap_system_folder)/$(sap_system_configuration) ]; then echo -e "$boldred--- $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) was not found ---$reset" echo "##vso[task.logissue type=error]File SYSTEM/$(sap_system_folder)/$(sap_system_configuration) was not found." exit 2 - fi + fi - if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." - exit 2 - fi + if [ $USE_MSI != "true" ]; then - if [ -z $WL_ARM_CLIENT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." - exit 2 - fi + if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." + exit 2 + fi - if [ -z $WL_ARM_CLIENT_SECRET ]; then - echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." - exit 2 - fi + if [ $WL_ARM_SUBSCRIPTION_ID == '$$(ARM_SUBSCRIPTION_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ -z $WL_ARM_CLIENT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_CLIENT_ID == '$$(ARM_CLIENT_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined in the $(variable_group) variable group." 
+ exit 2 + fi + + if [ -z $WL_ARM_CLIENT_SECRET ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_CLIENT_SECRET == '$$(ARM_CLIENT_SECRET)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ -z $WL_ARM_TENANT_ID ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + if [ $WL_ARM_TENANT_ID == '$$(ARM_TENANT_ID)' ]; then + echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined in the $(variable_group) variable group." + exit 2 + fi + + fi - if [ -z $WL_ARM_TENANT_ID ]; then - echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." - exit 2 - fi # Check if running on deployer if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then - echo -e "$green --- Install dos2unix ---$reset" + echo -e "$green--- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix - echo -e "$green --- Install terraform ---$reset" + echo -e "$green--- Install terraform ---$reset" wget -q $(tf_url) return_code=$? if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." + exit 2 fi unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ rm -f terraform_$(tf_version)_linux_amd64.zip else - if [ $LOGON_USING_SPN == "true" ]; then - echo "Logon Using SPN" - - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + if [ $USE_MSI != "true" ]; then + + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_USE_MSI + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." 
+ exit $return_code + fi + else + echo "Deployment credentials: Managed Identity" + + export ARM_USE_MSI=true + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + unset ARM_TENANT_ID + az login --identity --allow-no-subscriptions --output none fi fi echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt --output none + az config set extension.use_dynamic_install=yes_without_prompt --output none --only-show-errors + + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none --only-show-errors - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID - if [ -z ${VARIABLE_GROUP_ID} ]; then + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 - fi - export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo PARENT_VARIABLE_GROUP_ID $PARENT_VARIABLE_GROUP_ID - if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then - echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." - exit 2 - fi + fi + export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); + + if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then + echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." 
+ exit 2 + fi echo -e "$green--- Convert config file to UX format ---$reset" - dos2unix -q $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) + dos2unix -q $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) echo -e "$green--- Read parameters ---$reset" - ENVIRONMENT=$(grep "^environment" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) - LOCATION=$(grep "^location" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - NETWORK=$(grep "^network_logical_name" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) - SID=$(grep "^sid" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) - - echo "Environment: $ENVIRONMENT" - echo "Location: $LOCATION" - echo "Network: $NETWORK" - echo "SID: $SID" - - ENVIRONMENT_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $1}' | xargs) ; - LOCATION_CODE=$(echo $(sap_system_folder) | awk -F'-' '{print $2}' | xargs) ; - NETWORK_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $3}' | xargs) ; - SID_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $4}' | xargs) ; - case "$LOCATION_CODE" in + ENVIRONMENT=$(grep "^environment" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) + LOCATION=$(grep "^location" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') + NETWORK=$(grep "^network_logical_name" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) + SID=$(grep "^sid" $HOME_CONFIG/SYSTEM/$(sap_system_folder)/$(sap_system_configuration) | awk -F'=' '{print $2}' | xargs) + + ENVIRONMENT_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $1}' | xargs) + LOCATION_CODE=$(echo $(sap_system_folder) | awk -F'-' '{print $2}' | xargs) + NETWORK_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $3}' | xargs) + SID_IN_FILENAME=$(echo $(sap_system_folder) | awk -F'-' '{print $4}' | xargs) + case "$LOCATION_CODE" in "AUCE") LOCATION_IN_FILENAME="australiacentral" ;; "AUC2") LOCATION_IN_FILENAME="australiacentral2" ;; "AUEA") LOCATION_IN_FILENAME="australiaeast" ;; @@ -250,299 +275,326 @@ stages: "WUS2") LOCATION_IN_FILENAME="westus2" ;; "WUS3") LOCATION_IN_FILENAME="westus3" ;; *) LOCATION_IN_FILENAME="westeurope" ;; - esac + esac - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" - echo "Network(filename): $NETWORK_IN_FILENAME" - echo "SID(filename): $SID_IN_FILENAME" - if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then + workload_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION_CODE$NETWORK; + echo "Workload Environment file: $workload_environment_file_name" + + echo "Environment: $ENVIRONMENT" + echo "Location: $LOCATION" + echo "Network: $NETWORK" + echo "SID: $SID" + echo "" + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Network(filename): $NETWORK_IN_FILENAME" + echo "SID(filename): $SID_IN_FILENAME" + + printf -v tempval '%s id:' $(variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $VARIABLE_GROUP_ID" + + printf -v tempval '%s id:' $(parent_variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val 
$PARENT_VARIABLE_GROUP_ID" + + echo "" + + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(sap_system_configuration) '$ENVIRONMENT' does not match the $(sap_system_configuration) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-[SID]" exit 2 - fi + fi - if [ $LOCATION != $LOCATION_IN_FILENAME ]; then + if [ $LOCATION != $LOCATION_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The location setting in $(sap_system_configuration) '$LOCATION' does not match the $(sap_system_configuration) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-[SID]" exit 2 - fi + fi - if [ $NETWORK != $NETWORK_IN_FILENAME ]; then + if [ $NETWORK != $NETWORK_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The network_logical_name setting in $(sap_system_configuration) '$NETWORK' does not match the $(sap_system_configuration) file name '$NETWORK_IN_FILENAME-. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-[SID]" exit 2 - fi + fi - if [ $SID != $SID_IN_FILENAME ]; then + if [ $SID != $SID_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The sid setting in $(sap_system_configuration) '$SID' does not match the $(sap_system_configuration) file name '$SID_IN_FILENAME-. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-[SID]" exit 2 - fi + fi # Check if running on deployer if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then - if [ $LOGON_USING_SPN == "true" ]; then - echo "Logon Using SPN" - - export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID - export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET - export ARM_TENANT_ID=$WL_ARM_TENANT_ID - export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID - export ARM_USE_MSI=false - az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none - return_code=$? - if [ 0 != $return_code ]; then - echo -e "$boldred--- Login failed ---$reset" - echo "##vso[task.logissue type=error]az login failed." - exit $return_code + if [ $USE_MSI != "true" ]; then + echo "Deployment credentials: Service Principal" + echo "Deployment credentials Id (SPN): $WL_ARM_CLIENT_SECRET" + + export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID + export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET + export ARM_TENANT_ID=$WL_ARM_TENANT_ID + export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID + export ARM_USE_MSI=false + az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none + return_code=$? + if [ 0 != $return_code ]; then + echo -e "$boldred--- Login failed ---$reset" + echo "##vso[task.logissue type=error]az login failed." 
+ exit $return_code
+ fi
+ else
+ export ARM_USE_MSI=true
+ az login --identity --allow-no-subscriptions --output none
fi
- else
- export ARM_USE_MSI=true
- az login --identity --allow-no-subscriptions --output none
- fi
else
- echo -e "$green --- Running on deployer ---$reset"
-
- if [ $LOGON_USING_SPN == "true" ]; then
- echo "Using SPN"
-
- export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
- export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
- export ARM_TENANT_ID=$WL_ARM_TENANT_ID
- export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
- export ARM_USE_MSI=false
- az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
- return_code=$?
- if [ 0 != $return_code ]; then
- echo -e "$boldred--- Login failed ---$reset"
- echo "##vso[task.logissue type=error]az login failed."
- exit $return_code
+ echo -e "$green--- Running on deployer ---$reset"
+
+ if [ $USE_MSI != "true" ]; then
+ echo "Deployment credentials: Service Principal"
+ echo "Deployment credentials Id (SPN): $WL_ARM_CLIENT_ID"
+
+ export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
+ export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
+ export ARM_TENANT_ID=$WL_ARM_TENANT_ID
+ export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
+ unset ARM_USE_MSI
+ az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
+ return_code=$?
+ if [ 0 != $return_code ]; then
+ echo -e "$boldred--- Login failed ---$reset"
+ echo "##vso[task.logissue type=error]az login failed."
+ exit $return_code
+ fi
+ else
+ echo "Deployment credentials: Managed Identity"
+ export ARM_USE_MSI=true
+ export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
+ unset ARM_TENANT_ID
+ az login --identity --allow-no-subscriptions --output none
fi
fi
echo -e "$green--- Set variables ---$reset"
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}Workload_Key_Vault.value" | tr -d \")
- if [ -z ${az_var} ]; then
- export workload_key_vault=$(cat "${workload_environment_file_name}" | grep workloadkeyvault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault}
- else
- export workload_key_vault="${az_var}" ; echo 'Workload Key Vault' ${workload_key_vault}
- fi
-
- if [ -n $(Deployer_Key_Vault) ]; then
- export key_vault=$(Deployer_Key_Vault) ; echo 'Deployer Key Vault' ${key_vault}
- else
- export key_vault=$(cat ${workload_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault}
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \")
- if [ -n "${az_var}" ]; then
- STATE_SUBSCRIPTION="${az_var}" ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION
- else
- STATE_SUBSCRIPTION=$(cat ${workload_environment_file_name} | grep STATE_SUBSCRIPTION= | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \")
- if [ -n "${az_var}" ]; then
- REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
- else
- REMOTE_STATE_SA=$(cat ${workload_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
- fi
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}Workload_Key_Vault.value" --out tsv)
+ if [ -z ${az_var} ]; then
+ export workload_key_vault=$(grep -m1 "^workloadkeyvault=" "${workload_environment_file_name}" | awk -F'=' '{print $2}' | xargs)
+ else
+ export workload_key_vault="${az_var}"
+ fi
+
+ if [ -n "$(Deployer_Key_Vault)" ]; then
+ export key_vault=$(Deployer_Key_Vault)
+ else
+ export key_vault=$(grep -m1 "^keyvault=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
+
+ az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --out tsv)
+ if [ -n "${az_var}" ]; then
+ STATE_SUBSCRIPTION="${az_var}"
+ else
+ STATE_SUBSCRIPTION=$(grep "^STATE_SUBSCRIPTION=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
+
+ az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv)
+ if [ -n "${az_var}" ]; then
+ REMOTE_STATE_SA="${az_var}"
+ else
+ REMOTE_STATE_SA=$(grep -m1 "^REMOTE_STATE_SA=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
+
+ echo "Deployer Key Vault: ${key_vault}"
+
+ echo "Workload Key Vault: ${workload_key_vault}"
+ echo "TF state subscription: $STATE_SUBSCRIPTION"
+ echo "TF state account: $REMOTE_STATE_SA"
+ echo "System configuration: $systemConfigurationFile"
echo -e "$green--- Run the remover script that destroys the SAP system ---$reset"
- cd $CONFIG_REPO_PATH/SYSTEM/$(sap_system_folder)
- ${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/remover.sh \
- --parameterfile $(sap_system_configuration) \
- --type sap_system \
- --state_subscription ${STATE_SUBSCRIPTION} \
- --storageaccountname "${REMOTE_STATE_SA}" \
- --auto-approve
- return_code=$?
+ cd $CONFIG_REPO_PATH/SYSTEM/$(sap_system_folder)
+ ${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/remover.sh \
+ --parameterfile $(sap_system_configuration) \
+ --type sap_system \
+ --state_subscription ${STATE_SUBSCRIPTION} \
+ --storageaccountname "${REMOTE_STATE_SA}" \
+ --auto-approve
+ return_code=$?
echo -e "$green--- Pull latest from DevOps Repository ---$reset"
- git checkout -q $(Build.SourceBranchName)
- git pull
+ git checkout -q $(Build.SourceBranchName)
+ git pull
#stop the pipeline after you have reset the whitelisting on your resources
echo "Return code from remover.sh $return_code."
if [ 0 != $return_code ]; then
- echo "##vso[task.logissue type=error]Return code from remover.sh $return_code."
- exit $return_code
+ echo "##vso[task.logissue type=error]Return code from remover.sh $return_code."
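+ # Note: exiting here skips the repository clean-up below, leaving the
+ # Terraform artefacts in the repo for troubleshooting the failed removal.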
+ exit $return_code
fi
echo -e "$green--- Add & update files in the DevOps Repository ---$reset"
- cd $(Build.Repository.LocalPath)
+ cd $(Build.Repository.LocalPath)
- changed=0
- # Pull changes
- git checkout -q $(Build.SourceBranchName)
- git pull origin $(Build.SourceBranchName)
+ changed=0
+ # Pull changes
+ git checkout -q $(Build.SourceBranchName)
+ git pull origin $(Build.SourceBranchName)
- if [ 0 == $return_code ]; then
+ if [ 0 == $return_code ]; then
if [ -d $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/.terraform ]; then
- git rm -q -r --ignore-unmatch -f $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/.terraform
- changed=1
+ git rm -q -r --ignore-unmatch -f $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/.terraform
+ changed=1
fi
if [ -f $(sap_system_configuration) ]; then
- git add $(sap_system_configuration)
- added=1
+ git add $(sap_system_configuration)
+ added=1
fi
if [ -f $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/sap-parameters.yaml ]; then
- git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/sap-parameters.yaml
- changed=1
+ git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/sap-parameters.yaml
+ changed=1
fi
if [ $(ls $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*_hosts.yaml | wc -l ) -gt 0 ] ; then
- git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*_hosts.yaml
- changed=1
+ git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*_hosts.yaml
+ changed=1
fi
if [ $(ls $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*.md | wc -l ) -gt 0 ] ; then
- git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*.md
- changed=1
+ git rm --ignore-unmatch -q $(Deployment_Configuration_Path)/SYSTEM/$(sap_system_folder)/*.md
+ changed=1
fi
if [ 1 == $changed ]; then
- git config --global user.email "$(Build.RequestedForEmail)"
- git config --global user.name "$(Build.RequestedFor)"
- git commit -m "Infrastructure for ${sap_system_folder} removed. [skip ci]"
- git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName)
+ git config --global user.email "$(Build.RequestedForEmail)"
+ git config --global user.name "$(Build.RequestedFor)"
+ git commit -m "Infrastructure for $(sap_system_folder) removed. 
[skip ci]" + git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) fi - fi + fi exit $return_code - displayName: "Remove SAP system" + displayName: "Remove SAP system" env: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - WL_ARM_SUBSCRIPTION_ID: $(ARM_SUBSCRIPTION_ID) - WL_ARM_CLIENT_ID: $(ARM_CLIENT_ID) - WL_ARM_CLIENT_SECRET: $(ARM_CLIENT_SECRET) - WL_ARM_TENANT_ID: $(ARM_TENANT_ID) - AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + WL_ARM_SUBSCRIPTION_ID: $(WL_ARM_SUBSCRIPTION_ID) + WL_ARM_CLIENT_ID: $(WL_ARM_CLIENT_ID) + WL_ARM_CLIENT_SECRET: $(WL_ARM_CLIENT_SECRET) + WL_ARM_TENANT_ID: $(WL_ARM_TENANT_ID) + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} - CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) - LOGON_USING_SPN: $(Logon_Using_SPN) - USE_MSI: $(Use_MSI) + CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) - failOnStderr: false + failOnStderr: false - stage: Remove_SAP_workload_zone displayName: "Removing the SAP workload zone" condition: and(not(failed()), not(canceled()), eq(${{ parameters.cleanup_zone }}, true)) variables: - - template: variables/10-remover-terraform-variables.yaml + - template: variables/10-remover-terraform-variables.yaml parameters: - deployer_environment: ${{ parameters.deployer_environment }} - workload_environment: ${{ parameters.workload_environment }} - workload_zone: ${{ parameters.workload_zone }} - sap_system: ${{ parameters.sap_system }} + deployer_environment: ${{ parameters.deployer_environment }} + workload_environment: ${{ parameters.workload_environment }} + workload_zone: ${{ parameters.workload_zone }} + sap_system: ${{ parameters.sap_system }} jobs: - - job: Remove_SAP_workload_zone - displayName: Remove the SAP Workload Zone + - job: Remove_SAP_workload_zone + displayName: Remove the SAP Workload Zone variables: - - group: SDAF-${{ parameters.workload_environment }} + - group: SDAF-${{ parameters.workload_environment }} workspace: - clean: all + clean: all steps: - - template: templates\download.yaml + - template: templates\download.yaml - bash: | #!/bin/bash - #!/bin/bash - green="\e[1;32m" ; reset="\e[0m" + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" echo "##vso[build.updatebuildnumber]Removing the SAP Workload zone defined in $(workload_zone_folder)" echo -e "$green--- Validations ---$reset" - HOME_CONFIG=${CONFIG_REPO_PATH} - cd $HOME_CONFIG; mkdir -p .sap_deployment_automation + HOME_CONFIG=${CONFIG_REPO_PATH} + cd $HOME_CONFIG; mkdir -p .sap_deployment_automation - if [ ! -f LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then + if [ ! -f LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) ]; then echo -e "$boldred--- $(workload_zone_configuration_file) was not found ---$reset" echo "##vso[task.logissue type=error]File $(workload_zone_configuration_file) was not found." exit 2 - fi + fi - if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then + if [ -z $WL_ARM_SUBSCRIPTION_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_SUBSCRIPTION_ID was not defined." exit 2 - fi + fi - if [ -z $WL_ARM_CLIENT_ID ]; then + if [ -z $WL_ARM_CLIENT_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_CLIENT_ID was not defined." 
exit 2 - fi + fi - if [ -z $WL_ARM_CLIENT_SECRET ]; then + if [ -z $WL_ARM_CLIENT_SECRET ]; then echo "##vso[task.logissue type=error]Variable ARM_CLIENT_SECRET was not defined." exit 2 - fi + fi - if [ -z $WL_ARM_TENANT_ID ]; then + if [ -z $WL_ARM_TENANT_ID ]; then echo "##vso[task.logissue type=error]Variable ARM_TENANT_ID was not defined." exit 2 - fi + fi # Check if running on deployer if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then - echo -e "$green --- Install dos2unix ---$reset" + echo -e "$green--- Install dos2unix ---$reset" sudo apt-get -qq install dos2unix - echo -e "$green --- Install terraform ---$reset" + echo -e "$green--- Install terraform ---$reset" wget -q $(tf_url) return_code=$? if [ 0 != $return_code ]; then - echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." - exit 2 + echo "##vso[task.logissue type=error]Unable to download Terraform version $(tf_version)." + exit 2 fi unzip -qq terraform_$(tf_version)_linux_amd64.zip ; sudo mv terraform /bin/ rm -f terraform_$(tf_version)_linux_amd64.zip else - source /etc/profile.d/deploy_server.sh + echo "sourcing /etc/profile.d/deploy_server.sh" + source /etc/profile.d/deploy_server.sh fi echo -e "$green--- Configure devops CLI extension ---$reset" - az config set extension.use_dynamic_install=yes_without_prompt --output none + az config set extension.use_dynamic_install=yes_without_prompt --output none - az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none + az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none - export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID - if [ -z ${VARIABLE_GROUP_ID} ]; then + export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(variable_group)'].id | [0]") + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(variable_group) could not be found." exit 2 - fi - export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo PARENT_VARIABLE_GROUP_ID $PARENT_VARIABLE_GROUP_ID - if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then - echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." - exit 2 - fi + fi + export PARENT_VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo PARENT_VARIABLE_GROUP_ID $PARENT_VARIABLE_GROUP_ID + if [ -z ${PARENT_VARIABLE_GROUP_ID} ]; then + echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." 
+ exit 2
+ fi
echo -e "$green--- Convert config file to UX format ---$reset"
- dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file)
+ dos2unix -q LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file)
echo -e "$green--- Read details ---$reset"
- ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs)
- LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z')
- NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs)
- echo Environment: ${ENVIRONMENT}
- echo Location: ${LOCATION}
- echo Network: ${NETWORK}
+ ENVIRONMENT=$(grep "^environment" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs)
+ LOCATION=$(grep "^location" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z')
+ NETWORK=$(grep "^network_logical_name" LANDSCAPE/$(workload_zone_folder)/$(workload_zone_configuration_file) | awk -F'=' '{print $2}' | xargs)
- ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs )
- LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs )
- case "$LOCATION_CODE" in
+ ENVIRONMENT_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $1}' | xargs )
+ LOCATION_CODE=$(echo $(workload_zone_folder) | awk -F'-' '{print $2}' | xargs )
+ case "$LOCATION_CODE" in
"AUCE") LOCATION_IN_FILENAME="australiacentral" ;;
"AUC2") LOCATION_IN_FILENAME="australiacentral2" ;;
"AUEA") LOCATION_IN_FILENAME="australiaeast" ;;
@@ -599,187 +651,208 @@ stages:
"WUS2") LOCATION_IN_FILENAME="westus2" ;;
"WUS3") LOCATION_IN_FILENAME="westus3" ;;
*) LOCATION_IN_FILENAME="westeurope" ;;
- esac
-
- NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs )
- echo "Environment(filename): $ENVIRONMENT_IN_FILENAME"
- echo "Location(filename): $LOCATION_IN_FILENAME"
- echo "Network(filename): $NETWORK_IN_FILENAME"
-
- if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then
+ esac
+
+ NETWORK_IN_FILENAME=$(echo $(workload_zone_folder) | awk -F'-' '{print $3}' | xargs )
+ workload_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION_CODE$NETWORK;
+ echo "Workload Environment file: $workload_environment_file_name"
+ echo "Environment: ${ENVIRONMENT}"
+ echo "Location: ${LOCATION}"
+ echo "Network: ${NETWORK}"
+ echo ""
+ echo "Environment(filename): $ENVIRONMENT_IN_FILENAME"
+ echo "Location(filename): $LOCATION_IN_FILENAME"
+ echo "Network(filename): $NETWORK_IN_FILENAME"
+ echo ""
+
+ printf -v tempval '%s id:' $(variable_group)
+ printf -v val '%-20s' "${tempval}"
+ echo "$val $VARIABLE_GROUP_ID"
+
+ printf -v tempval '%s id:' $(parent_variable_group)
+ printf -v val '%-20s' "${tempval}"
+ echo "$val $PARENT_VARIABLE_GROUP_ID"
+
+ if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then
echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the 
$(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
exit 2
- fi
+ fi
- if [ $LOCATION != $LOCATION_IN_FILENAME ]; then
+ if [ $LOCATION != $LOCATION_IN_FILENAME ]; then
echo "##vso[task.logissue type=error]The location setting in $(workload_zone_configuration_file) '$LOCATION' does not match the $(workload_zone_configuration_file) file name '$LOCATION_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
exit 2
- fi
+ fi
- if [ $NETWORK != $NETWORK_IN_FILENAME ]; then
+ if [ $NETWORK != $NETWORK_IN_FILENAME ]; then
echo "##vso[task.logissue type=error]The network_logical_name setting in $(workload_zone_configuration_file) '$NETWORK' does not match the $(workload_zone_configuration_file) file name '$NETWORK_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
exit 2
- fi
+ fi
- if [ -z $(Deployer_Key_Vault) ]; then
+ if [ -z "$(Deployer_Key_Vault)" ]; then
if [ ! -f ${workload_environment_file_name} ]; then
- echo -e "$boldred--- $workload_environment_file_name was not found ---$reset"
- echo "##vso[task.logissue type=error]Workload Zone configuration file ${workload_environment_file_name} was not found."
- exit 2
+ echo -e "$boldred--- $workload_environment_file_name was not found ---$reset"
+ echo "##vso[task.logissue type=error]Workload Zone configuration file ${workload_environment_file_name} was not found."
+ exit 2
fi
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query $NETWORK"Workload_Key_Vault.value")
- if [ -z ${az_var} ]; then
- export workload_key_vault=$(cat "${workload_environment_file_name}" | grep workloadkeyvault | awk -F'=' '{print $2}' | xargs) ; echo 'Workload Key Vault' ${workload_key_vault}
- else
- export workload_key_vault="${az_var}" ; echo 'Workload Key Vault' ${workload_key_vault}
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \")
- if [ -n "${az_var}" ]; then
- key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault}
- else
- key_vault=$(cat ${workload_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault}
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" | tr -d \")
- if [ -n "${az_var}" ]; then
- STATE_SUBSCRIPTION="${az_var}" ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION
- else
- STATE_SUBSCRIPTION=$(cat ${workload_environment_file_name} | grep STATE_SUBSCRIPTION | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION
- fi
-
- az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \")
- if [ -n "${az_var}" ]; then
- REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
- else
- REMOTE_STATE_SA=$(cat ${workload_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
- fi
+ fi
# Check if running on deployer
- if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}Workload_Key_Vault.value" --out tsv)
+ if [ -z ${az_var} ]; then
+ export workload_key_vault=$(grep -m1 "^workloadkeyvault=" "${workload_environment_file_name}" | awk -F'=' '{print $2}' | xargs)
+ else
+ export workload_key_vault="${az_var}"
+ fi
- az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
- return_code=$?
- if [ 0 != $return_code ]; then
- echo -e "$boldred--- Login failed ---$reset"
- echo "##vso[task.logissue type=error]az login failed."
- exit $return_code
- fi
+ if [ -n "$(Deployer_Key_Vault)" ]; then
+ export key_vault=$(Deployer_Key_Vault)
+ else
+ export key_vault=$(grep -m1 "^keyvault=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
+
+ az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --out tsv)
+ if [ -n "${az_var}" ]; then
+ STATE_SUBSCRIPTION="${az_var}"
+ else
+ STATE_SUBSCRIPTION=$(grep "^STATE_SUBSCRIPTION=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
+
+ az_var=$(az pipelines variable-group variable list --group-id ${PARENT_VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv)
+ if [ -n "${az_var}" ]; then
+ REMOTE_STATE_SA="${az_var}"
else
- echo -e "$green --- Running on deployer ---$reset"
+ REMOTE_STATE_SA=$(grep "^REMOTE_STATE_SA=" ${workload_environment_file_name} | awk -F'=' '{print $2}' | xargs)
+ fi
- if [ $LOGON_USING_SPN == "true" ]; then
- echo "Using SPN"
+ echo "Workload Key Vault: ${workload_key_vault}"
+ echo "Deployer Key Vault: ${key_vault}"
+ echo "Terraform state subscription: $STATE_SUBSCRIPTION"
+ echo "Terraform state account: $REMOTE_STATE_SA"
- export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
- export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
- export ARM_TENANT_ID=$WL_ARM_TENANT_ID
- export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
- export ARM_USE_MSI=false
- az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
# Check if running on deployer
+ if [[ ! -f /etc/profile.d/deploy_server.sh ]]; then
+
+ az login --service-principal --username $WL_ARM_CLIENT_ID --password=$WL_ARM_CLIENT_SECRET --tenant $WL_ARM_TENANT_ID --output none
return_code=$?
if [ 0 != $return_code ]; then
- echo -e "$boldred--- Login failed ---$reset"
- echo "##vso[task.logissue type=error]az login failed."
- exit $return_code
+ echo -e "$boldred--- Login failed ---$reset"
+ echo "##vso[task.logissue type=error]az login failed."
+ exit $return_code
+ fi
+ else
+ echo -e "$green--- Running on deployer ---$reset"
+
+ if [ "${USE_MSI}" != "true" ]; then
+
+ echo -e "$cyan--- Remove using Service Principals ---$reset"
+ export ARM_CLIENT_ID=$WL_ARM_CLIENT_ID
+ export ARM_CLIENT_SECRET=$WL_ARM_CLIENT_SECRET
+ export ARM_TENANT_ID=$WL_ARM_TENANT_ID
+ export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
+ export ARM_USE_MSI=false
+ # --password= (note the '=') keeps a client secret that begins with '-' from being parsed as a CLI flag
+ az login --service-principal --username "${WL_ARM_CLIENT_ID}" --password="${WL_ARM_CLIENT_SECRET}" --tenant "${WL_ARM_TENANT_ID}" --output none
+ return_code=$?
+ if [ 0 != $return_code ]; then
+ echo -e "$boldred--- Login failed ---$reset"
+ echo "##vso[task.logissue type=error]az login failed."
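+ # Note: az login's own exit status is propagated below (rather than a
+ # fixed 'exit 2') so a login failure can be told apart from the
+ # configuration validation errors above.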
+ exit $return_code
+ fi
+ else
+ export ARM_USE_MSI=true
+ export ARM_SUBSCRIPTION_ID=$WL_ARM_SUBSCRIPTION_ID
+ unset ARM_TENANT_ID
+ az login --identity --allow-no-subscriptions --output none
fi
fi
- echo -e "$green--- Run the remover script that destroys the SAP workload zone (landscape) ---$reset"
- cd $CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)
+ echo -e "$green--- Run the remover script that destroys the SAP workload zone (landscape) ---$reset"
+ cd "$CONFIG_REPO_PATH/LANDSCAPE/$(workload_zone_folder)"
- $SAP_AUTOMATION_REPO_PATH/deploy/scripts/remover.sh \
- --parameterfile $(workload_zone_configuration_file) \
- --type sap_landscape \
- --state_subscription ${STATE_SUBSCRIPTION} \
- --storageaccountname "${REMOTE_STATE_SA}" \
- --auto-approve \
- --ado
+ $SAP_AUTOMATION_REPO_PATH/deploy/scripts/remover.sh \
+ --parameterfile $(workload_zone_configuration_file) \
+ --type sap_landscape \
+ --state_subscription ${STATE_SUBSCRIPTION} \
+ --storageaccountname "${REMOTE_STATE_SA}" \
+ --auto-approve \
+ --ado
- return_code=$?
+ return_code=$?
-
- #stop the pipeline after you have reset the whitelisting on your resources
- echo "Return code from remover.sh $return_code."
- if [ 0 != $return_code ]; then
+ #stop the pipeline after you have reset the whitelisting on your resources
+ echo "Return code from remover.sh $return_code."
+ if [ 0 != $return_code ]; then
echo "##vso[task.logissue type=error]Return code from remover.sh $return_code."
exit $return_code
- fi
+ fi
echo -e "$green--- Add & update files in the DevOps Repository ---$reset"
- cd $(Build.Repository.LocalPath)
- changed=0
- git checkout -q $(Build.SourceBranchName)
- git pull
+ cd $(Build.Repository.LocalPath)
+ changed=0
+ git checkout -q $(Build.SourceBranchName)
+ git pull
- if [ 0 == $return_code ]; then
+ if [ 0 == $return_code ]; then
- if [ -f ${workload_environment_file_name} ]; then
- git rm -q -f ${workload_environment_file_name}
- echo "Removed ${workload_environment_file_name}"
+ if [ -f "${workload_environment_file_name}" ]; then
+ git rm -q -f ${workload_environment_file_name}
+ echo "Removed ${workload_environment_file_name}"
- changed=1
+ changed=1
fi
- if [ -f ${workload_environment_file_name}.md ]; then
- git rm -q --ignore-unmatch -f ${workload_environment_file_name}.md
- changed=1
+ if [ -f "${workload_environment_file_name}.md" ]; then
+ git rm -q --ignore-unmatch -f ${workload_environment_file_name}.md
+ changed=1
fi
if [ -d $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform ]; then
- git rm -r --ignore-unmatch -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform
- changed=1
+ git rm -r --ignore-unmatch -f $(Deployment_Configuration_Path)/LANDSCAPE/$(workload_zone_folder)/.terraform
+ changed=1
fi
if [ 1 == $changed ] ; then
- git config --global user.email "$(Build.RequestedForEmail)"
- git config --global user.name "$(Build.RequestedFor)"
- git commit -m "Workload zone ${workload_zone_folder} removal.[skip ci]"
- git -c http.extraheader="AUTHORIZATION: bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName)
+ git config --global user.email "$(Build.RequestedForEmail)"
+ git config --global user.name "$(Build.RequestedFor)"
+ git commit -m "Workload zone $(workload_zone_folder) removal. [skip ci]"
+ git -c http.extraheader="AUTHORIZATION: 
bearer $(System.AccessToken)" push --set-upstream origin $(Build.SourceBranchName) fi - echo -e "$green--- Deleting variables ---$reset" - if [ -n $VARIABLE_GROUP_ID ]; then - echo "Deleting variables" + echo -e "$green--- Deleting variables ---$reset" + if [ -n "${VARIABLE_GROUP_ID}" ]; then + echo "Deleting variables" - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query ${NETWORK}"Workload_Key_Vault.value") - if [ -n "${az_var}" ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Key_Vault" --yes --only-show-errors - fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query ${NETWORK}"Workload_Key_Vault.value") + if [ -n "${az_var}" ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Key_Vault" --yes --only-show-errors + fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query $NETWORK"Workload_Zone_State_FileName.value") - if [ -n "${az_var}" ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Zone_State_FileName" --yes --only-show-errors - fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query $NETWORK"Workload_Zone_State_FileName.value") + if [ -n "${az_var}" ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Zone_State_FileName" --yes --only-show-errors + fi - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output table) - if [ -n "${az_var}" ]; then - az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Secret_Prefix" --yes --only-show-errors - fi + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "${NETWORK}"Workload_Secret_Prefix.value --output table) + if [ -n "${az_var}" ]; then + az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name $NETWORK"Workload_Secret_Prefix" --yes --only-show-errors + fi fi - fi + fi exit $return_code - displayName: Remove SAP workload_zone + displayName: Remove SAP workload_zone env: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) - WL_ARM_SUBSCRIPTION_ID: $(ARM_SUBSCRIPTION_ID) - WL_ARM_CLIENT_ID: $(ARM_CLIENT_ID) - WL_ARM_CLIENT_SECRET: $(ARM_CLIENT_SECRET) - WL_ARM_TENANT_ID: $(ARM_TENANT_ID) - AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + WL_ARM_SUBSCRIPTION_ID: $(WL_ARM_SUBSCRIPTION_ID) + WL_ARM_CLIENT_ID: $(WL_ARM_CLIENT_ID) + WL_ARM_CLIENT_SECRET: $(WL_ARM_CLIENT_SECRET) + WL_ARM_TENANT_ID: $(WL_ARM_TENANT_ID) + AZURE_DEVOPS_EXT_PAT: $(System.AccessToken) SAP_AUTOMATION_REPO_PATH: ${{ parameters.sap_automation_repo_path }} - CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) - LOGON_USING_SPN: $(Logon_Using_SPN) - USE_MSI: $(Use_MSI) + CONFIG_REPO_PATH: ${{ parameters.config_repo_path }}/$(Deployment_Configuration_Path) + LOGON_USING_SPN: $(Logon_Using_SPN) + USE_MSI: $(Use_MSI) - failOnStderr: false + failOnStderr: false diff --git a/deploy/pipelines/11-remover-arm-fallback.yaml b/deploy/pipelines/11-remover-arm-fallback.yaml index bc2d5d8e38..1baad59df9 100644 --- a/deploy/pipelines/11-remover-arm-fallback.yaml +++ b/deploy/pipelines/11-remover-arm-fallback.yaml @@ -319,7 +319,7 @@ 
stages: return_code=0 export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]"); echo "Variable group: " $VARIABLE_GROUP_ID - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "CP_ARM_SUBSCRIPTION_ID.value" | tr -d \") + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "CP_ARM_SUBSCRIPTION_ID.value" --out tsv) if [ -z $variable_value ]; then subscription=$ARM_SUBSCRIPTION_ID else @@ -406,42 +406,42 @@ stages: if [ ${#VARIABLE_GROUP_ID} != 0 ]; then echo "Deleting variables" - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Account_Name --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Resource_Group_Name --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Subscription.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Terraform_Remote_Storage_Subscription --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_State_FileName.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_State_FileName --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name Deployer_Key_Vault --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_URL_BASE.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_URL_BASE --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query 
"WEBAPP_IDENTITY.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_IDENTITY.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_IDENTITY --yes --only-show-errors fi - variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" ) + variable_value=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "WEBAPP_ID.value" --out tsv) if [ ${#variable_value} != 0 ]; then az pipelines variable-group variable delete --group-id ${VARIABLE_GROUP_ID} --name WEBAPP_ID --yes --only-show-errors fi diff --git a/deploy/pipelines/12-remove-control-plane.yaml b/deploy/pipelines/12-remove-control-plane.yaml index 4ed304869e..3c101705d1 100644 --- a/deploy/pipelines/12-remove-control-plane.yaml +++ b/deploy/pipelines/12-remove-control-plane.yaml @@ -66,9 +66,7 @@ stages: set -u echo "##vso[build.updatebuildnumber]Removing the control plane defined in $(deployer_folder) $(library_folder)" - green="\e[1;32m" ; reset="\e[0m" - - + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" # echo -e "$green--- Checkout $(Build.SourceBranchName) ---$reset" # git fetch -q --all @@ -87,7 +85,7 @@ stages: az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") - echo '$(parent_variable_group) id: ' $VARIABLE_GROUP_ID + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." exit 2 @@ -131,8 +129,8 @@ stages: rm -f terraform_$(tf_version)_linux_amd64.zip fi if [ $USE_MSI != "true" ]; then - echo "Login using SPN" - export ARM_USE_MSI=false + echo -e "$cyan--- Remove using Service Principals ---$reset" + unset ARM_USE_MSI az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none return_code=$? 
if [ 0 != $return_code ]; then @@ -141,6 +139,7 @@ stages: exit $return_code fi else + echo -e "$cyan--- Remove using Managed Identity ---$reset" source /etc/profile.d/deploy_server.sh export ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID export ARM_USE_MSI=true @@ -152,12 +151,10 @@ stages: dos2unix -q $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) dos2unix -q $CONFIG_REPO_PATH/LIBRARY/$(library_folder)/$(library_configuration_file) - echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" + echo -e "$green--- Environment information ---$reset" ENVIRONMENT=$(grep "^environment" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs) LOCATION=$(grep "^location" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} ENVIRONMENT_IN_FILENAME=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs ) LOCATION_CODE=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs ) @@ -220,8 +217,12 @@ stages: *) LOCATION_IN_FILENAME="westeurope" ;; esac - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Environment: ${ENVIRONMENT}" + echo "Location: ${LOCATION}" + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "" + if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. 
Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE"
@@ -233,41 +234,49 @@ stages:
exit 2
fi
- deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION_CODE; echo "Environment file: " $deployer_environment_file_name
+ deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION_CODE;
+ echo "Environment file: $deployer_environment_file_name"
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \")
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv)
if [ -n "${az_var}" ]; then
- key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault}
+ key_vault="${az_var}"
else
echo "Reading key vault from environment file"
- key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault}
+ key_vault=$(grep -m1 "^keyvault=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs)
fi
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" | tr -d \")
+ export STATE_SUBSCRIPTION=$ARM_SUBSCRIPTION_ID
+
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Account_Name.value" --out tsv)
if [ -n "${az_var}" ]; then
- REMOTE_STATE_SA="${az_var}" ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
+ REMOTE_STATE_SA="${az_var}"
+
else
echo "Reading storage account from environment file"
- REMOTE_STATE_SA=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_SA | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file storage account' $REMOTE_STATE_SA
+ REMOTE_STATE_SA=$(grep -m1 "^REMOTE_STATE_SA=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs)
fi
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" | tr -d \")
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Terraform_Remote_Storage_Resource_Group_Name.value" --out tsv)
if [ -n "${az_var}" ]; then
- REMOTE_STATE_RG="${az_var}" ; echo 'Terraform state file resource group' $REMOTE_STATE_RG
+ REMOTE_STATE_RG="${az_var}"
else
- REMOTE_STATE_RG=$(cat ${deployer_environment_file_name} | grep REMOTE_STATE_RG | awk -F'=' '{print $2}' | xargs) ; echo 'Terraform state file resource group' $REMOTE_STATE_RG
+ REMOTE_STATE_RG=$(grep "^REMOTE_STATE_RG" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs)
fi
- az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" | tr -d \")
+ az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "DEPLOYER_RANDOM_ID_SEED.value" --out tsv)
if [ -n "${az_var}" ]; then
deployer_random_id="${az_var}"
else
if [ -f ${deployer_environment_file_name} ] ; then
- deployer_random_id=$(cat ${deployer_environment_file_name} | grep deployer_random_id= | awk -F'=' '{print $2}' | xargs)
+ deployer_random_id=$(grep "^deployer_random_id=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs)
fi
fi
- export STATE_SUBSCRIPTION=$ARM_SUBSCRIPTION_ID ; echo 'Terraform state file subscription' $STATE_SUBSCRIPTION
+ echo "Terraform state subscription: 
$STATE_SUBSCRIPTION" + echo "Terraform state rg name: $REMOTE_STATE_RG" + echo "Terraform state account: $REMOTE_STATE_SA" + echo "Deployer Key Vault: ${key_vault}" + if [ -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip ]; then pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') unzip -qq -o -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip -d ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder) @@ -290,7 +299,7 @@ stages: return_code=$? - echo "Return code from remove_controlplane $return_code." + echo "Return code from remove_controlplane: $return_code." echo -e "$green--- Remove Control Plane Part 1 ---$reset" cd $CONFIG_REPO_PATH @@ -306,7 +315,7 @@ stages: echo "Compressing the state file." sudo apt install zip pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') - zip -j -P "${pass}" DEPLOYER/$(deployer_folder)/state DEPLOYER/$(deployer_folder)/terraform.tfstate + zip -q -j -P "${pass}" DEPLOYER/$(deployer_folder)/state DEPLOYER/$(deployer_folder)/terraform.tfstate git add -f DEPLOYER/$(deployer_folder)/state.zip changed=1 fi @@ -318,7 +327,7 @@ stages: sudo apt install zip echo "Compressing the library state file" pass=$(echo $DEPLOYER_RANDOM_ID_SEED | sed 's/-//g') - zip -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate + zip -q -j -P "${pass}" ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/terraform.tfstate git add -f ${CONFIG_REPO_PATH}/LIBRARY/$(library_folder)/state.zip changed=1 fi @@ -417,15 +426,15 @@ stages: inlineScript: | #!/bin/bash echo "##vso[build.updatebuildnumber]Removing the control plane defined in $(deployer_folder) $(library_folder)" - green="\e[1;32m" ; reset="\e[0m" + green="\e[1;32m" ; reset="\e[0m" ; boldred="\e[1;31m" ; cyan="\e[1;36m" export ARM_USE_MSI=false if [ $USE_MSI != "true" ]; then - echo "use Service Principal" + echo -e "$cyan--- Remove using Service Principals ---$reset" export ARM_CLIENT_ID=$CP_ARM_CLIENT_ID export ARM_TENANT_ID=$CP_ARM_TENANT_ID export ARM_CLIENT_SECRET=$CP_ARM_CLIENT_SECRET else - echo "use MSI" + echo -e "$cyan--- Remove using Managed Identity ---$reset" export ARM_CLIENT_ID=$servicePrincipalId export ARM_TENANT_ID=$tenantId export ARM_CLIENT_SECRET=$servicePrincipalKey @@ -468,7 +477,11 @@ stages: az devops configure --defaults organization=$(System.CollectionUri) project='$(System.TeamProject)' --output none export VARIABLE_GROUP_ID=$(az pipelines variable-group list --query "[?name=='$(parent_variable_group)'].id | [0]") - echo '$(variable_group) id: ' $VARIABLE_GROUP_ID + + printf -v tempval '%s id:' $(parent_variable_group) + printf -v val '%-20s' "${tempval}" + echo "$val $VARIABLE_GROUP_ID" + if [ -z ${VARIABLE_GROUP_ID} ]; then echo "##vso[task.logissue type=error]Variable group $(parent_variable_group) could not be found." 
exit 2 @@ -500,12 +513,10 @@ stages: dos2unix -q $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) dos2unix -q $CONFIG_REPO_PATH/LIBRARY/$(library_folder)/$(library_configuration_file) - echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" + echo -e "$green--- Environment information ---$reset" ENVIRONMENT=$(grep "^environment" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs) LOCATION=$(grep "^location" $CONFIG_REPO_PATH/DEPLOYER/$(deployer_folder)/$(deployer_configuration_file) | awk -F'=' '{print $2}' | xargs | tr 'A-Z' 'a-z') - echo Environment: ${ENVIRONMENT} - echo Location: ${LOCATION} ENVIRONMENT_IN_FILENAME=$(echo $(deployer_folder) | awk -F'-' '{print $1}' | xargs ) LOCATION_CODE=$(echo $(deployer_folder) | awk -F'-' '{print $2}' | xargs ) @@ -568,8 +579,13 @@ stages: *) LOCATION_IN_FILENAME="westeurope" ;; esac - echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" - echo "Location(filename): $LOCATION_IN_FILENAME" + echo "Environment: ${ENVIRONMENT}" + echo "Location: ${LOCATION}" + echo "Location code: ${LOCATION_CODE}" + + echo "Environment(filename): $ENVIRONMENT_IN_FILENAME" + echo "Location(filename): $LOCATION_IN_FILENAME" + echo "" if [ $ENVIRONMENT != $ENVIRONMENT_IN_FILENAME ]; then echo "##vso[task.logissue type=error]The environment setting in $(workload_zone_configuration_file) '$ENVIRONMENT' does not match the $(workload_zone_configuration_file) file name '$ENVIRONMENT_IN_FILENAME'. Filename should have the pattern [ENVIRONMENT]-[REGION_CODE]-[NETWORK_LOGICAL_NAME]-INFRASTRUCTURE" @@ -583,13 +599,15 @@ stages: echo -e "$green--- Running the remove region script that destroys deployer VM and SAP library ---$reset" - deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION; echo "Environment file: " $deployer_environment_file_name + deployer_environment_file_name=$HOME/.sap_deployment_automation/$ENVIRONMENT$LOCATION_CODE + echo "Environment file: $deployer_environment_file_name" echo -e "$green--- az login ---$reset" if [ $USE_MSI != "true" ]; then - echo "Login using SPN" - export ARM_USE_MSI=false + echo -e "$cyan--- Remove using Service Principals ---$reset" + + unset ARM_USE_MSI az login --service-principal --username $ARM_CLIENT_ID --password=$ARM_CLIENT_SECRET --tenant $ARM_TENANT_ID --output none return_code=$? 
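# Note (assumption): ARM_USE_MSI is unset above rather than exported as "false"
# so that helper scripts which only test whether the variable is set cannot
# mistake this Service Principal session for a managed-identity one.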
if [ 0 != $return_code ]; then @@ -600,14 +618,16 @@ stages: fi az account set --subscription $ARM_SUBSCRIPTION_ID - az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" | tr -d \") + az_var=$(az pipelines variable-group variable list --group-id ${VARIABLE_GROUP_ID} --query "Deployer_Key_Vault.value" --out tsv) if [ -n "${az_var}" ]; then - key_vault="${az_var}" ; echo 'Deployer Key Vault' ${key_vault} + key_vault="${az_var}" else echo "Reading key vault from environment file" - key_vault=$(cat ${deployer_environment_file_name} | grep keyvault= -m1 | awk -F'=' '{print $2}' | xargs) ; echo 'Deployer Key Vault' ${key_vault} + key_vault=$(grep -m1 "^keyvault=" ${deployer_environment_file_name} | awk -F'=' '{print $2}' | xargs) fi + echo "Deployer Key Vault: $key_vault" + key_vault_id=$(az resource list --name "${key_vault}" --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) if [ -n "${key_vault_id}" ]; then diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index c0ac01fdd0..bb5181f51c 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -466,7 +466,7 @@ Write-Host "Creating the variable group SDAF-General" -ForegroundColor Green $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) if ($general_group_id.Length -eq 0) { - az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.9.5" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none + az pipelines variable-group create --name SDAF-General --variables ANSIBLE_HOST_KEY_CHECKING=false Deployment_Configuration_Path=WORKSPACES Branch=main tf_version="1.9.8" ansible_core_version="2.15" S-Username=$SUserName S-Password=$SPassword --output yaml --authorize true --output none $general_group_id = (az pipelines variable-group list --query "[?name=='SDAF-General'].id | [0]" --only-show-errors) az pipelines variable-group variable update --group-id $general_group_id --name "S-Password" --value $SPassword --secret true --output none --only-show-errors } diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index 70a6887638..9c1c9c7d31 100644 --- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -230,7 +230,7 @@ if ($authenticationMethod -eq "Service Principal") { $GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors ) if ($GroupID.Length -eq 0) { Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green - az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_CLIENT_ID=$ARM_CLIENT_ID ARM_OBJECT_ID=$ARM_OBJECT_ID ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID ARM_TENANT_ID=$ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false --output none --authorize true + az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' WL_ARM_CLIENT_ID=$ARM_CLIENT_ID WL_ARM_OBJECT_ID=$ARM_OBJECT_ID WL_ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET 
WL_ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID WL_ARM_TENANT_ID=$ARM_TENANT_ID POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=true USE_MSI=false --output none --authorize true
$GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors)
}
@@ -241,7 +241,7 @@ else {
$GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors )
if ($GroupID.Length -eq 0) {
Write-Host "Creating the variable group" $WorkloadZonePrefix -ForegroundColor Green
- az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false USE_MSI=true --output none --authorize true
+ az pipelines variable-group create --name $WorkloadZonePrefix --variables Agent='Azure Pipelines' WL_ARM_SUBSCRIPTION_ID=$Workload_zone_subscriptionID POOL=$Pool_Name AZURE_CONNECTION_NAME=$Service_Connection_Name TF_LOG=OFF Logon_Using_SPN=false USE_MSI=true --output none --authorize true
$GroupID = (az pipelines variable-group list --query "[?name=='$WorkloadZonePrefix'].id | [0]" --only-show-errors)
}
}
@@ -250,9 +250,9 @@ if ($authenticationMethod -eq "Service Principal") {
$Env:AZURE_DEVOPS_EXT_AZURE_RM_SERVICE_PRINCIPAL_KEY = $ARM_CLIENT_SECRET
- az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --output none --only-show-errors
- az pipelines variable-group variable update --group-id $GroupID --name "ARM_CLIENT_ID" --value $ARM_CLIENT_ID --output none --only-show-errors
- az pipelines variable-group variable update --group-id $GroupID --name "ARM_OBJECT_ID" --value $ARM_OBJECT_ID --output none --only-show-errors
+ az pipelines variable-group variable update --group-id $GroupID --name "WL_ARM_CLIENT_SECRET" --value $ARM_CLIENT_SECRET --secret true --output none --only-show-errors
+ az pipelines variable-group variable update --group-id $GroupID --name "WL_ARM_CLIENT_ID" --value $ARM_CLIENT_ID --output none --only-show-errors
+ az pipelines variable-group variable update --group-id $GroupID --name "WL_ARM_OBJECT_ID" --value $ARM_OBJECT_ID --output none --only-show-errors
$epExists = (az devops service-endpoint list --query "[?name=='$Service_Connection_Name'].name | [0]")
diff --git a/deploy/scripts/advanced_state_management.sh b/deploy/scripts/advanced_state_management.sh
index 16dd4d1d8b..b2c2198e90 100755
--- a/deploy/scripts/advanced_state_management.sh
+++ b/deploy/scripts/advanced_state_management.sh
@@ -196,14 +196,20 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/
system_config_information="${automation_config_directory}""${environment}""${region_code}"
#Plugins
-if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]
-then
+isInCloudShellCheck=$(checkIfCloudShell)
+
+if checkIfCloudShell; then
+ mkdir -p "${HOME}/.terraform.d/plugin-cache"
+ export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache"
+else
+ if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]; then
-d /opt/terraform/.terraform.d/plugin-cache ]; then
         mkdir -p /opt/terraform/.terraform.d/plugin-cache
+        sudo chown -R $USER /opt/terraform
+    fi
+    export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache
 fi
-sudo chown -R $USER:$USER /opt/terraform
-
-export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache
+# export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache

 set_executing_user_environment_variables "none"

diff --git a/deploy/scripts/deploy_controlplane.sh b/deploy/scripts/deploy_controlplane.sh
index 610c592e5b..77bba5e664 100755
--- a/deploy/scripts/deploy_controlplane.sh
+++ b/deploy/scripts/deploy_controlplane.sh
@@ -30,12 +30,19 @@ resetformatting="\e[0m"
 full_script_path="$(realpath "${BASH_SOURCE[0]}")"
 script_directory="$(dirname "${full_script_path}")"

+if [[ -f /etc/profile.d/deploy_server.sh ]]; then
+    path=$(grep -m 1 "export PATH=" /etc/profile.d/deploy_server.sh | awk -F'=' '{print $2}' | xargs)
+    export PATH=$path
+fi
+
 #call stack has full scriptname when using source
 source "${script_directory}/deploy_utils.sh"

 #helper files
 source "${script_directory}/helpers/script_helpers.sh"

 force=0
 recover=0
 ado_flag=""
@@ -72,7 +79,19 @@ do
     esac
 done

-echo "ADO flag ${ado_flag}"
+echo "ADO flag: ${ado_flag}"
+
+key=$(basename "${deployer_parameter_file}" | cut -d. -f1)
+deployer_tfstate_key="${key}.terraform.tfstate"
+
+key=$(basename "${library_parameter_file}" | cut -d. -f1)
+library_tfstate_key="${key}.terraform.tfstate"
+
+echo "Deployer State File: ${deployer_tfstate_key}"
+echo "Library State File: ${library_tfstate_key}"
+echo "Deployer Subscription: ${subscription}"

 this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1
 root_dirname=$(pwd)
@@ -115,7 +134,8 @@ fi
 # Convert the region to the correct code
 get_region_code "$region"

-echo "Region code for deployment: $region_code"
+echo "Region code: ${region_code}"
+

 automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation
 generic_config_information="${automation_config_directory}"/config
@@ -129,6 +149,9 @@ fi

 init "${automation_config_directory}" "${generic_config_information}" "${deployer_config_information}"

+save_config_var "deployer_tfstate_key" "${deployer_config_information}"
+
+
 # Check that the exports ARM_SUBSCRIPTION_ID and SAP_AUTOMATION_REPO_PATH are defined
 validate_exports
 return_code=$?
@@ -157,25 +180,25 @@ relative_path="${deployer_dirname}"
 export TF_DATA_DIR="${relative_path}"/.terraform

 step=0
-
+echo ""
 echo "#########################################################################################"
 echo "# #"
 echo -e "# $cyan Starting the control plane deployment $resetformatting #"
 echo "# #"
 echo "#########################################################################################"
-
+echo ""
 noAccess=$( az account show --query name | grep "N/A(tenant level account)")

 if [ -n "$noAccess" ]; then
-    echo "#########################################################################################"
-    echo "# #"
-    echo -e "# $boldred The provided credentials do not have access to the subscription!!! $resetformatting #"
-    echo "# #"
-    echo "#########################################################################################"
+    echo "#########################################################################################"
+    echo "# #"
+    echo -e "# $boldred The provided credentials do not have access to the subscription!!! 
$resetformatting #" + echo "# #" + echo "#########################################################################################" - az account show --output table + az account show --output table - exit 65 + exit 65 fi az account list --query "[].{Name:name,Id:id}" --output table #setting the user environment variables @@ -209,38 +232,37 @@ if [ -n "${subscription}" ]; then if [ -n "${keyvault}" ] ; then - kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") + kv_found=$(az keyvault list --subscription "${subscription}" --query [].name | grep "${keyvault}") - if [ -z "${kv_found}" ] ; then - echo "#########################################################################################" - echo "# #" - echo -e "# $boldred Detected a failed deployment $resetformatting #" - echo "# #" - echo -e "# $cyan Trying to recover $resetformatting #" - echo "# #" - echo "#########################################################################################" - step=0 - save_config_var "step" "${deployer_config_information}" - fi + if [ -z "${kv_found}" ] ; then + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred Detected a failed deployment $resetformatting #" + echo "# #" + echo -e "# $cyan Trying to recover $resetformatting #" + echo "# #" + echo "#########################################################################################" + step=0 + save_config_var "step" "${deployer_config_information}" + fi else - step=0 - save_config_var "step" "${deployer_config_information}" + step=0 + save_config_var "step" "${deployer_config_information}" fi - - fi load_config_vars "${deployer_config_information}" "step" if [ 0 = "${deploy_using_msi_only:-}" ]; then - echo "Using Service Principal for deployment" - set_executing_user_environment_variables "${spn_secret}" + echo "Identity to use: Service Principal" + unset ARM_USE_MSI + set_executing_user_environment_variables "${spn_secret}" else - echo "Using Managed Identity for deployment" - set_executing_user_environment_variables "none" + echo "Identity to use: Managed Identity" + set_executing_user_environment_variables "none" fi if [ $recover == 1 ]; then @@ -253,6 +275,23 @@ if [ $recover == 1 ]; then fi fi +#Persist the parameters +if [ -n "$subscription" ]; then + save_config_var "subscription" "${deployer_config_information}" + export STATE_SUBSCRIPTION=$subscription + save_config_var "STATE_SUBSCRIPTION" "${deployer_config_information}" + export ARM_SUBSCRIPTION_ID=$subscription + save_config_var "ARM_SUBSCRIPTION_ID" "${deployer_config_information}" +fi + +if [ -n "$client_id" ]; then + save_config_var "client_id" "${deployer_config_information}" +fi + +if [ -n "$tenant_id" ]; then + save_config_var "tenant_id" "${deployer_config_information}" +fi + curdir=$(pwd) if [ 0 == $step ]; then echo "" @@ -265,15 +304,16 @@ if [ 0 == $step ]; then allParams=$(printf " --parameterfile %s %s" "${deployer_file_parametername}" "${approveparam}") - echo $allParams - cd "${deployer_dirname}" || exit if [ $force == 1 ]; then rm -Rf .terraform terraform.tfstate* fi - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/install_deployer.sh $allParams + echo "Calling install_deployer.sh: $allParams" + echo "Deployer State File: ${deployer_tfstate_key}" + + "${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/install_deployer.sh" $allParams return_code=$? 
if [ 0 != $return_code ]; then echo "Bootstrapping of the deployer failed" > "${deployer_config_information}".err @@ -281,7 +321,7 @@ if [ 0 == $step ]; then fi load_config_vars "${deployer_config_information}" "keyvault" - echo "Key vault:" $keyvault + echo "Key vault: ${keyvault}" if [ -z "$keyvault" ]; then echo "#########################################################################################" @@ -292,28 +332,12 @@ if [ 0 == $step ]; then echo "Bootstrapping of the deployer failed" > "${deployer_config_information}".err exit 10 fi - - #Persist the parameters - if [ -n "$subscription" ]; then - save_config_var "subscription" "${deployer_config_information}" - export STATE_SUBSCRIPTION=$subscription - save_config_var "STATE_SUBSCRIPTION" "${deployer_config_information}" - fi - - if [ -n "$client_id" ]; then - save_config_var "client_id" "${deployer_config_information}" - fi - - if [ -n "$tenant_id" ]; then - save_config_var "tenant_id" "${deployer_config_information}" - fi - if [ -n "${FORCE_RESET}" ]; then - step=3 - save_config_var "step" "${deployer_config_information}" - exit 0 + step=3 + save_config_var "step" "${deployer_config_information}" + exit 0 else - export step=1 + export step=1 fi save_config_var "step" "${deployer_config_information}" @@ -341,18 +365,18 @@ if [ 1 == $step ] || [ 3 == $step ] ; then if [ -z "$keyvault" ]; then - key=$(echo "${deployer_file_parametername}" | cut -d. -f1) - if [ $recover == 1 ]; then - terraform_module_directory="$SAP_AUTOMATION_REPO_PATH"/deploy/terraform/run/sap_deployer/ - terraform -chdir="${terraform_module_directory}" init -upgrade=true \ - --backend-config "subscription_id=${STATE_SUBSCRIPTION}" \ - --backend-config "resource_group_name=${REMOTE_STATE_RG}" \ - --backend-config "storage_account_name=${REMOTE_STATE_SA}" \ - --backend-config "container_name=tfstate" \ - --backend-config "key=${key}.terraform.tfstate" - - keyvault=$(terraform -chdir="${terraform_module_directory}" output deployer_kv_user_name | tr -d \") - fi + key=$(echo "${deployer_file_parametername}" | cut -d. 
-f1) + if [ $recover == 1 ]; then + terraform_module_directory="$SAP_AUTOMATION_REPO_PATH"/deploy/terraform/run/sap_deployer/ + terraform -chdir="${terraform_module_directory}" init -upgrade=true \ + --backend-config "subscription_id=${STATE_SUBSCRIPTION}" \ + --backend-config "resource_group_name=${REMOTE_STATE_RG}" \ + --backend-config "storage_account_name=${REMOTE_STATE_SA}" \ + --backend-config "container_name=tfstate" \ + --backend-config "key=${key}.terraform.tfstate" + + keyvault=$(terraform -chdir="${terraform_module_directory}" output deployer_kv_user_name | tr -d \") + fi fi if [ -z "$keyvault" ]; then @@ -374,15 +398,15 @@ if [ 1 == $step ] || [ 3 == $step ] ; then kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]" --subscription "${subscription}") if [ -z $kv_name_check ]; then - echo "" - echo "#########################################################################################" - echo "# #" - echo -e "# $cyan Retrying keyvault access $resetformatting #" - echo "# #" - echo "#########################################################################################" - echo "" - sleep 60 - kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]" --subscription "${subscription}") + echo "" + echo "#########################################################################################" + echo "# #" + echo -e "# $cyan Retrying keyvault access $resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + sleep 60 + kv_name_check=$(az keyvault list --query "[?name=='$keyvault'].name | [0]" --subscription "${subscription}") fi if [ -z $kv_name_check ]; then @@ -421,30 +445,30 @@ if [ 1 == $step ] || [ 3 == $step ] ; then if [ 0 = "${deploy_using_msi_only:-}" ]; then - read -p "Do you want to specify the SPN Details Y/N?" ans - answer=${ans^^} - if [ "$answer" == 'Y' ]; then - allParams=$(printf " -e %s -r %s -v %s " "${environment}" "${region_code}" "${keyvault}" ) + read -p "Do you want to specify the SPN Details Y/N?" ans + answer=${ans^^} + if [ "$answer" == 'Y' ]; then + allParams=$(printf " -e %s -r %s -v %s " "${environment}" "${region_code}" "${keyvault}" ) - #$allParams as an array (); array math can be done in shell, allowing dynamic parameter lists to be created - #"${allParams[@]}" - quotes all elements of the array + #$allParams as an array (); array math can be done in shell, allowing dynamic parameter lists to be created + #"${allParams[@]}" - quotes all elements of the array - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams - return_code=$? - if [ 0 != $return_code ]; then - exit $return_code - fi - fi + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams + return_code=$? 
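A note on the parameter-handling pattern above: the scripts assemble option strings with printf and then expand them unquoted, so the shell's word splitting turns the string back into separate arguments. A minimal sketch of the mechanism, with illustrative values that are not taken from this change:

    #!/bin/bash
    # Illustrative environment/region/vault values; any single-token values work.
    allParams=$(printf " -e %s -r %s -v %s " "MGMT" "WEEU" "MGMTWEEUDEP00userkv")

    # Deliberately unquoted: word splitting yields six separate argv entries.
    # This only stays correct while no value contains embedded whitespace.
    set -- $allParams
    echo "argument count: $#"   # prints: argument count: 6

This is also why the comment in the script notes that the parameter list could be built as an array; expanding "${allParams[@]}" would keep values with spaces intact.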
+ if [ 0 != $return_code ]; then + exit $return_code + fi + fi else - allParams=$(printf " -e %s -r %s -v %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${subscription}") + allParams=$(printf " -e %s -r %s -v %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${subscription}") - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams - if [ -f secret.err ]; then - error_message=$(cat secret.err) - echo "##vso[task.logissue type=error]${error_message}" + "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh $allParams + if [ -f secret.err ]; then + error_message=$(cat secret.err) + echo "##vso[task.logissue type=error]${error_message}" - exit 65 - fi + exit 65 + fi fi fi @@ -458,8 +482,8 @@ if [ 1 == $step ] || [ 3 == $step ] ; then fi cd "${curdir}" || exit if [ 1 == $step ] ; then - step=2 - save_config_var "step" "${deployer_config_information}" + step=2 + save_config_var "step" "${deployer_config_information}" fi else az_subscription_id=$(az account show --query id -o tsv) @@ -511,7 +535,7 @@ if [ 2 == $step ]; then fi allParams=$(printf " -p %s -d %s %s" "${library_file_parametername}" "${relative_path}" "${approveparam}") - echo "${allParams}" + echo "Calling install_library.sh with: $allParams" "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/install_library.sh $allParams return_code=$? @@ -526,7 +550,7 @@ if [ 2 == $step ]; then REMOTE_STATE_SA=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw remote_state_storage_account_name | tr -d \") STATE_SUBSCRIPTION=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw created_resource_group_subscription_id | tr -d \") - if [ $ado_flag != "--ado" ] ; then + if [ "${ado_flag}" != "--ado" ] ; then az storage account network-rule add -g "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --ip-address ${this_ip} --output none fi @@ -601,22 +625,30 @@ if [ 3 == $step ]; then v="" secret=$(az keyvault secret list --vault-name "${keyvault}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" | tr -d \") if [ "${secret}" == "${secretname}" ]; then - TF_VAR_sa_connection_string=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --query value | tr -d \") - export TF_VAR_sa_connection_string + TF_VAR_sa_connection_string=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --query value | tr -d \") + export TF_VAR_sa_connection_string fi if [[ -z $REMOTE_STATE_SA ]]; then - echo "Loading the State file information" load_config_vars "${deployer_config_information}" "REMOTE_STATE_SA" fi - allParams=$(printf " --parameterfile %s --storageaccountname %s --type sap_deployer %s %s " "${deployer_file_parametername}" "${REMOTE_STATE_SA}" "${approveparam}" "${ado_flag}" ) + if [[ -z $STATE_SUBSCRIPTION ]]; + then + load_config_vars "${deployer_config_information}" "STATE_SUBSCRIPTION" + fi - echo -e "$cyan calling installer.sh with parameters: $allParams" + if [[ -z $ARM_SUBSCRIPTION_ID ]]; + then + load_config_vars "${deployer_config_information}" "ARM_SUBSCRIPTION_ID" + fi - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/installer.sh $allParams + allParams=$(printf " --parameterfile %s --storageaccountname %s --state_subscription %s --type sap_deployer %s %s " "${deployer_file_parametername}" "${REMOTE_STATE_SA}" "${STATE_SUBSCRIPTION}" "${approveparam}" "${ado_flag}" ) + + echo "Calling installer.sh with: $allParams" + "${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/installer.sh" $allParams return_code=$? if [ 0 != $return_code ]; then echo "Migrating the deployer state failed" > "${deployer_config_information}".err @@ -649,7 +681,7 @@ if [ 4 == $step ]; then cd "${library_dirname}" || exit allParams=$(printf " --parameterfile %s --storageaccountname %s --type sap_library %s %s" "${library_file_parametername}" "${REMOTE_STATE_SA}" "${approveparam}" "${ado_flag}") - echo -e "$cyan calling installer.sh with parameters: $allParams" + echo "Calling installer.sh with: $allParams" "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/installer.sh $allParams return_code=$? diff --git a/deploy/scripts/deploy_utils.sh b/deploy/scripts/deploy_utils.sh index 42e2753ad0..db8929871e 100755 --- a/deploy/scripts/deploy_utils.sh +++ b/deploy/scripts/deploy_utils.sh @@ -1,6 +1,12 @@ #!/bin/bash -export PATH=${PATH}:/opt/terraform/bin:/opt/ansible/bin +if [ -d /opt/terraform/bin ]; then + export PATH=${PATH}:/opt/terraform/bin +fi + +if [ -d /opt/ansible/bin ]; then + export PATH=${PATH}:/opt/ansible/bin +fi ######################################################################### # Helper utilities @@ -40,12 +46,12 @@ function load_config_vars() { return fi for var_name; do # iterate over function params - # NOTE: Should we care if we fail to retrieve a value from the file? + # NOTE: Should we care if we fail to retrieve a value from the file? 
var_value="$(grep -m1 "^${var_name}=" "${var_file}" | cut -d'=' -f2- | tr -d ' ' | tr -d '"')"

     if [ -z "${var_value}" ]
     then
-      var_value="$(grep -m1 "^${var_name} " "${var_file}" | cut -d'=' -f2- | tr -d ' ' | tr -d '"')"
+      var_value="$(grep -m1 "^${var_name}[[:space:]]=" "${var_file}" | cut -d'=' -f2- | tr -d ' ' | tr -d '"')"
     fi

     # NOTE: this continue means we skip setting an empty value for a variable
@@ -122,23 +128,27 @@ function get_and_store_sa_details {
     local REMOTE_STATE_SA="${1}"
     local config_file_name="${2}"

-    echo "Trying to find the storage account ${REMOTE_STATE_SA}"
+    echo "Trying to find the storage account: ${REMOTE_STATE_SA}"
     save_config_vars "${config_file_name}" REMOTE_STATE_SA

     if [ -z $STATE_SUBSCRIPTION ];then
         tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --resource-type Microsoft.Storage/storageAccounts --query "[].id | [0]" --output tsv)
+        REMOTE_STATE_RGNAME=$(az resource list --name "${REMOTE_STATE_SA}" --resource-type Microsoft.Storage/storageAccounts --query "[].resourceGroup | [0]" --output tsv)
     else
         tfstate_resource_id=$(az resource list --name "${REMOTE_STATE_SA}" --resource-type Microsoft.Storage/storageAccounts --subscription $STATE_SUBSCRIPTION --query "[].id | [0]" --output tsv)
+        REMOTE_STATE_RGNAME=$(az resource list --name "${REMOTE_STATE_SA}" --resource-type Microsoft.Storage/storageAccounts --subscription $STATE_SUBSCRIPTION --query "[].resourceGroup | [0]" --output tsv)
     fi
     fail_if_null tfstate_resource_id
-    export STATE_SUBSCRIPTION=$(echo $tfstate_resource_id | cut -d/ -f3 | tr -d \" | xargs)
-    export REMOTE_STATE_RG=$(echo $tfstate_resource_id | cut -d/ -f5 | tr -d \" | xargs)
+
+    export REMOTE_STATE_RG=$REMOTE_STATE_RGNAME
+    export tfstate_resource_id

     save_config_vars "${config_file_name}" \
         REMOTE_STATE_RG \
         tfstate_resource_id \
         STATE_SUBSCRIPTION

-    echo "Found the storage account ${REMOTE_STATE_SA}"
+    echo "Found the storage account: ${REMOTE_STATE_SA}"
 }

 # /*---------------------------------------------------------------------------8
@@ -188,9 +198,6 @@ function checkIfCloudShell() {
     local isRunInCloudShell=1 # default value is false
     if [ "$POWERSHELL_DISTRIBUTION_CHANNEL" == "CloudShell" ]; then
         isRunInCloudShell=0
-        echo "isRunInCloudShell: true"
-    else
-        echo "isRunInCloudShell: false"
     fi

     return $isRunInCloudShell
@@ -282,6 +289,12 @@ function set_executing_user_environment_variables() {

     az_client_secret="$1"

+    echo ""
+    echo "----------------------------------------------------------------------------------------------"
+
+    echo "Setting the environment variables for the executing user"
+
+
     echo -e "\t[set_executing_user_environment_variables]: Identifying the executing user and client"

     set_azure_cloud_environment
@@ -371,6 +384,7 @@ function set_executing_user_environment_variables() {
                 export ARM_TENANT_ID
                 export ARM_CLIENT_ID
                 export ARM_CLIENT_SECRET
+                unset ARM_USE_MSI

             else
                 echo -e "\t[set_executing_user_environment_variables]: unable to identify the executing user and client"
@@ -384,6 +398,9 @@ function set_executing_user_environment_variables() {
         echo -e "\t\tARM_SUBSCRIPTION_ID: $(printenv ARM_SUBSCRIPTION_ID)"
         echo -e 
"\t\tARM_USE_MSI: $(printenv ARM_USE_MSI)" fi + echo "----------------------------------------------------------------------------------------------" + echo "" + } function unset_executing_user_environment_variables() { @@ -393,6 +410,7 @@ function unset_executing_user_environment_variables() { unset ARM_TENANT_ID unset ARM_CLIENT_ID unset ARM_CLIENT_SECRET + unset ARM_USE_MSI } # print the script name and function being called @@ -401,7 +419,7 @@ function print_script_name_and_function() { } function get_region_code() { - region_lower=$(echo "${region}" | tr [:upper:] [:lower:] ) + region_lower=$(echo "${region}" | tr [:upper:] [:lower:] | xargs | tr -d '\r') case "${region_lower}" in "australiacentral") export region_code="AUCE" ;; "australiacentral2") export region_code="AUC2" ;; diff --git a/deploy/scripts/helpers/script_helpers.sh b/deploy/scripts/helpers/script_helpers.sh index b179bf67db..516564242b 100755 --- a/deploy/scripts/helpers/script_helpers.sh +++ b/deploy/scripts/helpers/script_helpers.sh @@ -5,6 +5,18 @@ boldred="\e[1;31m" cyan="\e[1;36m" resetformatting="\e[0m" +full_script_path="$(realpath "${BASH_SOURCE[0]}")" +script_directory="$(dirname "${full_script_path}")" +script_directory_parent="$(dirname "${script_directory}")" + +#call stack has full scriptname when using source +source "${script_directory_parent}"/deploy_utils.sh + +if [[ -f /etc/profile.d/deploy_server.sh ]]; then + path=$(grep -m 1 "export PATH=" /etc/profile.d/deploy_server.sh | awk -F'=' '{print $2}' | xargs) + export PATH=$path +fi + function control_plane_showhelp { echo "" echo "#################################################################################################################" @@ -297,21 +309,21 @@ function validate_webapp_exports { fi if [ "${ARM_USE_MSI}" == "false" ]; then - if [ -z "$TF_VAR_webapp_client_secret" ]; then - echo "" - echo "#########################################################################################" - echo "# #" - echo -e "# $boldred Missing environment variables (TF_VAR_webapp_client_secret)!!! $resetformatting #" - echo "# #" - echo "# Please export the following variables to successfully deploy the Webapp: #" - echo "# TF_VAR_app_registration_app_id (webapp registration application id) #" - echo "# TF_VAR_webapp_client_secret (webapp registration password / secret) #" - echo "# #" - echo "# If you do not wish to deploy the Webapp, unset the TF_VAR_use_webapp variable #" - echo "# #" - echo "#########################################################################################" - return 65 #data format error - fi + if [ -z "$TF_VAR_webapp_client_secret" ]; then + echo "" + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred Missing environment variables (TF_VAR_webapp_client_secret)!!! 
$resetformatting #"
+        echo "# #"
+        echo "# Please export the following variables to successfully deploy the Webapp: #"
+        echo "# TF_VAR_app_registration_app_id (webapp registration application id) #"
+        echo "# TF_VAR_webapp_client_secret (webapp registration password / secret) #"
+        echo "# #"
+        echo "# If you do not wish to deploy the Webapp, unset the TF_VAR_use_webapp variable #"
+        echo "# #"
+        echo "#########################################################################################"
+        return 65 #data format error
+      fi
     fi

     return 0
@@ -380,8 +392,28 @@ function missing {

 function validate_dependencies {
+    if [ -f /opt/terraform/bin/terraform ]; then
+        tfPath="/opt/terraform/bin/terraform"
+    else
+        tfPath=$(which terraform)
+    fi
+
+    echo "Checking Terraform: $tfPath"
+
+    # if /opt/terraform exists, assign permissions to the user
+    if [ -d /opt/terraform ]; then
+        sudo chown -R "$USER" /opt/terraform
+    fi
+
     # Check terraform
-    tf=$(terraform -version | grep Terraform)
+    if checkIfCloudShell; then
+        tf=$(terraform --version | grep Terraform)
+    else
+        tf=$($tfPath --version | grep Terraform)
+    fi
+
     if [ -z "$tf" ]; then
         echo ""
         echo "#########################################################################################"
         echo "# #"
         echo -e "# $boldred Please install Terraform $resetformatting #"
         echo "# #"
         echo "#########################################################################################"
         echo ""
         return 2 #No such file or directory
     fi
-    # Set Terraform Plug in cache
-    if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]
-    then
-        mkdir -p /opt/terraform/.terraform.d/plugin-cache
+
+    # Set the Terraform plug-in cache
+    if checkIfCloudShell; then
+        mkdir -p "${HOME}/.terraform.d/plugin-cache"
+        export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache"
+    else
+        if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]; then
+            mkdir -p /opt/terraform/.terraform.d/plugin-cache
+        fi
+        export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache
     fi
-    sudo chown -R $USER:$USER /opt/terraform
-    export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache

     az --version >stdout.az 2>&1
@@ -448,21 +485,15 @@ function validate_dependencies {
 }

 function validate_key_parameters {
-    echo "Validating $1"
-    ext=$(echo $1 | cut -d. -f2)
+    echo "Validating: $1"

     # Helper variables
-    if [ "${ext}" == json ]; then
-        export environment=$(jq --raw-output .infrastructure.environment $1)
-        export region=$(jq --raw-output .infrastructure.region $1)
-    else
-        load_config_vars $1 "environment"
-        environment=$(echo ${environment} | xargs | tr "[:lower:]" "[:upper:]" )
-        load_config_vars $1 "location"
-        region=$(echo ${location} | xargs)
-    fi
+    load_config_vars $1 "environment"
+    export environment=$(echo ${environment} | xargs | tr "[:lower:]" "[:upper:]" | tr -d '\r' )
+    load_config_vars $1 "location"
+    export region=$(echo ${location} | xargs | tr -d '\r')

     if [ -z "${environment}" ]; then
         echo "#########################################################################################"
         echo "# #"
         echo -e "# $boldred Incorrect parameter file. $resetformatting #"
         echo "# #"
         echo "# The file needs to contain the environment attribute!! #"
         echo "# #"
         echo "#########################################################################################"
         echo ""
         return 64 #script usage wrong
     fi

     if [ -z "${region}" ]; then
         echo "#########################################################################################"
         echo "# #"
         echo -e "# $boldred Incorrect parameter file. 
$resetformatting #" diff --git a/deploy/scripts/install_deployer.sh b/deploy/scripts/install_deployer.sh index bbf6648062..3adc7a624b 100755 --- a/deploy/scripts/install_deployer.sh +++ b/deploy/scripts/install_deployer.sh @@ -75,7 +75,7 @@ deployment_system=sap_deployer param_dirname=$(dirname "${parameterfile}") -echo "Parameter file: "${parameterfile}"" +echo "Parameter file: ${parameterfile}" if [ ! -f "${parameterfile}" ] then @@ -139,8 +139,7 @@ export TF_DATA_DIR="${param_dirname}"/.terraform this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 export TF_VAR_Agent_IP=$this_ip -echo "Agent IP: $this_ip" - +echo "Agent IP: $this_ip" ok_to_proceed=false new_deployment=false @@ -206,6 +205,19 @@ else terraform -chdir="${terraform_module_directory}" init -upgrade=true -backend-config "path=${param_dirname}/terraform.tfstate" fi fi +return_value=$? +if [ 1 == $return_value ] +then + echo "" + echo "#########################################################################################" + echo "# #" + echo -e "# $boldreduscore Errors during the init phase $resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + unset TF_DATA_DIR + exit $return_value +fi extra_vars="" @@ -390,6 +402,16 @@ then rm apply_output.json fi fi +if [ 0 != $return_value ] +then + echo "#########################################################################################" + echo "# #" + echo -e "# $boldreduscore !!! Error when Creating the deployer !!! $resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + exit $return_value +fi keyvault=$(terraform -chdir="${terraform_module_directory}" output deployer_kv_user_name | tr -d \") temp=$(echo "${keyvault}" | grep "Warning") diff --git a/deploy/scripts/install_library.sh b/deploy/scripts/install_library.sh index 3a6259d733..6a23b82893 100755 --- a/deploy/scripts/install_library.sh +++ b/deploy/scripts/install_library.sh @@ -143,7 +143,6 @@ fi region=$(echo "${region}" | tr "[:upper:]" "[:lower:]") get_region_code $region - if [ true == "$use_deployer" ] then if [ ! -d "${deployer_statefile_foldername}" ] @@ -165,12 +164,18 @@ generic_config_information="${automation_config_directory}"config library_config_information="${automation_config_directory}""${environment}""${region_code}" #Plugins -if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] -then +isInCloudShellCheck=$(checkIfCloudShell) + +if checkIfCloudShell; then + mkdir -p "${HOME}/.terraform.d/plugin-cache" + export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache" +else + if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ]; then mkdir -p /opt/terraform/.terraform.d/plugin-cache + sudo chown -R "$USER" /opt/terraform + fi + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache fi -sudo chown -R $USER:$USER /opt/terraform -export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache param_dirname=$(pwd) @@ -333,7 +338,7 @@ echo "########################################################################## echo "" if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" terraform -chdir="${terraform_module_directory}" plan -no-color -detailed-exitcode -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" > plan_output.log 2>&1 else terraform -chdir="${terraform_module_directory}" plan -no-color -detailed-exitcode -var-file="${var_file}" > plan_output.log 2>&1 @@ -400,7 +405,7 @@ echo "" deployer_parameter="" if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" if [ -n "${approve}" ] then terraform -chdir="${terraform_module_directory}" apply -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" -auto-approve -json | tee -a apply_output.json @@ -435,7 +440,7 @@ then if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" terraform -chdir="${terraform_module_directory}" import -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" $moduleID $resourceID else terraform -chdir="${terraform_module_directory}" import -var-file="${var_file}" $moduleID $resourceID @@ -454,7 +459,7 @@ then echo "" if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" terraform -chdir="${terraform_module_directory}" apply -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" -auto-approve -json | tee -a apply_output.json else terraform -chdir="${terraform_module_directory}" apply -var-file="${var_file}" -auto-approve -json | tee -a apply_output.json @@ -477,7 +482,7 @@ then if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" terraform -chdir="${terraform_module_directory}" import -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" $moduleID $resourceID else terraform -chdir="${terraform_module_directory}" import -var-file="${var_file}" $moduleID $resourceID @@ -497,7 +502,7 @@ then echo "" if [ -n "${deployer_statefile_foldername}" ]; then - echo "Deployer folder specified:" "${deployer_statefile_foldername}" + echo "Deployer folder specified: ${deployer_statefile_foldername}" terraform -chdir="${terraform_module_directory}" apply -var-file="${var_file}" -var deployer_statefile_foldername="${deployer_statefile_foldername}" -auto-approve -json | tee -a apply_output.json else terraform -chdir="${terraform_module_directory}" apply -var-file="${var_file}" -auto-approve -json | tee -a apply_output.json diff --git 
a/deploy/scripts/install_workloadzone.sh b/deploy/scripts/install_workloadzone.sh index e2e9c9a6b9..1e5231e233 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -12,9 +12,11 @@ full_script_path="$(realpath "${BASH_SOURCE[0]}")" script_directory="$(dirname "${full_script_path}")" #call stack has full scriptname when using source +# shellcheck disable=SC1091 source "${script_directory}/deploy_utils.sh" #helper files +# shellcheck disable=SC1091 source "${script_directory}/helpers/script_helpers.sh" force=0 @@ -64,14 +66,14 @@ this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 deployer_environment=$(echo "${deployer_environment}" | tr "[:lower:]" "[:upper:]") -echo "Deployer environment: $deployer_environment" +echo "Deployer environment: $deployer_environment" if [ 1 == $called_from_ado ] ; then this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 export TF_VAR_Agent_IP=$this_ip - echo "Agent IP: $this_ip" -fi + echo "Agent IP: $this_ip" +fi workload_file_parametername=$(basename "${parameterfile}") @@ -115,12 +117,24 @@ fi # Check that parameter files have environment and location defined validate_key_parameters "$workload_file_parametername" +return_code=$? if [ 0 != $return_code ]; then exit $return_code fi +# Convert the region to the correct code +get_region_code "$region" + + +if [ "${region_code}" == 'UNKN' ]; then + LOCATION_CODE=$(echo "$workload_file_parametername" | awk -F'-' '{print $2}' ) + region_code=$(echo "${LOCATION_CODE}" | tr "[:lower:]" "[:upper:]" | xargs) +fi + +echo "Region code: ${region_code}" + load_config_vars "$workload_file_parametername" "network_logical_name" -network_logical_name=$(echo "${network_logical_name}" | tr "[:lower:]" "[:upper:]") +network_logical_name=$(echo "${network_logical_name}" | tr "[:lower:]" "[:upper:]" | xargs) if [ -z "${network_logical_name}" ]; then echo "#########################################################################################" @@ -134,31 +148,25 @@ if [ -z "${network_logical_name}" ]; then return 64 #script usage wrong fi - -# Convert the region to the correct code -region=$(echo "${region}" | tr "[:upper:]" "[:lower:]") -get_region_code "$region" - key=$(echo "${workload_file_parametername}" | cut -d. 
-f1) landscape_tfstate_key=${key}.terraform.tfstate -echo "Deployment region: $region" -echo "Deployment region code: $region_code" -echo "Keyvault: $keyvault" - #Persisting the parameters across executions automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation generic_config_information="${automation_config_directory}"/config -if [ $deployer_environment != $environment ]; then +if [ "$deployer_environment" != "$environment" ]; then if [ -f "${automation_config_directory}"/"${environment}""${region_code}" ]; then # Add support for having multiple vnets in the same environment and zone - rename exiting file to support seamless transition mv "${automation_config_directory}"/"${environment}""${region_code}" "${automation_config_directory}"/"${environment}""${region_code}""${network_logical_name}" fi fi -workload_config_information="${automation_config_directory}"/"${environment}""${region_code}""${network_logical_name}" +workload_config_information="${automation_config_directory}/${environment}${region_code}${network_logical_name}" +deployer_config_information="${automation_config_directory}/${deployer_environment}${region_code}" +save_config_vars "${workload_config_information}" \ + STATE_SUBSCRIPTION REMOTE_STATE_SA subscription if [ "${force}" == 1 ] then @@ -169,13 +177,20 @@ then rm -Rf .terraform terraform.tfstate* fi -echo "Workload configuration file: $workload_config_information" - -if [ -n "$STATE_SUBSCRIPTION" ] +echo "" +echo "Configuration file: ${environment}${region_code}${network_logical_name}" +echo "Deployment region: $region" +echo "Deployment region code: $region_code" +echo "Deployment environment: $deployer_environment" +echo "Deployer Keyvault: $keyvault" +echo "Deployer Subscription: $STATE_SUBSCRIPTION" +echo "Remote state storage account: $REMOTE_STATE_SA" +echo "Target Subscription: $subscription" + +if [[ -n $STATE_SUBSCRIPTION ]] then - echo "Saving the state subscription" if is_valid_guid "$STATE_SUBSCRIPTION" ; then - echo "Valid subscription format" + save_config_vars "${workload_config_information}" \ STATE_SUBSCRIPTION @@ -202,7 +217,6 @@ then fi if [ -n "$REMOTE_STATE_SA" ] ; then - get_and_store_sa_details ${REMOTE_STATE_SA} ${workload_config_information} fi @@ -265,7 +279,7 @@ then if [ -n "$deployer_environment" ] then deployer_config_information="${automation_config_directory}"/"${deployer_environment}""${region_code}" - echo "Deployer config file $deployer_config_information" + echo "Deployer config file: $deployer_config_information" if [ -f "$deployer_config_information" ] then load_config_vars "${deployer_config_information}" "keyvault" @@ -273,7 +287,7 @@ then load_config_vars "${deployer_config_information}" "REMOTE_STATE_SA" load_config_vars "${deployer_config_information}" "tfstate_resource_id" load_config_vars "${deployer_config_information}" "deployer_tfstate_key" - echo "tfstate_resource_id: $tfstate_resource_id" + save_config_vars "${workload_config_information}" \ tfstate_resource_id @@ -286,12 +300,15 @@ then fi fi else - echo "tfstate_resource_id $tfstate_resource_id" + + echo "Terraform Storage Account Id: $tfstate_resource_id" + save_config_vars "${workload_config_information}" \ tfstate_resource_id fi +echo "" init "${automation_config_directory}" "${generic_config_information}" "${workload_config_information}" param_dirname=$(pwd) @@ -301,7 +318,8 @@ export TF_DATA_DIR="${param_dirname}/.terraform" if [ -n "$subscription" ] then if is_valid_guid "$subscription" ; then - echo "Valid subscription format" + echo "" + 
export ARM_SUBSCRIPTION_ID="${subscription}" else printf -v val %-40.40s "$subscription" echo "#########################################################################################" @@ -319,7 +337,7 @@ if [ 0 = "${deploy_using_msi_only:-}" ]; then if [ -n "$client_id" ] then if is_valid_guid "$client_id" ; then - echo "Valid spn id format" + echo "" else printf -v val %-40.40s "$client_id" echo "#########################################################################################" @@ -347,7 +365,12 @@ if [ 0 = "${deploy_using_msi_only:-}" ]; then fi #setting the user environment variables - set_executing_user_environment_variables "${spn_secret}" + if [ -n "${spn_secret}" ] + then + set_executing_user_environment_variables "${spn_secret}" + else + set_executing_user_environment_variables "none" + fi else #setting the user environment variables set_executing_user_environment_variables "N/A" @@ -419,10 +442,10 @@ fi useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) if [ "$useSAS" = "true" ] ; then - echo "Authenticate storage using SAS" + echo "Storage Account authentication: key" export ARM_USE_AZUREAD=false else - echo "Authenticate storage using Entra ID" + echo "Storage Account authentication: Entra ID" export ARM_USE_AZUREAD=true fi @@ -432,9 +455,9 @@ if [ 1 = "${deploy_using_msi_only:-}" ]; then then echo "Setting the secrets" - allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${subscription}" ) + allParams=$(printf " --workload --environment %s --region %s --vault %s --keyvault_subscription %s --subscription %s --msi " "${environment}" "${region_code}" "${keyvault}" "${STATE_SUBSCRIPTION}" "${ARM_SUBSCRIPTION_ID}" ) - echo "Calling set_secrets with " "${allParams}" + echo "Calling set_secrets with: ${allParams}" "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} @@ -456,13 +479,13 @@ else if [ -n "$spn_secret" ] then - allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret ***** --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" "${tenant_id}" ) + fixed_allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --spn_secret ***** --keyvault_subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${ARM_SUBSCRIPTION_ID}" "${STATE_SUBSCRIPTION}" "${client_id}" "${tenant_id}" ) - echo "Calling set_secrets with " "${allParams}" + echo "Calling set_secrets with: ${fixed_allParams}" - allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret %s --subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${spn_secret}" "${subscription}" "${client_id}" "${tenant_id}" ) + allParams=$(printf " --workload --environment %s --region %s --vault %s --spn_secret %s --subscription %s --keyvault_subscription %s --spn_id %s --tenant_id %s " "${environment}" "${region_code}" "${keyvault}" "${spn_secret}" "${ARM_SUBSCRIPTION_ID}" "${STATE_SUBSCRIPTION}" "${client_id}" "${tenant_id}" ) - "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams} + "${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/set_secrets.sh" ${allParams} if [ -f secret.err ]; then error_message=$(cat secret.err) @@ -474,9 +497,9 @@ else read -p "Do you want to specify the 
Workload SPN Details Y/N?" ans
         answer=${ans^^}
         if [ ${answer} == 'Y' ]; then
-            allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --spn_id %s " "${environment}" "${region_code}" "${keyvault}" "${subscription}" "${client_id}" )
+            allParams=$(printf " --workload --environment %s --region %s --vault %s --subscription %s --spn_id %s " "${environment}" "${region_code}" "${keyvault}" "${STATE_SUBSCRIPTION}" "${client_id}" )

-            "${SAP_AUTOMATION_REPO_PATH}"/deploy/scripts/set_secrets.sh ${allParams}
+            "${SAP_AUTOMATION_REPO_PATH}/deploy/scripts/set_secrets.sh" ${allParams}
             if [ $? -eq 255 ]
             then
                 exit $?
@@ -567,19 +590,29 @@ ok_to_proceed=false
 new_deployment=false

 #Plugins
-if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]
-then
+isInCloudShellCheck=$(checkIfCloudShell)
+
+if checkIfCloudShell; then
+    mkdir -p "${HOME}/.terraform.d/plugin-cache"
+    export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache"
+else
+    if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]; then
         mkdir -p /opt/terraform/.terraform.d/plugin-cache
+        sudo chown -R "$USER" /opt/terraform
+    fi
+    export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache
 fi
-sudo chown -R $USER:$USER /opt/terraform
-export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache

 root_dirname=$(pwd)

-echo " subscription_id=${STATE_SUBSCRIPTION}"
-echo " resource_group_name=${REMOTE_STATE_RG}"
-echo "storage_account_name=${REMOTE_STATE_SA}"
-
+echo ""
+echo "Terraform details"
+echo "-------------------------------------------------------------------------"
+echo "Subscription: ${STATE_SUBSCRIPTION}"
+echo "Storage Account: ${REMOTE_STATE_SA}"
+echo "Resource Group: ${REMOTE_STATE_RG}"
+echo "State file: ${key}.terraform.tfstate"
+echo "Target subscription: ${ARM_SUBSCRIPTION_ID}"

 if [ ! -d ./.terraform/ ]; then
@@ -751,7 +784,7 @@ else
 fi

 return_value=$?
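For context on the return-code handling that follows: terraform plan -detailed-exitcode exits 0 when the plan finds no changes, 1 on error, and 2 when changes are pending. Because the plan output is piped through tee, the code only survives the pipeline in scripts that enable pipefail (installer.sh does, via set -o pipefail). A minimal sketch, assuming an already initialized working directory:

    #!/bin/bash
    set -o pipefail   # keep terraform's exit code across the pipe through tee

    terraform plan -no-color -detailed-exitcode | tee -a plan_output.log
    case $? in
        0) echo "No changes to apply" ;;
        1) echo "Plan failed" ;;
        2) echo "Changes are pending" ;;
    esac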
-echo "Terraform Plan return code: " $return_value +echo "Terraform Plan return code: $return_value" if [ 1 == $return_value ] then echo "#########################################################################################" @@ -1042,7 +1075,7 @@ if [ 0 == $return_value ] ; then echo "" save_config_var "workloadkeyvault" "${workload_config_information}" - fi + fi_system fi fi @@ -1063,10 +1096,12 @@ echo "" if [ -n "${spn_secret}" ] then az logout - echo "Login as SPN" az login --service-principal --username "${client_id}" --password="${spn_secret}" --tenant "${tenant_id}" --output none fi +full_script_path="$(realpath "${BASH_SOURCE[0]}")" +script_directory="$(dirname "${full_script_path}")" + rg_name=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw created_resource_group_name | tr -d \") az deployment group create --resource-group "${rg_name}" --name "SAP-WORKLOAD-ZONE_${rg_name}" --subscription "${subscription}" --template-file "${script_directory}/templates/empty-deployment.json" --output none @@ -1086,37 +1121,47 @@ Date : "${now}" EOF -if [ -f "${workload_config_information}".err ]; then - cat "${workload_config_information}".err -fi - +printf -v kvname '%-40s' "${workloadkeyvault}" echo "" echo "#########################################################################################" echo "# #" -echo -e "# $cyan Adding the subnets to storage account firewalls $resetformatting #" +echo -e "# $cyan Please save these values: $resetformatting #" +echo "# - Key Vault: ${kvname} #" echo "# #" echo "#########################################################################################" -echo "" -subnet_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw app_subnet_id | tr -d \") -useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) -echo "useSAS = $useSAS" - -if [ -n "${subnet_id}" ]; then - echo "Adding the app subnet" - az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none - if [ -n "$SAPBITS" ] ; then - az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name $SAPBITS --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none - fi +if [ -f "${workload_config_information}".err ]; then + cat "${workload_config_information}".err fi -subnet_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw db_subnet_id | tr -d \") +# echo "" +# echo "#########################################################################################" +# echo "# #" +# echo -e "# $cyan Adding the subnets to storage account firewalls $resetformatting #" +# echo "# #" +# echo "#########################################################################################" +# echo "" + +# subnet_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw app_subnet_id | tr -d \") + +# useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) +# echo "Shared Access Key access: $useSAS" + +# if [ -n "${subnet_id}" ]; then +# echo "Adding the application subnet to the storage account hosting the Terraform State files" +# az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output 
none
+#     if [ -n "$SAPBITS" ] ; then
+#         az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name $SAPBITS --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none
+#     fi
+# fi

+# subnet_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw db_subnet_id | tr -d \")

-if [ -n "${subnet_id}" ]; then
-    echo "Adding the db subnet"
-    az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none
-fi
+# if [ -n "${subnet_id}" ]; then
+#     echo "Adding the db subnet"
+#     az storage account network-rule add --resource-group "${REMOTE_STATE_RG}" --account-name "${REMOTE_STATE_SA}" --subscription "${STATE_SUBSCRIPTION}" --subnet $subnet_id --output none
+# fi

 unset TF_DATA_DIR

diff --git a/deploy/scripts/installer.sh b/deploy/scripts/installer.sh
index 95be504f6f..358e3542c9 100755
--- a/deploy/scripts/installer.sh
+++ b/deploy/scripts/installer.sh
@@ -8,6 +8,7 @@ set -o pipefail
 boldreduscore="\e[1;4;31m"
 boldred="\e[1;31m"
 cyan="\e[1;36m"
+green="\e[1;32m"
 resetformatting="\e[0m"

 #External helper functions
@@ -50,8 +51,10 @@ do
 done

-echo "Parameter file: $parameterfile"
-echo "Current directory: $(pwd)"
+echo "Parameter file: $parameterfile"
+echo "Current directory: $(pwd)"
+echo "Terraform state subscription_id: ${STATE_SUBSCRIPTION}"
+echo "Terraform state storage account name: ${REMOTE_STATE_SA}"

 tfstate_resource_id=""
 tfstate_parameter=""
@@ -154,25 +157,30 @@ automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation/
 generic_config_information="${automation_config_directory}"config
 system_config_information="${automation_config_directory}""${environment}""${region_code}""${network_logical_name}"

-echo "Configuration file: $system_config_information"
-echo "Deployment region: $region"
-echo "Deployment region code: $region_code"
+echo "Configuration file: $system_config_information"
+echo "Deployment region: $region"
+echo "Deployment region code: $region_code"
+

 if [ 1 == $called_from_ado ] ; then
     this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1
     export TF_VAR_Agent_IP=$this_ip
-    echo "Agent IP: $this_ip"
-fi
+    echo "Agent IP: $this_ip"
+fi

 #Plugins
-if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]
-then
+isInCloudShellCheck=$(checkIfCloudShell)
+
+if checkIfCloudShell; then
+    mkdir -p "${HOME}/.terraform.d/plugin-cache"
+    export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache"
+else
+    if [ ! 
-d /opt/terraform/.terraform.d/plugin-cache ]; then mkdir -p /opt/terraform/.terraform.d/plugin-cache + sudo chown -R "$USER" /opt/terraform + fi + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache fi -sudo chown -R $USER:$USER /opt/terraform - -export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache - parallelism=10 @@ -181,7 +189,7 @@ if [[ -n "${TF_PARALLELLISM}" ]]; then parallelism=$TF_PARALLELLISM fi -echo "Parallelism count $parallelism" +echo "Parallelism count: $parallelism" param_dirname=$(pwd) @@ -198,35 +206,62 @@ fi if [ "${deployment_system}" == sap_deployer ] then deployer_tfstate_key=${key}.terraform.tfstate - STATE_SUBSCRIPTION=$ARM_SUBSCRIPTION_ID + ARM_SUBSCRIPTION_ID=$STATE_SUBSCRIPTION + export ARM_SUBSCRIPTION_ID fi if [[ -z $STATE_SUBSCRIPTION ]]; then STATE_SUBSCRIPTION=$ARM_SUBSCRIPTION_ID fi + +if [[ -n $STATE_SUBSCRIPTION ]]; +then + echo "" + echo "#########################################################################################" + echo "# #" + echo -e "# $cyan Changing the subscription to: $STATE_SUBSCRIPTION $resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + az account set --sub "${STATE_SUBSCRIPTION}" + + return_code=$? + if [ 0 != $return_code ]; then + + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred The deployment account (MSI or SPN) does not have access to $resetformatting #" + echo -e "# $boldred ${STATE_SUBSCRIPTION} $resetformatting #" + echo "# #" + echo "#########################################################################################" + + echo "##vso[task.logissue type=error]The deployment account (MSI or SPN) does not have access to ${STATE_SUBSCRIPTION}" + exit $return_code + fi + + account_set=1 +fi + if [[ -z $REMOTE_STATE_SA ]]; then - echo "Loading the State file information" load_config_vars "${system_config_information}" "REMOTE_STATE_SA" load_config_vars "${system_config_information}" "REMOTE_STATE_RG" load_config_vars "${system_config_information}" "tfstate_resource_id" load_config_vars "${system_config_information}" "STATE_SUBSCRIPTION" + load_config_vars "${system_config_information}" "ARM_SUBSCRIPTION_ID" else save_config_vars "${system_config_information}" REMOTE_STATE_SA fi -echo "Terraform state file storage:" "${REMOTE_STATE_SA}" -echo "Terraform state subscription:" "${STATE_SUBSCRIPTION}" - deployer_tfstate_key_parameter='' if [[ -z $deployer_tfstate_key ]]; then load_config_vars "${system_config_information}" "deployer_tfstate_key" else - echo "Deployer state file name:" "${deployer_tfstate_key}" - save_config_vars "${system_config_information}" deployer_tfstate_key + echo "Deployer state file name: ${deployer_tfstate_key}" + echo "Target subscription: $ARM_SUBSCRIPTION_ID" fi if [ "${deployment_system}" != sap_deployer ] @@ -251,22 +286,25 @@ then fi else deployer_tfstate_key_parameter=" -var deployer_tfstate_key=${deployer_tfstate_key}" + echo "Deployer state file name: ${deployer_tfstate_key}" fi else load_config_vars "${system_config_information}" "keyvault" export TF_VAR_deployer_kv_user_arm_id=$(az resource list --name "${keyvault}" --subscription ${STATE_SUBSCRIPTION} --resource-type Microsoft.KeyVault/vaults --query "[].id | [0]" -o tsv) - echo "Deployer Keyvault: $TF_VAR_deployer_kv_user_arm_id" + echo "Deployer Keyvault ID: $TF_VAR_deployer_kv_user_arm_id" + deployer_parameter=" -var 
subscription_id=${STATE_SUBSCRIPTION} " + export ARM_SUBSCRIPTION_ID=$STATE_SUBSCRIPTION fi -useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription ${STATE_SUBSCRIPTION} --out tsv) if [ "$useSAS" = "true" ] ; then - echo "Authenticate storage using SAS" + echo "Storage Account Authentication: Key" export ARM_USE_AZUREAD=false else - echo "Authenticate storage using Entra ID" + echo "Storage Account Authentication: Entra ID" export ARM_USE_AZUREAD=true fi @@ -277,7 +315,7 @@ if [[ -z $landscape_tfstate_key ]]; then load_config_vars "${system_config_information}" "landscape_tfstate_key" else - echo "Workload zone file name:" "${landscape_tfstate_key}" + echo "Workload zone state file: ${landscape_tfstate_key}" save_config_vars "${system_config_information}" landscape_tfstate_key fi @@ -311,7 +349,7 @@ if [[ -z $STATE_SUBSCRIPTION ]]; then load_config_vars "${system_config_information}" "STATE_SUBSCRIPTION" else - echo "Saving the state subscription" + if is_valid_guid "$STATE_SUBSCRIPTION" ; then save_config_var "STATE_SUBSCRIPTION" "${system_config_information}" else @@ -348,34 +386,6 @@ if [[ -n ${subscription} ]]; then export ARM_SUBSCRIPTION_ID="${subscription}" fi -if [[ -n $STATE_SUBSCRIPTION ]]; -then - echo "" - echo "#########################################################################################" - echo "# #" - echo -e "# $cyan Changing the subscription to: $STATE_SUBSCRIPTION $resetformatting #" - echo "# #" - echo "#########################################################################################" - echo "" - az account set --sub "${STATE_SUBSCRIPTION}" - - return_code=$? 
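The subscription-switch block that this change relocates follows a common az CLI guard pattern: attempt the switch, then fail fast with an Azure DevOps logging command when the deployment identity lacks access. A minimal sketch of the pattern, with a placeholder subscription id:

    #!/bin/bash
    # Placeholder GUID; substitute the target subscription.
    STATE_SUBSCRIPTION="00000000-0000-0000-0000-000000000000"

    if ! az account set --subscription "${STATE_SUBSCRIPTION}"; then
        # ##vso[...] is the Azure DevOps task-log command used throughout these pipelines.
        echo "##vso[task.logissue type=error]The deployment account does not have access to ${STATE_SUBSCRIPTION}"
        exit 65
    fi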
- if [ 0 != $return_code ]; then - - echo "#########################################################################################" - echo "# #" - echo -e "# $boldred The deployment account (MSI or SPN) does not have access to $resetformatting #" - echo -e "# $boldred ${STATE_SUBSCRIPTION} $resetformatting #" - echo "# #" - echo "#########################################################################################" - - echo "##vso[task.logissue type=error]The deployment account (MSI or SPN) does not have access to ${STATE_SUBSCRIPTION}" - exit $return_code - fi - - account_set=1 -fi - load_config_vars "${system_config_information}" "STATE_SUBSCRIPTION" load_config_vars "${system_config_information}" "REMOTE_STATE_RG" load_config_vars "${system_config_information}" "tfstate_resource_id" @@ -391,8 +401,6 @@ if [[ -z ${REMOTE_STATE_SA} ]]; then fi fi -echo "Terraform state storage " "${REMOTE_STATE_SA}" - if [ -z ${REMOTE_STATE_SA} ]; then option="REMOTE_STATE_SA" missing @@ -440,10 +448,6 @@ fi ok_to_proceed=false -echo "Terraform state subscription_id = ${STATE_SUBSCRIPTION}" -echo "Terraform state resource group name = ${REMOTE_STATE_RG}" -echo "Terraform state storage account name = ${REMOTE_STATE_SA}" - # This is used to tell Terraform if this is a new deployment or an update deployment_parameter="" # This is used to tell Terraform the version information from the state file @@ -452,38 +456,50 @@ version_parameter="" export TF_DATA_DIR="${param_dirname}/.terraform" terraform --version +echo "" +echo "Terraform details" +echo "-------------------------------------------------------------------------" +echo "Subscription: ${STATE_SUBSCRIPTION}" +echo "Storage Account: ${REMOTE_STATE_SA}" +echo "Resource Group: ${REMOTE_STATE_RG}" +echo "State file: ${key}.terraform.tfstate" +echo "Target subscription: ${ARM_SUBSCRIPTION_ID}" +echo "" check_output=0 if [ -f terraform.tfstate ]; then + if [ -f ./.terraform/terraform.tfstate ]; then + if grep "\"type\": \"azurerm\"" .terraform/terraform.tfstate ; then + echo "" + else - if [ "${deployment_system}" == sap_deployer ] - then - echo "" - echo -e "$cyan Reinitializing deployer in case of on a new deployer $resetformatting" - - terraform_module_directory="${SAP_AUTOMATION_REPO_PATH}"/deploy/terraform/bootstrap/"${deployment_system}"/ - terraform -chdir="${terraform_module_directory}" init -backend-config "path=${param_dirname}/terraform.tfstate" -reconfigure - echo "" - key_vault_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw deployer_kv_user_arm_id | tr -d \") + if [ "${deployment_system}" == sap_deployer ]; then - if [ -n "${key_vault_id}" ] - then - export TF_VAR_deployer_kv_user_arm_id="${key_vault_id}" ; echo $TF_VAR_deployer_kv_user_arm_id - fi + echo "" + echo -e "$cyan Reinitializing deployer in case of on a new deployer $resetformatting" + terraform_module_directory="${SAP_AUTOMATION_REPO_PATH}"/deploy/terraform/bootstrap/"${deployment_system}"/ + terraform -chdir="${terraform_module_directory}" init -backend-config "path=${param_dirname}/terraform.tfstate" -reconfigure + echo "" + key_vault_id=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw deployer_kv_user_arm_id | tr -d \") - fi + if [ -n "${key_vault_id}" ] + then + export TF_VAR_deployer_kv_user_arm_id="${key_vault_id}" ; echo $TF_VAR_deployer_kv_user_arm_id + fi + fi - if [ "${deployment_system}" == sap_library ] - then - echo "Reinitializing library in case of on a new deployer" - 
terraform_module_directory="${SAP_AUTOMATION_REPO_PATH}"/deploy/terraform/bootstrap/"${deployment_system}"/
- terraform -chdir="${terraform_module_directory}" init -backend-config "path=${param_dirname}/terraform.tfstate" -reconfigure
- fi
+ if [ "${deployment_system}" == sap_library ]
+ then
+ echo "Reinitializing library when running on a new deployer"
+ terraform_module_directory="${SAP_AUTOMATION_REPO_PATH}"/deploy/terraform/bootstrap/"${deployment_system}"/
+ terraform -chdir="${terraform_module_directory}" init -backend-config "path=${param_dirname}/terraform.tfstate" -reconfigure
+ fi
+ fi
+ fi
 fi
-
 terraform_module_directory="${SAP_AUTOMATION_REPO_PATH}"/deploy/terraform/run/"${deployment_system}"/
 export TF_DATA_DIR="${param_dirname}/.terraform"
@@ -603,7 +619,7 @@
 then
 echo ""
 echo "#########################################################################################"
 echo "# #"
- echo -e "# $cyan Deployed using the Terraform templates version: $val $resetformatting #"
+ echo -e "# $cyan Deployed using the Terraform templates version: $val      $resetformatting #"
 echo "# #"
 echo "#########################################################################################"
 echo ""
@@ -625,11 +641,11 @@
 then
 rm plan_output.log
 fi
-allParams=$(printf " -var-file=%s %s %s %s %s %s %s" "${var_file}" "${extra_vars}" "${tfstate_parameter}" "${landscape_tfstate_key_parameter}" "${deployer_tfstate_key_parameter}" "${deployment_parameter}" "${version_parameter}" )
+allParams=$(printf " -var-file=%s %s %s %s %s %s %s %s" "${var_file}" "${extra_vars}" "${tfstate_parameter}" "${landscape_tfstate_key_parameter}" "${deployer_tfstate_key_parameter}" "${deployment_parameter}" "${version_parameter}" "${deployer_parameter}" )
 terraform -chdir="$terraform_module_directory" plan -no-color -detailed-exitcode $allParams | tee -a plan_output.log
 return_value=$?
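The plan above relies on terraform's -detailed-exitcode contract: 0 means no changes, 1 means the plan failed, and 2 means the plan succeeded with changes pending. One subtlety: because the output is piped through tee, $? normally reports tee's exit status rather than terraform's, unless pipefail is in effect or PIPESTATUS is consulted. A minimal sketch of the pattern, assuming set -o pipefail is acceptable in the surrounding script:

# Sketch: interpret terraform plan -detailed-exitcode while still logging through tee.
set -o pipefail    # make the pipeline return terraform's status, not tee's
terraform -chdir="$terraform_module_directory" plan -no-color -detailed-exitcode $allParams | tee -a plan_output.log
return_value=$?

case $return_value in
    0) echo "Plan succeeded: no infrastructure changes" ;;
    2) echo "Plan succeeded: changes are pending" ; return_value=0 ;;
    *) echo "Terraform plan failed (exit code $return_value)" ; exit $return_value ;;
esac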
-echo "Terraform Plan return code: " $return_value +echo "Terraform Plan return code: $return_value" if [ 1 == $return_value ] ; then echo "" @@ -737,8 +753,6 @@ if [ 1 != $return_value ] ; then fi fi fi - - fi fi @@ -746,12 +760,36 @@ if [ 1 != $return_value ] ; then fi -container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists) +useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) + +if [ "$useSAS" = "true" ] ; then + echo "Storage Account authentication: key" + export ARM_USE_AZUREAD=false +else + echo "Storage Account authentication: Entra ID" + export ARM_USE_AZUREAD=true +fi + + +if [ "$useSAS" = "true" ] ; then + container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists) +else + container_exists=$(az storage container exists --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors --query exists --auth-mode login) +fi if [ "${container_exists}" == "false" ]; then + if [ "$useSAS" = "true" ] ; then az storage container create --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --only-show-errors + else + az storage container create --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --name tfvars --auth-mode login --only-show-errors + fi fi +if [ "$useSAS" = "true" ] ; then + az storage blob upload --file "${parameterfile}" --container-name tfvars/LANDSCAPE/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --only-show-errors --output none +else + az storage blob upload --file "${parameterfile}" --container-name tfvars/LANDSCAPE/"${key}" --name "${parameterfile_name}" --subscription "${STATE_SUBSCRIPTION}" --account-name "${REMOTE_STATE_SA}" --no-progress --overwrite --auth-mode login --only-show-errors --output none +fi fatal_errors=0 # HANA VM @@ -1130,6 +1168,12 @@ if [ 1 == $ok_to_proceed ]; then echo "# #" echo "#########################################################################################" echo "" + if [ 1 == $called_from_ado ] ; then + terraform -chdir="${terraform_module_directory}" apply -parallelism="${parallelism}" -no-color -compact-warnings -json $allParams | tee -a apply_output.json + else + terraform -chdir="${terraform_module_directory}" apply -parallelism="${parallelism}" -json $allParams | tee -a apply_output.json + fi + return_value=$? 
fi fi @@ -1157,6 +1201,9 @@ fi if [ "${deployment_system}" == sap_deployer ] then + + # terraform -chdir="${terraform_module_directory}" output + deployer_public_ip_address=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw deployer_public_ip_address | tr -d \") keyvault=$(terraform -chdir="${terraform_module_directory}" output -no-color -raw deployer_kv_user_name | tr -d \") @@ -1188,7 +1235,8 @@ then else az login --identity --output none fi - + full_script_path="$(realpath "${BASH_SOURCE[0]}")" + script_directory="$(dirname "${full_script_path}")" az deployment group create --resource-group ${created_resource_group_name} --name "ControlPlane_Deployer_${created_resource_group_name}" --template-file "${script_directory}/templates/empty-deployment.json" --output none return_value=0 if [ 1 == $called_from_ado ] ; then @@ -1317,7 +1365,8 @@ then echo "#########################################################################################" echo "" echo "" - + full_script_path="$(realpath "${BASH_SOURCE[0]}")" + script_directory="$(dirname "${full_script_path}")" az deployment group create --resource-group ${rg_name} --name "SAP_${rg_name}" --subscription $ARM_SUBSCRIPTION_ID --template-file "${script_directory}/templates/empty-deployment.json" --output none fi @@ -1338,7 +1387,8 @@ then echo "#########################################################################################" echo "" echo "" - + full_script_path="$(realpath "${BASH_SOURCE[0]}")" + script_directory="$(dirname "${full_script_path}")" az deployment group create --resource-group ${rg_name} --name "SAP-WORKLOAD-ZONE_${rg_name}" --template-file "${script_directory}/templates/empty-deployment.json" --output none fi @@ -1393,12 +1443,15 @@ then echo "" echo "" + full_script_path="$(realpath "${BASH_SOURCE[0]}")" + script_directory="$(dirname "${full_script_path}")" az deployment group create --resource-group ${rg_name} --name "SAP-LIBRARY_${rg_name}" --template-file "${script_directory}/templates/empty-deployment.json" --output none fi if [ -f "${system_config_information}".err ]; then cat "${system_config_information}".err + rm "${system_config_information}".err fi unset TF_DATA_DIR @@ -1407,7 +1460,6 @@ unset TF_DATA_DIR # # # Copy tfvars to storage account # # # -# # ################################################################################# useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) @@ -1449,5 +1501,14 @@ if [ "${deployment_system}" == sap_library ] ; then fi fi +echo "" +echo "#########################################################################################" +echo "# #" +echo -e "# $green Deployment completed $resetformatting #" +echo "# #" +echo "#########################################################################################" +echo "" + + exit $return_value diff --git a/deploy/scripts/remove_controlplane.sh b/deploy/scripts/remove_controlplane.sh index 630452b8ec..292d34037f 100755 --- a/deploy/scripts/remove_controlplane.sh +++ b/deploy/scripts/remove_controlplane.sh @@ -39,34 +39,34 @@ keep_agent=0 function showhelp { echo "" - echo "#################################################################################################################" - echo "# #" - echo "# #" - echo "# This file contains the logic to remove the deployer and library from an Azure region #" - echo "# #" - echo "# The script experts the following exports: #" - echo "# #" - echo "# 
SAP_AUTOMATION_REPO_PATH the path to the folder containing the cloned sap-automation #"
- echo "# #"
- echo "# The script is to be run from a parent folder to the folders containing the json parameter files for #"
- echo "# the deployer and the library and the environment. #"
- echo "# #"
- echo "# The script will persist the parameters needed between the executions in the #"
- echo "# [CONFIG_REPO_PATH]/.sap_deployment_automation folder #"
- echo "# #"
- echo "# #"
- echo "# Usage: remove_region.sh #"
- echo "# -d or --deployer_parameter_file deployer parameter file #"
- echo "# -l or --library_parameter_file library parameter file #"
- echo "# #"
- echo "# #"
- echo "# Example: #"
- echo "# #"
- echo "# SAP_AUTOMATION_REPO_PATH/scripts/remove_controlplane.sh \ #"
- echo "# --deployer_parameter_file DEPLOYER/PROD-WEEU-DEP00-INFRASTRUCTURE/PROD-WEEU-DEP00-INFRASTRUCTURE.json \ #"
- echo "# --library_parameter_file LIBRARY/PROD-WEEU-SAP_LIBRARY/PROD-WEEU-SAP_LIBRARY.json \ #"
- echo "# #"
- echo "#################################################################################################################"
+ echo "##################################################################################################################"
+ echo "# #"
+ echo "# #"
+ echo "# This file contains the logic to remove the deployer and library from an Azure region #"
+ echo "# #"
+ echo "# The script expects the following exports: #"
+ echo "# #"
+ echo "# SAP_AUTOMATION_REPO_PATH the path to the folder containing the cloned sap-automation #"
+ echo "# #"
+ echo "# The script is to be run from a parent folder to the folders containing the json parameter files for #"
+ echo "# the deployer and the library and the environment. #"
+ echo "# #"
+ echo "# The script will persist the parameters needed between the executions in the #"
+ echo "# [CONFIG_REPO_PATH]/.sap_deployment_automation folder #"
+ echo "# #"
+ echo "# #"
+ echo "# Usage: remove_controlplane.sh #"
+ echo "# -d or --deployer_parameter_file deployer parameter file #"
+ echo "# -l or --library_parameter_file library parameter file #"
+ echo "# #"
+ echo "# #"
+ echo "# Example: #"
+ echo "# #"
+ echo "# SAP_AUTOMATION_REPO_PATH/scripts/remove_controlplane.sh \ #"
+ echo "# --deployer_parameter_file DEPLOYER/PROD-WEEU-DEP00-INFRASTRUCTURE/PROD-WEEU-DEP00-INFRASTRUCTURE.tfvars \ #"
+ echo "# --library_parameter_file LIBRARY/PROD-WEEU-SAP_LIBRARY/PROD-WEEU-SAP_LIBRARY.tfvars \ #"
+ echo "# #"
+ echo "##################################################################################################################"
 }
 function missing {
@@ -177,11 +177,11 @@ init "${automation_config_directory}" "${generic_config_information}" "${deploye
 this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1
 export TF_IN_AUTOMATION="true"
-echo "Deployer environment: $deployer_environment"
+echo "Deployer environment: $deployer_environment"
 this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1
 export TF_VAR_Agent_IP=$this_ip
-echo "Agent IP: $this_ip"
+echo "Agent IP: $this_ip"
 if [ -n "${subscription}" ]
 then
@@ -244,10 +244,10 @@ key=$(echo "${deployer_file_parametername}" | cut -d.
-f1) useSAS=$(az storage account show --name "${REMOTE_STATE_SA}" --query allowSharedKeyAccess --subscription "${STATE_SUBSCRIPTION}" --out tsv) if [ "$useSAS" = "true" ] ; then - echo "Authenticate storage using SAS" + echo "Storage Account Authentication: Key" export ARM_USE_AZUREAD=false else - echo "Authenticate storage using Entra ID" + echo "Storage Account Authentication: Entra ID" export ARM_USE_AZUREAD=true fi diff --git a/deploy/scripts/remove_deployer.sh b/deploy/scripts/remove_deployer.sh index 3aa3129ef5..6ef413741b 100755 --- a/deploy/scripts/remove_deployer.sh +++ b/deploy/scripts/remove_deployer.sh @@ -75,7 +75,7 @@ deployment_system=sap_deployer param_dirname=$(dirname "${parameterfile}") -echo "Parameter file: "${parameterfile}"" +echo "Parameter file: ${parameterfile}" if [ ! -f "${parameterfile}" ] then diff --git a/deploy/scripts/remover.sh b/deploy/scripts/remover.sh index 8dc14b1896..35cb1baf32 100755 --- a/deploy/scripts/remover.sh +++ b/deploy/scripts/remover.sh @@ -29,13 +29,13 @@ function showhelp { echo "# This file contains the logic to remove the different systems #" echo "# The script expects the following exports: #" echo "# #" - echo "# SAP_AUTOMATION_REPO_PATH (path to the repo folder (sap-automation)) #" + echo "# SAP_AUTOMATION_REPO_PATH (path to the repo folder (sap-automation)) #" echo "# ARM_SUBSCRIPTION_ID (subscription containing the state file storage account) #" echo "# REMOTE_STATE_RG (resource group name for storage account containing state files) #" echo "# REMOTE_STATE_SA (storage account for state file) #" echo "# #" echo "# The script will persist the parameters needed between the executions in the #" - echo "# [CONFIG_REPO_PATH]/.sap_deployment_automation folder. #" + echo "# [CONFIG_REPO_PATH]/.sap_deployment_automation folder. #" echo "# #" echo "# #" echo "# Usage: remover.sh #" @@ -56,7 +56,7 @@ function showhelp { echo "# Example: #" echo "# #" echo "# [REPO-ROOT]deploy/scripts/remover.sh \ #" - echo "# --parameterfile DEV-WEEU-SAP01-X00.json \ #" + echo "# --parameterfile DEV-WEEU-SAP01-X00.tfvars \ #" echo "# --type sap_system #" echo "# #" echo "#########################################################################################" @@ -114,7 +114,7 @@ landscape_tfstate_key_parameter="" #show_help=false #deployer_tfstate_key_exists=false #landscape_tfstate_key_exists=false -echo "parameterfile: $parameterfile" +echo "parameterfile: $parameterfile" working_directory=$(pwd) @@ -193,11 +193,9 @@ fi this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 -echo "Deployer environment: $deployer_environment" - this_ip=$(curl -s ipinfo.io/ip) >/dev/null 2>&1 export TF_VAR_Agent_IP=$this_ip -echo "Agent IP: $this_ip" +echo "Agent IP: $this_ip" automation_config_directory=$CONFIG_REPO_PATH/.sap_deployment_automation generic_config_information="${automation_config_directory}"/config @@ -218,19 +216,35 @@ if [ "${deployment_system}" == sap_system ]; then system_config_information="${automation_config_directory}"/"${environment}""${region_code}""${network_logical_name}" fi -echo "Configuration file: $system_config_information" -echo "Deployment region: $region" -echo "Deployment region code: $region_code" +echo "Configuration file: $system_config_information" +echo "Deployment region: $region" +echo "Deployment region code: $region_code" key=$(echo "${parameterfile_name}" | cut -d. 
-f1) +echo "" +echo "Terraform details" +echo "-------------------------------------------------------------------------" +echo "Subscription: ${STATE_SUBSCRIPTION}" +echo "Storage Account: ${REMOTE_STATE_SA}" +echo "Resource Group: ${REMOTE_STATE_RG}" +echo "State file: ${key}.terraform.tfstate" +echo "Target subscription: ${ARM_SUBSCRIPTION_ID}" +echo "" + #Plugins -if [ ! -d /opt/terraform/.terraform.d/plugin-cache ] -then - mkdir -p /opt/terraform/.terraform.d/plugin-cache -fi -export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache +isInCloudShellCheck=$(checkIfCloudShell) +if checkIfCloudShell; then + mkdir -p "${HOME}/.terraform.d/plugin-cache" + export TF_PLUGIN_CACHE_DIR="${HOME}/.terraform.d/plugin-cache" +else + if [ ! -d /opt/terraform/.terraform.d/plugin-cache ]; then + mkdir -p /opt/terraform/.terraform.d/plugin-cache + sudo chown -R "$USER" /opt/terraform + fi + export TF_PLUGIN_CACHE_DIR=/opt/terraform/.terraform.d/plugin-cache +fi init "${automation_config_directory}" "${generic_config_information}" "${system_config_information}" var_file="${parameterfile_dirname}"/"${parameterfile}" @@ -254,15 +268,15 @@ load_config_vars "${system_config_information}" "ARM_SUBSCRIPTION_ID" deployer_tfstate_key_parameter='' if [ "${deployment_system}" != sap_deployer ]; then - deployer_tfstate_key_parameter=" -var deployer_tfstate_key=${deployer_tfstate_key}" + deployer_tfstate_key_parameter=" -var deployer_tfstate_key=${deployer_tfstate_key} " fi landscape_tfstate_key_parameter='' if [ "${deployment_system}" == sap_system ]; then - landscape_tfstate_key_parameter=" -var landscape_tfstate_key=${landscape_tfstate_key}" + landscape_tfstate_key_parameter=" -var landscape_tfstate_key=${landscape_tfstate_key} " fi -tfstate_parameter=" -var tfstate_resource_id=${tfstate_resource_id}" +tfstate_parameter=" -var tfstate_resource_id=${tfstate_resource_id} " #setting the user environment variables set_executing_user_environment_variables "none" @@ -309,7 +323,6 @@ else export ARM_USE_AZUREAD=true fi - echo "" echo "#########################################################################################" echo "# #" @@ -341,7 +354,7 @@ else resource_group_exist=true fi -if [ $resource_group_exist ]; +if [ "$resource_group_exist" ]; then echo "" echo "#########################################################################################" @@ -353,11 +366,11 @@ then if [ "$deployment_system" == "sap_deployer" ]; then terraform -chdir="${terraform_bootstrap_directory}" refresh -var-file="${var_file}" \ - $deployer_tfstate_key_parameter + "$deployer_tfstate_key_parameter" echo -e "#$cyan processing $deployment_system removal as defined in $parameterfile_name $resetformatting" terraform -chdir="${terraform_module_directory}" destroy -var-file="${var_file}" \ - $deployer_tfstate_key_parameter + "$deployer_tfstate_key_parameter" elif [ "$deployment_system" == "sap_library" ]; then echo -e "#$cyan processing $deployment_system removal as defined in $parameterfile_name $resetformatting" @@ -376,28 +389,23 @@ then terraform -chdir="${terraform_bootstrap_directory}" init -upgrade=true -force-copy terraform -chdir="${terraform_bootstrap_directory}" refresh -var-file="${var_file}" \ - $landscape_tfstate_key_parameter \ - $deployer_tfstate_key_parameter + "$landscape_tfstate_key_parameter" \ + "$deployer_tfstate_key_parameter" - terraform -chdir="${terraform_bootstrap_directory}" destroy -var-file="${var_file}" ${approve} \ - $landscape_tfstate_key_parameter \ - $deployer_tfstate_key_parameter + 
terraform -chdir="${terraform_bootstrap_directory}" destroy -var-file="${var_file}" ${approve} \
+ $landscape_tfstate_key_parameter \
+ $deployer_tfstate_key_parameter
 else
- echo -e "#$cyan processing $deployment_system removal as defined in $parameterfile_name $resetformatting"
- echo $tfstate_parameter $landscape_tfstate_key_parameter $deployer_tfstate_key_parameter
+ echo -e "#$cyan processing $deployment_system removal as defined in $parameterfile_name $resetformatting"
 if [ -n "${approve}" ]
 then
- terraform -chdir="${terraform_module_directory}" destroy -var-file="${var_file}" ${approve} \
- $tfstate_parameter \
- $landscape_tfstate_key_parameter \
+ terraform -chdir="${terraform_module_directory}" destroy -var-file="${var_file}" $approve $tfstate_parameter $landscape_tfstate_key_parameter \
+ $deployer_tfstate_key_parameter -json | tee -a destroy_output.json
 else
- terraform -chdir="${terraform_module_directory}" destroy -var-file="${var_file}" ${approve} \
- $tfstate_parameter \
- $landscape_tfstate_key_parameter \
- $deployer_tfstate_key_parameter
+ terraform -chdir="${terraform_module_directory}" destroy -var-file="${var_file}" $approve $tfstate_parameter $landscape_tfstate_key_parameter $deployer_tfstate_key_parameter
 fi
diff --git a/deploy/scripts/set_secrets.sh b/deploy/scripts/set_secrets.sh
index 046929bedf..51cd63b91f 100755
--- a/deploy/scripts/set_secrets.sh
+++ b/deploy/scripts/set_secrets.sh
@@ -60,54 +60,54 @@ fi
 eval set -- "$INPUT_ARGUMENTS"
 while :; do
 case "$1" in
- -e | --environment)
- environment="$2"
- shift 2
+ -e | --environment)
+ environment="$2"
+ shift 2
 ;;
- -r | --region)
- region_code="$2"
- shift 2
+ -r | --region)
+ region_code="$2"
+ shift 2
 ;;
- -v | --vault)
- keyvault="$2"
- shift 2
+ -v | --vault)
+ keyvault="$2"
+ shift 2
 ;;
- -s | --subscription)
- subscription="$2"
- shift 2
+ -s | --subscription)
+ subscription="$2"
+ shift 2
 ;;
- -c | --spn_id)
- client_id="$2"
- shift 2
+ -c | --spn_id)
+ client_id="$2"
+ shift 2
 ;;
- -p | --spn_secret)
- client_secret="$2"
- shift 2
+ -p | --spn_secret)
+ client_secret="$2"
+ shift 2
 ;;
- -t | --tenant_id)
- tenant_id="$2"
- shift 2
+ -t | --tenant_id)
+ tenant_id="$2"
+ shift 2
 ;;
- -b | --keyvault_subscription)
- STATE_SUBSCRIPTION="$2"
- shift 2
+ -b | --keyvault_subscription)
+ STATE_SUBSCRIPTION="$2"
+ shift 2
 ;;
- -w | --workload)
- workload=1
- shift
+ -w | --workload)
+ workload=1
+ shift
 ;;
- -m | --msi)
- deploy_using_msi_only=1
- shift
+ -m | --msi)
+ deploy_using_msi_only=1
+ shift
 ;;
- -h | --help)
- showhelp
- exit 3
- shift
+ -h | --help)
+ showhelp
+ exit 3
+ shift
 ;;
- --)
- shift
- break
+ --)
+ shift
+ break
 ;;
 esac
done
@@ -121,8 +121,8 @@
 while [ -z "${region_code}" ]; do
 done
 if [ -z "${region_code}" ]; then
- # Convert the region to the correct code
- get_region_code $region
+ # Convert the region to the correct code
+ get_region_code $region
 fi

# if !
valid_environment "${environment}"; then @@ -194,85 +194,85 @@ fi if [ 0 = "${deploy_using_msi_only:-}" ]; then - if [ -z "${client_id}" ]; then - load_config_vars "${environment_config_information}" "client_id" - if [ -z "$client_id" ]; then - read -r -p "SPN App ID: " client_id - fi - else - if is_valid_guid "${client_id}" ; then - echo "Valid client_id specified" - else - printf -v val %-40.40s "$client_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided client_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - return_code=65 - echo "The provided client_id is not valid " "${val}" > secret.err - exit $return_code - fi - fi - - if [ ! -n "$client_secret" ]; then - #do not output the secret to screen - read -rs -p " -> Kindly provide SPN Password: " client_secret - echo "********" - fi - - if [ -z "${tenant_id}" ]; then - load_config_vars "${environment_config_information}" "tenant_id" - if [ -z "${tenant_id}" ]; then - read -r -p "SPN Tenant ID: " tenant_id - fi - else - if is_valid_guid "${tenant_id}" ; then - echo "Valid tenant_id specified" - else - printf -v val %-40.40s "$tenant_id" - echo "#########################################################################################" - echo "# #" - echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" - echo "# #" - echo "#########################################################################################" - return_code=65 - echo "The provided tenant_id is not valid " "${val}" > secret.err - exit $return_code - fi - fi - if [ -z "${client_id}" ]; then - echo "Missing client_id" - echo "No client_id specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code - fi - - if [ -z "$client_secret" ]; then - echo "Missing client_secret" - echo "No client_secret specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code - fi - - if [ -z "${tenant_id}" ]; then - echo "Missing tenant_id" - echo "No tenant_id specified" > secret.err - showhelp - return_code=65 #/* data format error */ - echo $return_code - exit $return_code - fi + if [ -z "${client_id}" ]; then + load_config_vars "${environment_config_information}" "client_id" + if [ -z "$client_id" ]; then + read -r -p "SPN App ID: " client_id + fi + else + if is_valid_guid "${client_id}" ; then + echo "" + else + printf -v val %-40.40s "$client_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided client_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + return_code=65 + echo "The provided client_id is not valid " "${val}" > secret.err + exit $return_code + fi + fi + + if [ ! 
-n "$client_secret" ]; then + #do not output the secret to screen + read -rs -p " -> Kindly provide SPN Password: " client_secret + echo "********" + fi + + if [ -z "${tenant_id}" ]; then + load_config_vars "${environment_config_information}" "tenant_id" + if [ -z "${tenant_id}" ]; then + read -r -p "SPN Tenant ID: " tenant_id + fi + else + if is_valid_guid "${tenant_id}" ; then + echo "" + else + printf -v val %-40.40s "$tenant_id" + echo "#########################################################################################" + echo "# #" + echo -e "# The provided tenant_id is not valid:$boldred ${val} $resetformatting #" + echo "# #" + echo "#########################################################################################" + return_code=65 + echo "The provided tenant_id is not valid " "${val}" > secret.err + exit $return_code + fi + fi + if [ -z "${client_id}" ]; then + echo "Missing client_id" + echo "No client_id specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi + + if [ -z "$client_secret" ]; then + echo "Missing client_secret" + echo "No client_secret specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi + + if [ -z "${tenant_id}" ]; then + echo "Missing tenant_id" + echo "No tenant_id specified" > secret.err + showhelp + return_code=65 #/* data format error */ + echo $return_code + exit $return_code + fi fi if [ -z "${subscription}" ]; then read -r -p "SPN Subscription: " subscription else if is_valid_guid "${subscription}" ; then - echo "Valid subscription specified" + echo "" else printf -v val %-40.40s "${subscription}" echo "#########################################################################################" @@ -294,13 +294,16 @@ echo "# echo "#########################################################################################" echo "" +echo "Key vault: ${keyvault}" +echo "Subscription: ${STATE_SUBSCRIPTION}" + save_config_vars "${environment_config_information}" \ - keyvault \ - environment \ - subscription \ - client_id \ - tenant_id \ - STATE_SUBSCRIPTION +keyvault \ +environment \ +subscription \ +client_id \ +tenant_id \ +STATE_SUBSCRIPTION secretname="${environment}"-subscription-id @@ -325,110 +328,110 @@ if [ "${deleted}" == "${secretname}" ]; then else exists=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) if [ "${exists}" == "${secretname}" ]; then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${subscription}" ] ; then - echo -e "\t $cyan Setting secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 - fi + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${subscription}" ] ; then + echo -e "\t $cyan Setting secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 + fi else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${subscription}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" >stdout.az 2>&1 fi fi if [ -f stdout.az ]; then - result=$(grep "ERROR: The user, group or application" stdout.az) - - if [ -n "${result}" ]; then - printf -v val "%-20.20s" "$keyvault" - echo "#########################################################################################" - echo "# #" - echo -e "# No access to add the secrets in the$boldred" "${val}" "$resetformatting keyvault #" - echo "# Please add an access policy for the account you use #" - echo "# #" - echo "#########################################################################################" - echo "" - rm stdout.az - echo "No access to add the secrets in the " "${val}" "keyvault" > secret.err - return_code=77 - exit $return_code - fi - - result=$(grep "The Vault may not exist" stdout.az) - if [ -n "${result}" ]; then - printf -v val "%-20.20s could not be found!" 
"$keyvault" - echo "#########################################################################################" - echo "# #" - echo -e "# $boldred Keyvault" "${val}" "$resetformatting #" - echo "# #" - echo "#########################################################################################" - echo "" - rm stdout.az - return_code=65 #/* name unknown */ - echo "Keyvault" "${val}" > secret.err - exit $return_code - - fi + result=$(grep "ERROR: The user, group or application" stdout.az) + + if [ -n "${result}" ]; then + printf -v val "%-20.20s" "$keyvault" + echo "#########################################################################################" + echo "# #" + echo -e "# No access to add the secrets in the$boldred" "${val}" "$resetformatting keyvault #" + echo "# Please add an access policy for the account you use #" + echo "# #" + echo "#########################################################################################" + echo "" + rm stdout.az + echo "No access to add the secrets in the " "${val}" "keyvault" > secret.err + return_code=77 + exit $return_code + fi + + result=$(grep "The Vault may not exist" stdout.az) + if [ -n "${result}" ]; then + printf -v val "%-20.20s could not be found!" "$keyvault" + echo "#########################################################################################" + echo "# #" + echo -e "# $boldred Keyvault" "${val}" "$resetformatting #" + echo "# #" + echo "#########################################################################################" + echo "" + rm stdout.az + return_code=65 #/* name unknown */ + echo "Keyvault" "${val}" > secret.err + exit $return_code + + fi fi if [ 0 = "${deploy_using_msi_only:-}" ]; then - #turn off output, we do not want to show the details being uploaded to keyvault - secretname="${environment}"-client-id - deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 - fi - - v="" - secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${secret}" == "${secretname}" ]; - then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${client_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi - else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi - - secretname="${environment}"-tenant-id - deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 - fi - v="" - secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${secret}" == "${secretname}" ]; - then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${tenant_id}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi - else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi - - secretname="${environment}"-client-secret - deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${deleted}" == "${secretname}" ]; then - echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" - az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION - sleep 10 - fi - - v="" - secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) - if [ "${secret}" == "${secretname}" ]; - then - v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) - if [ "${v}" != "${client_secret}" ] ; then - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi - else - az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none - fi + #turn off output, we do not want to show the details being uploaded to keyvault + secretname="${environment}"-client-id + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" + sleep 10 + fi + + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${client_id}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${client_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi + + secretname="${environment}"-tenant-id + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION + sleep 10 + fi + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${tenant_id}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value "${tenant_id}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi + + secretname="${environment}"-client-secret + deleted=$(az keyvault secret list-deleted --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${deleted}" == "${secretname}" ]; then + echo -e "\t $cyan Recovering secret ${secretname} in keyvault ${keyvault} $resetformatting \n" + az keyvault secret recover --name "${secretname}" --vault-name "${keyvault}" --subscription $STATE_SUBSCRIPTION + sleep 10 + fi + + v="" + secret=$(az keyvault secret list --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query "[].{Name:name} | [? 
contains(Name,'${secretname}')] | [0]" -o tsv) + if [ "${secret}" == "${secretname}" ]; + then + v=$(az keyvault secret show --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --query value -o tsv) + if [ "${v}" != "${client_secret}" ] ; then + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi + else + az keyvault secret set --name "${secretname}" --vault-name "${keyvault}" --subscription "${STATE_SUBSCRIPTION}" --value="${client_secret}" --expires "$(date -d '+1 year' -u +%Y-%m-%dT%H:%M:%SZ)" --only-show-errors --output none + fi fi exit $return_code diff --git a/deploy/scripts/validate.sh b/deploy/scripts/validate.sh index 433f7c17db..f2f9e02d5f 100755 --- a/deploy/scripts/validate.sh +++ b/deploy/scripts/validate.sh @@ -9,6 +9,11 @@ boldreduscore="\e[1;4;31m" boldred="\e[1;31m" cyan="\e[1;36m" resetformatting="\e[0m" +full_script_path="$(realpath "${BASH_SOURCE[0]}")" +script_directory="$(dirname "${full_script_path}")" + +#call stack has full scriptname when using source +source "${script_directory}/deploy_utils.sh" min() { printf "%s\n" "${@:2}" | sort "$1" | head -n1 @@ -26,7 +31,7 @@ heading() { echo "----------------------------------------------------------------------------" } -showhelp() +showhelp() { echo "" echo "#########################################################################################" @@ -132,7 +137,7 @@ else fi ############################################################################### -# SAP System # +# SAP System # ############################################################################### if [ "${deployment_system}" == sap_system ] ; then @@ -167,7 +172,7 @@ if [ "${deployment_system}" == sap_system ] ; then # subnet identifier and output prefix string changing. As such # they can be converted into a parameterised function call. 
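As the comment above notes, the subnet blocks that follow differ only in the jq path and the printed label, so they can be folded into one parameterised call. A sketch of such a helper; report_subnet is illustrative, not existing validate.sh code:

# Sketch: one function for the repeated "name, else derive from arm_id" pattern.
report_subnet() {
    local label=$1 json_path=$2
    local subnet_name subnet_arm_id
    subnet_name=$(jq --raw-output "${json_path}.name" "${parameterfile}")
    subnet_arm_id=$(jq --raw-output "${json_path}.arm_id" "${parameterfile}")
    if [ -n "${subnet_arm_id}" ] && [ "${subnet_arm_id}" != "null" ]; then
        # .../virtualNetworks/<vnet>/subnets/<subnet> -> field 11 is the subnet name
        subnet_name=$(echo "${subnet_arm_id}" | cut -d/ -f11 | xargs)
    fi
    if [ -n "${subnet_name}" ] && [ "${subnet_name}" != "null" ]; then
        echo "${label} subnet: ${subnet_name}"
    else
        echo "${label} subnet: Defined by the workload/automation"
    fi
}

report_subnet "Admin" ".infrastructure.vnets.sap.subnet_admin"
report_subnet "db" ".infrastructure.vnets.sap.subnet_db"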
- # Admin subnet + # Admin subnet subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.arm_id "${parameterfile}") @@ -176,7 +181,7 @@ if [ "${deployment_system}" == sap_system ] ; then then subnet_name=$(echo $subnet_arm_id | cut -d/ -f11 | xargs) fi - + subnet_nsg_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.nsg.name "${parameterfile}") subnet_nsg_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.nsg.arm_id "${parameterfile}") @@ -205,9 +210,9 @@ if [ "${deployment_system}" == sap_system ] ; then else echo "Admin nsg: " "Defined by the workload/automation" fi - - # db subnet - + + # db subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.prefix "${parameterfile}") @@ -215,7 +220,7 @@ if [ "${deployment_system}" == sap_system ] ; then then subnet_name=$(echo $subnet_arm_id | cut -d/ -f11 | xargs) fi - + subnet_nsg_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.nsg.name "${parameterfile}") subnet_nsg_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.nsg.arm_id "${parameterfile}") @@ -244,9 +249,9 @@ if [ "${deployment_system}" == sap_system ] ; then else echo "db nsg: " "Defined by the workload/automation" fi - - # app subnet - + + # app subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.prefix "${parameterfile}") @@ -265,7 +270,7 @@ if [ "${deployment_system}" == sap_system ] ; then if [ \( -n "${subnet_name}" \) -a \( "${subnet_name}" != "null" \) ] then echo "app subnet: " "${subnet_name}" - else + else echo "app subnet: " "Subnet defined by the workload/automation" fi @@ -282,9 +287,9 @@ if [ "${deployment_system}" == sap_system ] ; then else echo "app nsg: " "Defined by the workload/automation" fi - - # web subnet - + + # web subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.prefix "${parameterfile}") @@ -320,9 +325,9 @@ if [ "${deployment_system}" == sap_system ] ; then else echo "web nsg: " "Defined by the workload/automation" fi - + echo "" - + heading "Database tier" platform=$(jq --raw-output '.databases[0].platform' "${parameterfile}") echo "Platform: " "${platform}" @@ -357,7 +362,7 @@ if [ "${deployment_system}" == sap_system ] ; then version=$(jq --raw-output '.databases[0].os.version' "${parameterfile}") echo "Image version: " "${version}" fi - + if jq --exit-status '.databases[0].zones' "${parameterfile}" >/dev/null; then echo "Deployment: " "Zonal" zones=$(jq --compact-output '.databases[0].zones' "${parameterfile}") @@ -381,9 +386,9 @@ if [ "${deployment_system}" == sap_system ] ; then else echo "Authentication: " "key" fi - + echo - + heading "Application tier" if jq --exit-status '.application.authentication.type' "${parameterfile}" >/dev/null; then authentication=$(jq --raw-output '.application.authentication.type' "${parameterfile}") @@ -391,7 +396,7 @@ if [ 
"${deployment_system}" == sap_system ] ; then else echo "Authentication: " "key" fi - + echo "Application servers" if [ $app_zone_count -gt 1 ] ; then echo " Application avset: " "($app_zone_count) (name defined by automation)" @@ -426,7 +431,7 @@ if [ "${deployment_system}" == sap_system ] ; then else echo " Deployment: " "Regional" fi - + echo "Central Services" echo " SCS load balancer: " "(name defined by automation)" if [ $scs_zone_count -gt 1 ] ; then @@ -487,7 +492,7 @@ if [ "${deployment_system}" == sap_system ] ; then else echo " Deployment: " "Regional" fi - + echo "Web dispatcher" web_server_count=$(jq --raw-output .application.webdispatcher_count "${parameterfile}") echo " Web dispatcher lb: " "(name defined by automation)" @@ -497,7 +502,7 @@ if [ "${deployment_system}" == sap_system ] ; then echo " Web dispatcher avset: " "(name defined by automation)" fi echo " Number of servers: " "${web_server_count}" - + if jq --exit-status '.application.web_os' "${parameterfile}" >/dev/null; then if jq --exit-status '.application.web_os.source_image_id' "${parameterfile}" >/dev/null; then image=$(jq --raw-output .application.web_os.source_image_id "${parameterfile}") @@ -546,7 +551,7 @@ if [ "${deployment_system}" == sap_system ] ; then else echo " Deployment: " "Regional" fi - + echo "" heading "Key Vault" if jq --exit-status '.key_vault.kv_spn_id' "${parameterfile}" >/dev/null; then @@ -555,29 +560,29 @@ if [ "${deployment_system}" == sap_system ] ; then else echo " SPN Key Vault: " "Deployer keyvault" fi - + if jq --exit-status '.key_vault.kv_user_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_user_id "${parameterfile}") echo " User Key Vault: " "${kv}" else echo " User Key Vault: " "Workload keyvault" fi - + if jq --exit-status '.key_vault.kv_prvt_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_prvt_id "${parameterfile}") echo " Automation Key Vault: " "${kv}" else echo " Automation Key Vault: " "Workload keyvault" fi - + fi ############################################################################### -# SAP Landscape # +# SAP Landscape # ############################################################################### if [ "${deployment_system}" == sap_landscape ] ; then heading "Networking" - + vnet_name=$(jq --raw-output .infrastructure.vnets.sap.name "${parameterfile}") vnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.arm_id "${parameterfile}") vnet_address_space=$(jq --raw-output .infrastructure.vnets.sap.address_space "${parameterfile}") @@ -588,7 +593,7 @@ if [ "${deployment_system}" == sap_landscape ] ; then echo "VNet Logical name: " "${vnet_name}" echo "Address space: " "${vnet_address_space}" - # Admin subnet + # Admin subnet subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_admin.arm_id "${parameterfile}") @@ -623,9 +628,9 @@ if [ "${deployment_system}" == sap_landscape ] ; then else echo "Admin nsg: " "Defined by the system/automation" fi - - # db subnet - + + # db subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.prefix "${parameterfile}") @@ -633,7 +638,7 @@ if [ "${deployment_system}" == sap_landscape ] ; then then subnet_name=$(echo $subnet_arm_id | cut -d/ -f11 | xargs) fi - + 
subnet_nsg_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.nsg.name "${parameterfile}") subnet_nsg_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_db.nsg.arm_id "${parameterfile}") if [ -z "${subnet_nsg_arm_id}" ] @@ -659,9 +664,9 @@ if [ "${deployment_system}" == sap_landscape ] ; then else echo "db nsg: " "Defined by the system/automation" fi - - # app subnet - + + # app subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_app.prefix "${parameterfile}") @@ -695,9 +700,9 @@ if [ "${deployment_system}" == sap_landscape ] ; then else echo "app nsg: " "Defined by the system/automation" fi - - # web subnet - + + # web subnet + subnet_name=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.name "${parameterfile}") subnet_arm_id=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.arm_id "${parameterfile}") subnet_prefix=$(jq --raw-output .infrastructure.vnets.sap.subnet_web.prefix "${parameterfile}") @@ -716,7 +721,7 @@ if [ "${deployment_system}" == sap_landscape ] ; then if [ -z "${subnet_name}" ] then echo "web subnet: " "${subnet_name}" - else + else echo "web subnet: " "Subnet defined by the system/automation" fi if [ -z "${subnet_prefix}" ] @@ -731,8 +736,8 @@ if [ "${deployment_system}" == sap_landscape ] ; then else echo "web nsg: " "Defined by the system/automation" fi - - + + echo "" heading "Key Vault" if jq --exit-status '.key_vault.kv_spn_id' "${parameterfile}" >/dev/null; then @@ -741,14 +746,14 @@ if [ "${deployment_system}" == sap_landscape ] ; then else echo " SPN Key Vault: " "Deployer keyvault" fi - + if jq --exit-status '.key_vault.kv_user_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_user_id "${parameterfile}") echo " User Key Vault: " "${kv}" else echo " User Key Vault: " "Workload keyvault" fi - + if jq --exit-status '.key_vault.kv_prvt_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_prvt_id "${parameterfile}") echo " Automation Key Vault: " "${kv}" @@ -758,7 +763,7 @@ if [ "${deployment_system}" == sap_landscape ] ; then fi ############################################################################### -# SAP Library # +# SAP Library # ############################################################################### if [ "${deployment_system}" == sap_library ] ; then @@ -770,29 +775,29 @@ if [ "${deployment_system}" == sap_library ] ; then else echo " SPN Key Vault: " "Deployer keyvault" fi - + if jq --exit-status '.key_vault.kv_user_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_user_id "${parameterfile}") echo " User Key Vault: " "${kv}" else echo " User Key Vault: " "Library keyvault" fi - + if jq --exit-status '.key_vault.kv_prvt_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_prvt_id "${parameterfile}") echo " Automation Key Vault: " "${kv}" else echo " Automation Key Vault: " "Library keyvault" fi - + fi ############################################################################### -# SAP Deployer # +# SAP Deployer # ############################################################################### if [ "${deployment_system}" == sap_deployer ] ; then - heading "Networking" + heading "Networking" if jq --exit-status '.infrastructure.vnets.management' "${parameterfile}" >/dev/null; then if jq --exit-status 
'.infrastructure.vnets.management.arm_id' "${parameterfile}" >/dev/null; then arm_id=$(jq --raw-output .infrastructure.vnets.management.arm_id "${parameterfile}") @@ -812,23 +817,23 @@ if [ "${deployment_system}" == sap_deployer ] ; then else error "The Virtual network must be defined" fi - + echo "" - heading "Key Vault" + heading "Key Vault" if jq --exit-status '.key_vault.kv_spn_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_spn_id "${parameterfile}") echo " SPN Key Vault: " "${kv}" else echo " SPN Key Vault: " "Deployer keyvault" fi - + if jq --exit-status '.key_vault.kv_user_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_user_id "${parameterfile}") echo " User Key Vault: " "${kv}" else echo " User Key Vault: " "Deployer keyvault" fi - + if jq --exit-status '.key_vault.kv_prvt_id' "${parameterfile}" >/dev/null; then kv=$(jq --raw-output .key_vault.kv_prvt_id "${parameterfile}") echo " Automation Key Vault: " "${kv}" diff --git a/deploy/terraform/bootstrap/sap_deployer/providers.tf b/deploy/terraform/bootstrap/sap_deployer/providers.tf index cdc795d5a1..c5bfa21270 100644 --- a/deploy/terraform/bootstrap/sap_deployer/providers.tf +++ b/deploy/terraform/bootstrap/sap_deployer/providers.tf @@ -20,7 +20,7 @@ data "azurerm_client_config" "current" { provider "azurerm" { features { resource_group { - prevent_deletion_if_contains_resources = true + prevent_deletion_if_contains_resources = var.prevent_deletion_if_contains_resources } key_vault { purge_soft_delete_on_destroy = !var.enable_purge_control_for_keyvaults @@ -86,7 +86,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf index 68876b134e..2d26d87b37 100644 --- a/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_deployer/tfvar_variables.tf @@ -22,6 +22,11 @@ variable "location" { type = string } +variable "prevent_deletion_if_contains_resources" { + description = "Controls if resource groups are deleted even if they contain resources" + type = bool + default = true + } #######################################4#######################################8 # # # Resource group definitioms # @@ -240,16 +245,6 @@ variable "deployer_image" { } } -variable "plan" { - description = "The plan for the marketplace item" - default = { - use = false - "name" = "" - "publisher" = "" - "product" = "" - } - } - variable "deployer_private_ip_address" { description = "If provides, the value of the deployer Virtual machine IPs" default = [""] @@ -381,7 +376,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.9.5" + default = "1.9.8" } variable "name_override_file" { @@ -411,10 +406,10 @@ variable "subnets_to_add_to_firewall_for_keyvaults_and_storage" { default = [] } -variable "tags" { - description = "If provided, tags for all resources" - default = {} - } +variable "tags" { + description = "If provided, tags for all resources" + default = {} + } ######################################################################################### # # # DNS settings # diff --git a/deploy/terraform/bootstrap/sap_deployer/transform.tf b/deploy/terraform/bootstrap/sap_deployer/transform.tf index c2687ef478..ae37d4b372 100644 --- a/deploy/terraform/bootstrap/sap_deployer/transform.tf +++ 
b/deploy/terraform/bootstrap/sap_deployer/transform.tf @@ -178,8 +178,6 @@ locals { ), "") } - plan = var.plan - private_ip_address = try(coalesce( var.deployer_private_ip_address, try(var.deployers[0].private_ip_address, "") diff --git a/deploy/terraform/bootstrap/sap_library/imports.tf b/deploy/terraform/bootstrap/sap_library/imports.tf index 02cb5d22ba..2ba1c4a262 100644 --- a/deploy/terraform/bootstrap/sap_library/imports.tf +++ b/deploy/terraform/bootstrap/sap_library/imports.tf @@ -18,28 +18,28 @@ data "azurerm_key_vault_secret" "subscription_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-subscription-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "client_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-client-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "client_secret" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-client-secret", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "tenant_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-tenant-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } // Import current service principal diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 73aecd7152..d65ee77d1c 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -26,6 +26,8 @@ provider "azurerm" { } storage_use_azuread = true + use_msi = var.use_spn ? false : true + } provider "azurerm" { @@ -44,6 +46,7 @@ provider "azurerm" { alias = "main" storage_use_azuread = true + use_msi = var.use_spn ? false : true } @@ -53,6 +56,8 @@ provider "azurerm" { alias = "deployer" storage_use_azuread = true + use_msi = false + subscription_id = var.use_deployer ? data.terraform_remote_state.deployer[0].outputs.created_resource_group_subscription_id : null } provider "azurerm" { @@ -64,6 +69,7 @@ provider "azurerm" { alias = "dnsmanagement" storage_use_azuread = true + use_msi = var.use_spn ? false : true } provider "azurerm" { @@ -75,6 +81,7 @@ provider "azurerm" { alias = "privatelinkdnsmanagement" storage_use_azuread = true + use_msi = var.use_spn ? 
false : true } provider "azuread" { @@ -104,7 +111,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf index 2e9efbf7fa..e74f9a63fd 100644 --- a/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf +++ b/deploy/terraform/bootstrap/sap_library/tfvar_variables.tf @@ -42,6 +42,11 @@ variable "use_spn" { default = false } +variable "tags" { + description = "If provided, tags for all resources" + default = {} + } + #######################################4#######################################8 # # # Resource group definitions # diff --git a/deploy/terraform/bootstrap/sap_library/transform.tf b/deploy/terraform/bootstrap/sap_library/transform.tf index 0f3c93c59a..7cbfc2be08 100644 --- a/deploy/terraform/bootstrap/sap_library/transform.tf +++ b/deploy/terraform/bootstrap/sap_library/transform.tf @@ -1,123 +1,67 @@ locals { infrastructure = { - environment = coalesce(var.environment, try(var.infrastructure.environment, "")) - region = coalesce(var.location, try(var.infrastructure.region, "")) - codename = try(var.codename, try(var.infrastructure.codename, "")) + environment = var.environment + region = var.location + codename = var.codename resource_group = { - name = try(coalesce(var.resourcegroup_name, try(var.infrastructure.resource_group.name, "")), "") - arm_id = try(coalesce(var.resourcegroup_arm_id, try(var.infrastructure.resource_group.arm_id, "")), "") + name = var.resourcegroup_name + arm_id = var.resourcegroup_arm_id } - tags = try(coalesce(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) + tags = try(coalesce(var.resourcegroup_tags, var.tags, {}), {}) } deployer = { use = var.use_deployer } key_vault = { - kv_spn_id = try(coalesce(local.spn_key_vault_arm_id, var.spn_keyvault_id, try(var.key_vault.kv_spn_id, "")), "") + kv_spn_id = coalesce(var.spn_keyvault_id, local.spn_key_vault_arm_id) } storage_account_sapbits = { - arm_id = try(coalesce(var.library_sapmedia_arm_id, try(var.storage_account_sapbits.arm_id, "")), "") - name = var.library_sapmedia_name - account_tier = coalesce( - var.library_sapmedia_account_tier, - try(var.storage_account_sapbits.account_tier, "Standard") - ) - account_replication_type = coalesce( - var.library_sapmedia_account_replication_type, - try(var.storage_account_sapbits.account_replication_type, "ZRS") - ) - account_kind = coalesce( - var.library_sapmedia_account_kind, - try(var.storage_account_sapbits.account_kind, "StorageV2") - ) + arm_id = var.library_sapmedia_arm_id + name = var.library_sapmedia_name + account_tier = var.library_sapmedia_account_tier + account_replication_type = var.library_sapmedia_account_replication_type + account_kind = var.library_sapmedia_account_kind file_share = { - enable_deployment = ( - var.library_sapmedia_file_share_enable_deployment || - try(var.storage_account_sapbits.file_share.enable_deployment, true) - ) - is_existing = ( - var.library_sapmedia_file_share_is_existing || - try(var.storage_account_sapbits.file_share.is_existing, false) - ) - name = coalesce( - var.library_sapmedia_file_share_name, - try( - var.storage_account_sapbits.file_share.name, - module.sap_namegenerator.naming.resource_suffixes.sapbits - ) - ) + enable_deployment = var.library_sapmedia_file_share_enable_deployment + is_existing = var.library_sapmedia_file_share_is_existing + name = 
coalesce(var.library_sapmedia_file_share_name,module.sap_namegenerator.naming.resource_suffixes.sapbits) } sapbits_blob_container = { - enable_deployment = ( - var.library_sapmedia_blob_container_enable_deployment || - try(var.storage_account_sapbits.sapbits_blob_container.enable_deployment, true) - ) - is_existing = ( - var.library_sapmedia_blob_container_is_existing || - try(var.storage_account_sapbits.sapbits_blob_container.is_existing, false) - ) - name = coalesce( - var.library_sapmedia_blob_container_name, - try( - var.storage_account_sapbits.sapbits_blob_container.name, - module.sap_namegenerator.naming.resource_suffixes.sapbits - ) - ) + enable_deployment = var.library_sapmedia_blob_container_enable_deployment + is_existing = var.library_sapmedia_blob_container_is_existing + name = coalesce(var.library_sapmedia_blob_container_name, module.sap_namegenerator.naming.resource_suffixes.sapbits) } shared_access_key_enabled = var.shared_access_key_enabled public_network_access_enabled = var.public_network_access_enabled } + storage_account_tfstate = { - arm_id = try( - coalesce( - var.library_terraform_state_arm_id, - try(var.storage_account_tfstate.arm_id, "")) - , "" - ) - name = var.library_terraform_state_name - account_tier = coalesce( - var.library_terraform_state_account_tier, - try(var.storage_account_tfstate.account_tier, "Standard") - ) - account_replication_type = coalesce( - var.library_terraform_state_account_replication_type, - try(var.storage_account_tfstate.account_replication_type, "ZRS") - ) - account_kind = coalesce( - var.library_terraform_state_account_kind, - try(var.storage_account_tfstate.account_kind, "StorageV2") - ) + arm_id = var.library_terraform_state_arm_id + name = var.library_terraform_state_name + account_tier = var.library_terraform_state_account_tier + account_replication_type = var.library_terraform_state_account_replication_type + account_kind = var.library_terraform_state_account_kind tfstate_blob_container = { - is_existing = ( - var.library_terraform_state_blob_container_is_existing || - try(var.storage_account_tfstate.tfstate_blob_container.is_existing, false) - ) - name = coalesce( - var.library_terraform_state_blob_container_name, - try(var.storage_account_tfstate.tfstate_blob_container.name, "tfstate") - ) + is_existing = var.library_terraform_state_blob_container_is_existing + name = var.library_terraform_state_blob_container_name } tfvars_blob_container = { - is_existing = var.library_terraform_vars_blob_container_is_existing - name = var.library_terraform_vars_blob_container_name + is_existing = var.library_terraform_vars_blob_container_is_existing + name = var.library_terraform_vars_blob_container_name } ansible_blob_container = { - is_existing = ( - var.library_ansible_blob_container_is_existing || - try(var.storage_account_tfstate.ansible_blob_container.is_existing, false) - ) - name = coalesce( - var.library_ansible_blob_container_name, - try(var.storage_account_tfstate.ansible_blob_container.name, "ansible") - ) + is_existing = var.library_ansible_blob_container_is_existing + name = var.library_ansible_blob_container_name } - shared_access_key_enabled = var.shared_access_key_enabled + + shared_access_key_enabled = var.shared_access_key_enabled public_network_access_enabled = var.public_network_access_enabled } + dns_settings = { use_custom_dns_a_registration = var.use_custom_dns_a_registration dns_label = var.dns_label diff --git a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 
8709f410de..41f3b50f86 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -16,7 +16,7 @@ Description: provider "azurerm" { features { resource_group { - prevent_deletion_if_contains_resources = true + prevent_deletion_if_contains_resources = var.prevent_deletion_if_contains_resources } key_vault { purge_soft_delete_on_destroy = !var.enable_purge_control_for_keyvaults @@ -27,7 +27,9 @@ provider "azurerm" { } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" storage_use_azuread = !var.shared_access_key_enabled - use_msi = var.use_spn ? false : true + subscription_id = var.subscription_id + + use_msi = true } provider "azurerm" { @@ -44,7 +46,7 @@ provider "azurerm" { } partner_id = "f94f50f2-2539-42f8-9c8e-c65b28c681f7" - subscription_id = local.spn.subscription_id + subscription_id = data.azurerm_key_vault_secret.subscription_id[0].value client_id = var.use_spn ? local.spn.client_id : null client_secret = var.use_spn ? local.spn.client_secret: null tenant_id = var.use_spn ? local.spn.tenant_id: null @@ -85,7 +87,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/run/sap_deployer/tfvar_variables.tf b/deploy/terraform/run/sap_deployer/tfvar_variables.tf index 5bd696982b..faae59eed2 100644 --- a/deploy/terraform/run/sap_deployer/tfvar_variables.tf +++ b/deploy/terraform/run/sap_deployer/tfvar_variables.tf @@ -22,6 +22,18 @@ variable "location" { type = string } +variable "subscription_id" { + description = "Defines the Azure subscription_id" + type = string + default = null + } + + +variable "prevent_deletion_if_contains_resources" { + description = "Controls if resource groups are deleted even if they contain resources" + type = bool + default = true + } #######################################4#######################################8 # # # Resource group definitions # @@ -240,16 +252,6 @@ variable "deployer_image" { } } -variable "plan" { - description = "The plan for the marketplace item" - default = { - use = false - "name" = "" - "publisher" = "" - "product" = "" - } - } - variable "deployer_private_ip_address" { description = "If provided, the value of the deployer Virtual machine IPs" default = [""] @@ -378,7 +380,7 @@ variable "deployer_diagnostics_account_arm_id" { variable "tf_version" { description = "Terraform version to install on deployer" - default = "1.9.5" + default = "1.9.8" } variable "name_override_file" { diff --git a/deploy/terraform/run/sap_deployer/transform.tf b/deploy/terraform/run/sap_deployer/transform.tf index 7e65b601e5..eadf3b5215 100644 --- a/deploy/terraform/run/sap_deployer/transform.tf +++ b/deploy/terraform/run/sap_deployer/transform.tf @@ -175,8 +175,6 @@ locals { ), "") } - plan = var.plan - private_ip_address = try(coalesce( var.deployer_private_ip_address, try(var.deployers[0].private_ip_address, "") diff --git a/deploy/terraform/run/sap_deployer/variables_local.tf b/deploy/terraform/run/sap_deployer/variables_local.tf index 6dbb79d863..879de6874d 100644 --- a/deploy/terraform/run/sap_deployer/variables_local.tf +++ b/deploy/terraform/run/sap_deployer/variables_local.tf @@ -36,10 +36,10 @@ locals { ) spn = { - subscription_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, - client_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? 
data.azurerm_key_vault_secret.client_id[0].value : null, - client_secret = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.client_secret[0].value : null, - tenant_id = length(var.deployer_kv_user_arm_id) > 0 && var.use_spn ? data.azurerm_key_vault_secret.tenant_id[0].value : null + subscription_id = data.azurerm_key_vault_secret.subscription_id[0].value + client_id = var.use_spn ? data.azurerm_key_vault_secret.client_id[0].value : null, + client_secret = var.use_spn ? data.azurerm_key_vault_secret.client_secret[0].value : null, + tenant_id = var.use_spn ? data.azurerm_key_vault_secret.tenant_id[0].value : null } } diff --git a/deploy/terraform/run/sap_landscape/imports.tf b/deploy/terraform/run/sap_landscape/imports.tf index 6b2010c8f1..344f40aa43 100644 --- a/deploy/terraform/run/sap_landscape/imports.tf +++ b/deploy/terraform/run/sap_landscape/imports.tf @@ -68,8 +68,7 @@ data "azurerm_key_vault_secret" "cp_tenant_id" { } // Import current service principal -data "azuread_service_principal" "sp" { - count = var.use_spn ? 1 : 0 - client_id = local.spn.client_id -} - +data "azuread_service_principal" "sp" { + count = var.use_spn ? 1 : 0 + client_id = local.spn.client_id + } diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index 563b3b20f7..2588fd88ee 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -22,7 +22,7 @@ provider "azurerm" { provider "azurerm" { features { resource_group { - prevent_deletion_if_contains_resources = true + prevent_deletion_if_contains_resources = var.prevent_deletion_if_contains_resources } key_vault { purge_soft_delete_on_destroy = !var.enable_purge_control_for_keyvaults @@ -130,7 +130,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } azapi = { source = "Azure/azapi" diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index a022608ca3..5e5c64bafd 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -33,6 +33,11 @@ variable "place_delete_lock_on_resources" { default = false } +variable "prevent_deletion_if_contains_resources" { + description = "Controls if resource groups are deleted even if they contain resources" + type = bool + default = true + } #######################################4#######################################8 # # # Resource group definitions # diff --git a/deploy/terraform/run/sap_landscape/variables_local.tf b/deploy/terraform/run/sap_landscape/variables_local.tf index cc0e882808..d00529619c 100644 --- a/deploy/terraform/run/sap_landscape/variables_local.tf +++ b/deploy/terraform/run/sap_landscape/variables_local.tf @@ -29,10 +29,9 @@ locals { deployer_subscription_id = coalesce( try(data.terraform_remote_state.deployer[0].outputs.created_resource_group_subscription_id,""), - length(local.spn_key_vault_arm_id) > 0 ? ( - split("/", local.spn_key_vault_arm_id)[2]) : ( - "" - )) + length(local.spn_key_vault_arm_id) > 0 ? (split("/", local.spn_key_vault_arm_id)[2]) : (""), + local.saplib_subscription_id + ) spn = { subscription_id = data.azurerm_key_vault_secret.subscription_id.value, @@ -51,7 +50,7 @@ locals { service_principal = { subscription_id = local.spn.subscription_id, tenant_id = local.spn.tenant_id, - object_id = var.use_spn ? 
try(data.azuread_service_principal.sp[0].id, null) : null + object_id = var.use_spn ? data.azuread_service_principal.sp[0].object_id : null } account = { diff --git a/deploy/terraform/run/sap_library/imports.tf b/deploy/terraform/run/sap_library/imports.tf index 2c67219195..c601e7ea02 100644 --- a/deploy/terraform/run/sap_library/imports.tf +++ b/deploy/terraform/run/sap_library/imports.tf @@ -21,28 +21,28 @@ data "azurerm_key_vault_secret" "subscription_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-subscription-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "client_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-client-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "client_secret" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-client-secret", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } data "azurerm_key_vault_secret" "tenant_id" { provider = azurerm.deployer count = local.use_spn ? 1 : 0 name = format("%s-tenant-id", upper(local.infrastructure.environment)) - key_vault_id = local.spn_key_vault_arm_id + key_vault_id = local.key_vault.kv_spn_id } // Import current service principal diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index bb2c513d19..6dccee6cd6 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -49,8 +49,8 @@ provider "azurerm" { } alias = "deployer" storage_use_azuread = !var.shared_access_key_enabled - use_msi = var.use_spn ? false : true - + use_msi = true + subscription_id = local.saplib_subscription_id } provider "azurerm" { @@ -73,6 +73,7 @@ provider "azurerm" { tenant_id = local.use_spn ? local.spn.tenant_id : null alias = "privatelinkdnsmanagement" storage_use_azuread = true + use_msi = var.use_spn ? 
false : true } @@ -104,7 +105,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/run/sap_library/tfvar_variables.tf b/deploy/terraform/run/sap_library/tfvar_variables.tf index 8627d877de..e0a4878a6a 100644 --- a/deploy/terraform/run/sap_library/tfvar_variables.tf +++ b/deploy/terraform/run/sap_library/tfvar_variables.tf @@ -328,3 +328,8 @@ variable "dns_label" { description = "DNS label" default = "" } + +variable "tags" { + description = "If provided, tags for all resources" + default = {} + } diff --git a/deploy/terraform/run/sap_library/transform.tf b/deploy/terraform/run/sap_library/transform.tf index ede51c324b..1e71d20b52 100644 --- a/deploy/terraform/run/sap_library/transform.tf +++ b/deploy/terraform/run/sap_library/transform.tf @@ -1,122 +1,63 @@ locals { infrastructure = { - environment = coalesce(var.environment, try(var.infrastructure.environment, "")) - region = coalesce(var.location, try(var.infrastructure.region, "")) - codename = try(var.codename, try(var.infrastructure.codename, "")) + environment = var.environment + region = var.location + codename = var.codename resource_group = { - name = try(coalesce(var.resourcegroup_name, try(var.infrastructure.resource_group.name, "")), "") - arm_id = try(coalesce(var.resourcegroup_arm_id, try(var.infrastructure.resource_group.arm_id, "")), "") + name = var.resourcegroup_name + arm_id = var.resourcegroup_arm_id } - tags = try(coalesce(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) + tags = try(coalesce(var.resourcegroup_tags, var.tags, {}), {}) } deployer = { use = var.use_deployer } key_vault = { - kv_spn_id = try(coalesce(local.spn_key_vault_arm_id, var.spn_keyvault_id, try(var.key_vault.kv_spn_id, "")), "") + kv_spn_id = coalesce(var.spn_keyvault_id, local.spn_key_vault_arm_id) } storage_account_sapbits = { - arm_id = try(coalesce(var.library_sapmedia_arm_id, try(var.storage_account_sapbits.arm_id, "")), "") - name = var.library_sapmedia_name - account_tier = coalesce( - var.library_sapmedia_account_tier, - try(var.storage_account_sapbits.account_tier, "Standard") - ) - account_replication_type = coalesce( - var.library_sapmedia_account_replication_type, - try(var.storage_account_sapbits.account_replication_type, "ZRS") - ) - account_kind = coalesce( - var.library_sapmedia_account_kind, - try(var.storage_account_sapbits.account_kind, "StorageV2") - ) + arm_id = var.library_sapmedia_arm_id + name = var.library_sapmedia_name + account_tier = var.library_sapmedia_account_tier + account_replication_type = var.library_sapmedia_account_replication_type + account_kind = var.library_sapmedia_account_kind file_share = { - enable_deployment = ( - var.library_sapmedia_file_share_enable_deployment || - try(var.storage_account_sapbits.file_share.enable_deployment, true) - ) - is_existing = ( - var.library_sapmedia_file_share_is_existing || - try(var.storage_account_sapbits.file_share.is_existing, false) - ) - name = coalesce( - var.library_sapmedia_file_share_name, - try( - var.storage_account_sapbits.file_share.name, - module.sap_namegenerator.naming.resource_suffixes.sapbits - ) - ) + enable_deployment = var.library_sapmedia_file_share_enable_deployment + is_existing = var.library_sapmedia_file_share_is_existing + name = coalesce(var.library_sapmedia_file_share_name,module.sap_namegenerator.naming.resource_suffixes.sapbits) } sapbits_blob_container = { - enable_deployment = ( - var.library_sapmedia_blob_container_enable_deployment || - 
try(var.storage_account_sapbits.sapbits_blob_container.enable_deployment, true) - ) - is_existing = ( - var.library_sapmedia_blob_container_is_existing || - try(var.storage_account_sapbits.sapbits_blob_container.is_existing, false) - ) - name = coalesce( - var.library_sapmedia_blob_container_name, - try( - var.storage_account_sapbits.sapbits_blob_container.name, - module.sap_namegenerator.naming.resource_suffixes.sapbits - ) - ) + enable_deployment = var.library_sapmedia_blob_container_enable_deployment + is_existing = var.library_sapmedia_blob_container_is_existing + name = coalesce(var.library_sapmedia_blob_container_name, module.sap_namegenerator.naming.resource_suffixes.sapbits) } shared_access_key_enabled = var.shared_access_key_enabled public_network_access_enabled = var.public_network_access_enabled } storage_account_tfstate = { - arm_id = try( - coalesce( - var.library_terraform_state_arm_id, - try(var.storage_account_tfstate.arm_id, "")) - , "" - ) - name = var.library_terraform_state_name - account_tier = coalesce( - var.library_terraform_state_account_tier, - try(var.storage_account_tfstate.account_tier, "Standard") - ) - account_replication_type = coalesce( - var.library_terraform_state_account_replication_type, - try(var.storage_account_tfstate.account_replication_type, "ZRS") - ) - account_kind = coalesce( - var.library_terraform_state_account_kind, - try(var.storage_account_tfstate.account_kind, "StorageV2") - ) + arm_id = var.library_terraform_state_arm_id + name = var.library_terraform_state_name + account_tier = var.library_terraform_state_account_tier + account_replication_type = var.library_terraform_state_account_replication_type + account_kind = var.library_terraform_state_account_kind tfstate_blob_container = { - is_existing = ( - var.library_terraform_state_blob_container_is_existing || - try(var.storage_account_tfstate.tfstate_blob_container.is_existing, false) - ) - name = coalesce( - var.library_terraform_state_blob_container_name, - try(var.storage_account_tfstate.tfstate_blob_container.name, "tfstate") - ) + is_existing = var.library_terraform_state_blob_container_is_existing + name = var.library_terraform_state_blob_container_name } tfvars_blob_container = { - is_existing = var.library_terraform_vars_blob_container_is_existing - name = var.library_terraform_vars_blob_container_name + is_existing = var.library_terraform_vars_blob_container_is_existing + name = var.library_terraform_vars_blob_container_name } ansible_blob_container = { - is_existing = ( - var.library_ansible_blob_container_is_existing || - try(var.storage_account_tfstate.ansible_blob_container.is_existing, false) - ) - name = coalesce( - var.library_ansible_blob_container_name, - try(var.storage_account_tfstate.ansible_blob_container.name, "ansible") - ) + is_existing = var.library_ansible_blob_container_is_existing + name = var.library_ansible_blob_container_name } - shared_access_key_enabled = var.shared_access_key_enabled + shared_access_key_enabled = var.shared_access_key_enabled public_network_access_enabled = var.public_network_access_enabled } diff --git a/deploy/terraform/run/sap_library/variables_local.tf b/deploy/terraform/run/sap_library/variables_local.tf index 59798b8039..8360cf4301 100644 --- a/deploy/terraform/run/sap_library/variables_local.tf +++ b/deploy/terraform/run/sap_library/variables_local.tf @@ -31,7 +31,8 @@ locals { ) // Retrieve the arm_id of deployer's Key Vault from deployer's terraform.tfstate - spn_key_vault_arm_id = 
try(data.terraform_remote_state.deployer[0].outputs.deployer_kv_user_arm_id, "") + spn_key_vault_arm_id = try(data.terraform_remote_state.deployer[0].outputs.deployer_kv_user_arm_id, "") + spn = { subscription_id = local.use_spn ? data.azurerm_key_vault_secret.subscription_id[0].value : null, diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index 0a13d0c6d5..93094b8142 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -93,6 +93,7 @@ module "common_infrastructure" { use_random_id_for_storageaccounts = var.use_random_id_for_storageaccounts use_scalesets_for_deployment = var.use_scalesets_for_deployment dns_settings = local.dns_settings + enable_firewall_for_keyvaults_and_storage = var.enable_firewall_for_keyvaults_and_storage } #------------------------------------------------------------------------------- diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index f61497c0c7..050d29beb9 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -99,7 +99,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 3f2b7cab9b..a51596c730 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -135,6 +135,11 @@ variable "shared_access_key_enabled_nfs" { type = bool } +variable "enable_firewall_for_keyvaults_and_storage" { + description = "Boolean value indicating if firewall should be enabled for key vaults and storage" + default = true + type = bool + } ######################################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf index 1f00c03ccc..82e2c28764 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/infrastructure.tf @@ -78,6 +78,7 @@ data "azurerm_subnet" "subnet_mgmt" { // Creates boot diagnostics storage account for Deployer resource "azurerm_storage_account" "deployer" { + depends_on = [ azurerm_subnet.subnet_mgmt ] count = length(var.deployer.deployer_diagnostics_account_arm_id) > 0 ? 0 : 1 name = local.storageaccount_names resource_group_name = local.resource_group_exists ? data.azurerm_resource_group.deployer[0].name : azurerm_resource_group.deployer[0].name @@ -88,12 +89,14 @@ resource "azurerm_storage_account" "deployer" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false shared_access_key_enabled = var.deployer.shared_access_key_enabled - network_rules { - default_action = "Deny" - virtual_network_subnet_ids = [azurerm_subnet.subnet_mgmt[0].id] - } + cross_tenant_replication_enabled = false - depends_on = [ azurerm_subnet.subnet_mgmt ] + + network_rules { + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" + virtual_network_subnet_ids = var.use_service_endpoint ? [(local.management_subnet_exists) ? 
local.management_subnet_arm_id : azurerm_subnet.subnet_mgmt[0].id] : null + } + } data "azurerm_storage_account" "deployer" { @@ -103,6 +106,15 @@ data "azurerm_storage_account" "deployer" { } +resource "azurerm_role_assignment" "deployer" { + provider = azurerm.main + count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 + scope = length(var.deployer.deployer_diagnostics_account_arm_id) > 0 ? var.deployer.deployer_diagnostics_account_arm_id : azurerm_storage_account.deployer[0].id + role_definition_name = "Storage Blob Data Contributor" + principal_id = azurerm_linux_virtual_machine.deployer[count.index].identity[0].principal_id +} + + resource "azurerm_role_assignment" "resource_group_contributor" { provider = azurerm.main count = var.assign_subscription_permissions && var.deployer.add_system_assigned_identity ? var.deployer_vm_count : 0 diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf b/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf index e64b177544..9a31dbf08c 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.dnsmanagement, azurerm.main] - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl index ead70e2bc2..cd0a5c6f5f 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_deployer/templates/configure_deployer.sh.tmpl @@ -271,6 +271,7 @@ export AZADHOME="/home/$${local_user}" if [ -f /etc/profile.d/deploy_server.sh ] ; then echo echo ##vso[task.logissue type=warning]Deployer already configured + sudo chmod 775 /etc/profile.d/deploy_server.sh exit 0 else @@ -456,9 +457,12 @@ else "$${tf_bin}" \ "$${tf_cache}" - wget -nv -O "/tmp/$${tf_zip}" "https://releases.hashicorp.com/terraform/$${tfversion}/$${tf_zip}" - sudo unzip -o "/tmp/$${tf_zip}" -d "$${tf_dir}" + wget -nv -O "/$${asad_home}/$${tf_zip}" "https://releases.hashicorp.com/terraform/$${tfversion}/$${tf_zip}" + sudo unzip -qq -o "/$${asad_home}/$${tf_zip}" -d "$${tf_dir}" sudo ln -vfs "../$(basename "$${tf_dir}")/terraform" "$${tf_bin}/terraform" + sudo chmod 755 "$${tf_bin}/terraform" + + sudo rm "/$${asad_home}/$${tf_zip}" # Uninstall Azure CLI - For some platforms case "$(get_distro_name)" in @@ -758,14 +762,13 @@ else sudo touch "$${agent_home}/no_tok" fi - set -o xtrace if [ -f "$${agent_home}/agent_configured" ]; then echo "Agent already configured" else - set +o xtrace if [[ -n "$${TOKEN}" ]]; then + echo "Configuring Agent" sudo chmod -R 744 "$${agent_home}" - set -o xtrace + cd "$${agent_home}" ./env.sh @@ -775,6 +778,9 @@ else (echo "$${agent_home}/config.sh" --unattended --url "$${DEVURL}" --auth pat --token "$${TOKEN}" --pool "$${POOL}" --agent "$${AGENTNAME}" --replace --acceptTeeEula | sudo tee -a "$${agent_home}/config_fixed_params.sh") > /dev/null 2>&1 sudo chmod +x "$${agent_home}/config_fixed_params.sh" + sudo chown "$${local_user}" "$${agent_home}/config_fixed_params.sh" + + echo "Installing Agent" sudo runuser -l "$${local_user}" -c "$${agent_home}/config_fixed_params.sh" | sudo tee -a "$${agent_home}/log.txt" @@ 
-790,6 +796,7 @@ else sudo touch "$${agent_home}/agent_configured" echo export "PATH=$${ansible_bin}:$${tf_bin}:"'$${PATH}'::"$${DOTNET_ROOT}" | sudo tee -a /etc/profile.d/deploy_server.sh + sudo chmod 775 /etc/profile.d/deploy_server.sh # Install dotNet case "$(get_distro_name)" in (ubuntu) @@ -813,10 +820,12 @@ else else echo "NO TOKEN specified" echo export "PATH=$${ansible_bin}:$${tf_bin}:"'$${PATH}'::"$${DOTNET_ROOT}":'$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation/deploy/scripts' | sudo tee -a /etc/profile.d/deploy_server.sh + sudo chmod 775 /etc/profile.d/deploy_server.sh echo "export SAP_AUTOMATION_REPO_PATH='$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation'" | sudo tee -a /etc/profile.d/deploy_server.sh echo "export DEPLOYMENT_REPO_PATH='$${AZADHOME}/Azure_SAP_Automated_Deployment/sap-automation'" | sudo tee -a /etc/profile.d/deploy_server.sh echo "export CONFIG_REPO_PATH='$${AZADHOME}/Azure_SAP_Automated_Deployment/WORKSPACES'" | sudo tee -a /etc/profile.d/deploy_server.sh fi fi + fi diff --git a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf index 9cfb588d7b..b99a2749b0 100644 --- a/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf +++ b/deploy/terraform/terraform-units/modules/sap_deployer/vm-deployer.tf @@ -156,13 +156,12 @@ resource "azurerm_linux_virtual_machine" "deployer" { version = var.deployer.os.version } } - dynamic "plan" { for_each = range(var.deployer.os.type == "marketplace_with_plan" ? 1 : 0) content { - name = var.deployer.plan.name - publisher = var.deployer.plan.publisher - product = var.deployer.plan.product + name = var.deployer.os.sku + publisher = var.deployer.os.publisher + product = var.deployer.os.offer } } diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf index 1d2288a86a..7fe898cfaf 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/key_vault_sap_landscape.tf @@ -1,3 +1,6 @@ +data "azuread_client_config" "current" {} + + #######################################4#######################################8 # # # Workload zone key vault # @@ -32,7 +35,7 @@ resource "azurerm_key_vault" "kv_user" { content { bypass = "AzureServices" - default_action = local.management_subnet_exists ? "Deny" : "Allow" + default_action = var.enable_firewall_for_keyvaults_and_storage ? "Deny" : "Allow" ip_rules = compact( [ @@ -85,7 +88,7 @@ resource "azurerm_role_assignment" "role_assignment_msi" { resource "azurerm_role_assignment" "role_assignment_spn" { provider = azurerm.main - count = var.enable_rbac_authorization_for_keyvault && local.service_principal.object_id != "" ? 1 : 0 + count = var.enable_rbac_authorization_for_keyvault && local.service_principal.object_id != "" && !var.options.use_spn ? 1 : 0 scope = local.user_keyvault_exist ? ( local.user_key_vault_id) : ( azurerm_key_vault.kv_user[0].id @@ -95,14 +98,29 @@ resource "azurerm_role_assignment" "role_assignment_spn" { } resource "azurerm_key_vault_access_policy" "kv_user" { - provider = azurerm.main - count = (var.key_vault.exists || var.enable_rbac_authorization_for_keyvault) ? ( - 0) : ( - (length(var.deployer_tfstate) > 0 ? var.deployer_tfstate.deployer_uai.principal_id == local.service_principal.object_id : false) ? 
0 : 1 - ) + provider = azurerm.deployer + count = 0 key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id tenant_id = local.service_principal.tenant_id - object_id = local.service_principal.object_id != "" ? local.service_principal.object_id : "00000000-0000-0000-0000-000000000000" + object_id = var.deployer_tfstate.deployer_uai.principal_id + + secret_permissions = [ + "Get", + "List", + "Set", + "Delete", + "Recover", + "Restore", + "Purge" + ] +} + +resource "azurerm_key_vault_access_policy" "kv_user_spn" { + provider = azurerm.main + count = var.options.use_spn ? 1 : 0 + key_vault_id = local.user_keyvault_exist ? local.user_key_vault_id : azurerm_key_vault.kv_user[0].id + tenant_id = data.azuread_client_config.current.tenant_id + object_id = data.azuread_client_config.current.object_id secret_permissions = [ "Get", @@ -115,6 +133,7 @@ resource "azurerm_key_vault_access_policy" "kv_user" { ] } + ############################################################################### # # # Secrets # @@ -148,7 +167,8 @@ resource "azurerm_key_vault_secret" "sid_ppk" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = local.sid_ppk_name @@ -174,7 +194,8 @@ resource "azurerm_key_vault_secret" "sid_pk" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = local.sid_pk_name @@ -205,7 +226,8 @@ resource "azurerm_key_vault_secret" "sid_username" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = local.sid_username_secret_name @@ -234,7 +256,8 @@ resource "azurerm_key_vault_secret" "sid_password" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] name = local.sid_password_secret_name content_type = "" @@ -265,7 +288,8 @@ resource "azurerm_key_vault_secret" "witness_access_key" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = replace( @@ -302,7 +326,8 @@ resource "azurerm_key_vault_secret" "witness_name" { azurerm_key_vault_access_policy.kv_user, azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = replace( @@ -369,7 +394,8 @@ resource "azurerm_key_vault_secret" "deployer_keyvault_user_name" { azurerm_key_vault_access_policy.kv_user, 
azurerm_role_assignment.role_assignment_spn, azurerm_role_assignment.role_assignment_msi, - azurerm_key_vault_access_policy.kv_user_msi + azurerm_key_vault_access_policy.kv_user_msi, + azurerm_key_vault_access_policy.kv_user_spn ] content_type = "" name = "deployer-kv-name" diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf index 3704a96e56..94690842df 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.peering, azurerm.privatelinkdnsmanagement] - version = "4.4.0" + version = "4.6.0" } azapi = { diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index cd14c2e1b0..cd5a28f734 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -326,7 +326,7 @@ resource "azurerm_storage_account" "transport" { resource "azurerm_private_dns_a_record" "transport" { provider = azurerm.privatelinkdnsmanagement - count = var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 + count = var.use_private_endpoint && var.create_transport_storage && local.use_Azure_native_DNS && local.use_AFS_for_shared && length(var.transport_private_endpoint_id) == 0 ? 1 : 0 name = replace( lower( format("%s", local.landscape_shared_transport_storage_account_name) @@ -525,7 +525,7 @@ resource "azurerm_storage_account" "install" { resource "azurerm_storage_account_network_rules" "install" { provider = azurerm.main - count = local.use_AFS_for_shared && length(var.install_storage_account_id) == 0 ? 1 : 0 + count = local.use_AFS_for_shared && var.enable_firewall_for_keyvaults_and_storage && length(var.install_storage_account_id) == 0 ? 1 : 0 depends_on = [ azurerm_storage_account.install, azurerm_storage_share.install, diff --git a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf index 4fbfa3c789..3c3986ec92 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/key_vault.tf @@ -82,7 +82,7 @@ resource "azurerm_key_vault_secret" "tfstate" { resource "azurerm_private_dns_a_record" "kv_user" { - provider = azurerm.deployer + provider = azurerm.privatelinkdnsmanagement count = var.dns_settings.register_storage_accounts_keyvaults_with_dns ? 
1 : 0 name = lower(split("/", var.key_vault.kv_spn_id)[8]) zone_name = var.dns_settings.dns_zone_names.vault_dns_zone_name diff --git a/deploy/terraform/terraform-units/modules/sap_library/providers.tf b/deploy/terraform/terraform-units/modules/sap_library/providers.tf index fb48736d38..6367145873 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf index 0f87cbcaa8..6930834d21 100644 --- a/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_library/storage_accounts.tf @@ -105,7 +105,7 @@ resource "azurerm_role_assignment" "storage_tfstate_contributor_ssi" { } resource "azurerm_private_dns_a_record" "storage_tfstate_pep_a_record_registry" { - provider = azurerm.dnsmanagement + provider = azurerm.privatelinkdnsmanagement count = var.dns_settings.register_storage_accounts_keyvaults_with_dns && var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_tfstate_exists ? 1 : 0 depends_on = [ azurerm_private_dns_zone.blob @@ -343,7 +343,7 @@ resource "azurerm_storage_account_network_rules" "storage_sapbits" { resource "azurerm_private_dns_a_record" "storage_sapbits_pep_a_record_registry" { - provider = azurerm.dnsmanagement + provider = azurerm.privatelinkdnsmanagement count = var.use_private_endpoint && var.use_custom_dns_a_registration && !local.sa_sapbits_exists ? 1 : 0 depends_on = [ azurerm_private_dns_zone.blob @@ -476,7 +476,7 @@ resource "azurerm_role_assignment" "storage_sapbits_contributor" { provider = azurerm.main count = try(var.deployer_tfstate.deployer_uai.principal_id, "") != "" ? 1 : 0 scope = local.sa_sapbits_exists ? var.storage_account_sapbits.arm_id : azurerm_storage_account.storage_sapbits[0].id - role_definition_name = "Storage Account Contributor" + role_definition_name = "Storage Blob Data Contributor" principal_id = var.deployer_tfstate.deployer_uai.principal_id } @@ -484,7 +484,7 @@ resource "azurerm_role_assignment" "storage_sapbits_contributor_ssi" { provider = azurerm.main count = try(var.deployer_tfstate.add_system_assigned_identity, false) ? length(var.deployer_tfstate.deployer_system_assigned_identity) : 0 scope = local.sa_sapbits_exists ? 
var.storage_account_sapbits.arm_id : azurerm_storage_account.storage_sapbits[0].id - role_definition_name = "Storage Account Contributor" + role_definition_name = "Storage Blob Data Contributor" principal_id = var.deployer_tfstate.deployer_system_assigned_identity[count.index] } diff --git a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf index 31e19ce0d8..9a5ea245e6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/anydb_node/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] // - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf index 31e19ce0d8..9a5ea245e6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/app_tier/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] // - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf index 31e19ce0d8..9a5ea245e6 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] // - version = "4.4.0" + version = "4.6.0" } } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index 4785530e08..bab63ad0a1 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -44,23 +44,28 @@ resource "azurerm_storage_account" "sapmnt" { public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) tags = var.tags - network_rules { - default_action = "Deny" - virtual_network_subnet_ids = compact( - [ - try(var.landscape_tfstate.admin_subnet_id, ""), - try(var.landscape_tfstate.app_subnet_id, ""), - try(var.landscape_tfstate.db_subnet_id, ""), - try(var.landscape_tfstate.web_subnet_id, ""), - try(var.landscape_tfstate.subnet_mgmt_id, "") - ] - ) - ip_rules = compact( - [ - length(var.Agent_IP) > 0 ? var.Agent_IP : "" - ] - ) - } + dynamic "network_rules" { + for_each = range(var.enable_firewall_for_keyvaults_and_storage ? 1 : 0) + content { + + default_action = var.enable_firewall_for_keyvaults_and_storage ? 
"Deny" : "Allow" + virtual_network_subnet_ids = compact( + [ + try(var.landscape_tfstate.admin_subnet_id, ""), + try(var.landscape_tfstate.app_subnet_id, ""), + try(var.landscape_tfstate.db_subnet_id, ""), + try(var.landscape_tfstate.web_subnet_id, ""), + try(var.landscape_tfstate.subnet_mgmt_id, "") + ] + ) + ip_rules = compact( + [ + length(var.Agent_IP) > 0 ? var.Agent_IP : "" + ] + ) + + } + } } diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf index 922e45fa16..e1cc5e4826 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/variables_global.tf @@ -216,6 +216,10 @@ variable "use_private_endpoint" { default = false type = bool } +variable "enable_firewall_for_keyvaults_and_storage" { + description = "Boolean value indicating if firewall should be enabled for key vaults and storage" + type = bool + } ######################################################################################### # # diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf index 48bf4b3735..1447e1ee52 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.deployer, azurerm.dnsmanagement, azurerm.privatelinkdnsmanagement] - version = "4.4.0" + version = "4.6.0" } # azapi = { diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf b/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf index abc9ae403b..5ef1914498 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/providers.tf @@ -3,7 +3,7 @@ terraform { azurerm = { source = "hashicorp/azurerm" configuration_aliases = [azurerm.main, azurerm.dnsmanagement] - version = "4.4.0" + version = "4.6.0" } } } From 0fd2845e53e7366fca08ee2ac59a67bbf4218fa5 Mon Sep 17 00:00:00 2001 From: Swatibehl <127396693+Swatibehl@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:44:28 +0530 Subject: [PATCH 160/164] ansible python fix (#653) Co-authored-by: swatibehl --- .../roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml index 693c66ce8e..645619b3c9 100644 --- a/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml +++ b/deploy/ansible/roles-os/1.17-generic-pacemaker/tasks/1.17.1-pre_checks.yml @@ -10,6 +10,8 @@ ansible.builtin.pip: name: passlib state: present + vars: + ansible_python_interpreter: "python3" tags: - always From 1ce46d9fa1feac98ec436b4be940db9696fc78fd Mon Sep 17 00:00:00 2001 From: hdamecharla Date: Thu, 24 Oct 2024 17:24:03 +0530 Subject: [PATCH 161/164] Fix conditional statement in install_workloadzone.sh --- deploy/scripts/install_workloadzone.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/scripts/install_workloadzone.sh 
b/deploy/scripts/install_workloadzone.sh index 1e5231e233..f8b410e545 100755 --- a/deploy/scripts/install_workloadzone.sh +++ b/deploy/scripts/install_workloadzone.sh @@ -1075,7 +1075,7 @@ if [ 0 == $return_value ] ; then echo "" save_config_var "workloadkeyvault" "${workload_config_information}" - fi_system + fi fi fi From 813c71c652ee46cf8ba57b26638ee5d6e7ce0dd5 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Tue, 29 Oct 2024 10:29:37 -0400 Subject: [PATCH 162/164] added documentation --- .../2.6-sap-mounts/library/setting_vars.py | 92 +++++++++++-------- .../library/setting_vars_oracle.py | 14 ++- 2 files changed, 64 insertions(+), 42 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py index d488e7af1f..91146a9f5a 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py @@ -1,12 +1,29 @@ +"""Settings Vars Module for SAP Mounts Role. This uses the AnsibleModule from the Ansible module_utils to set the parameters for the SAP mounts. +""" from ansible.module_utils.basic import AnsibleModule + def run_module(): - distro_versions = ['redhat8.4', 'redhat8.6', 'redhat8.8', 'redhat9.0', 'redhat9.2', 'sles_sap15.2', 'sles_sap15.3', 'sles_sap15.4', 'sles_sap15.5'] + """This function sets the parameters for the SAP mounts. + Input parameters are the SAP SID, HDBADM UID, platform, SIDADM UID, multi SIDs, asesidadm_uid (not required), SCS instance number, PAS instance number, APP instance number, server name and distribution full ID (not required). + The output parameters are this_sid, all_sap_mounts, first_server_temp, mnt_options and nfs_service. + """ + distro_versions = [ + "redhat8.4", + "redhat8.6", + "redhat8.8", + "redhat9.0", + "redhat9.2", + "sles_sap15.2", + "sles_sap15.3", + "sles_sap15.4", + "sles_sap15.5", + ] module_args = dict( sap_sid=dict(type="str", required=True), hdbadm_uid=dict(type="str", required=True), platform=dict(type="str", required=True), sidadm_uid=dict(type="str", required=True), - multi_sids=dict(type='list', required=False), + multi_sids=dict(type="list", required=False), asesidadm_uid=dict(type="str", required=False), scs_instance_number=dict(type="str", required=True), pas_instance_number=dict(type="str", required=True), @@ -14,7 +31,7 @@ def run_module(): server_name=dict(type="str", required=True), distribution_full_id=dict(type="str", required=False), ) - + result = { "this_sid": {}, "all_sap_mounts": [], @@ -22,50 +39,47 @@ def run_module(): "mnt_options": {}, "nfs_service": "", } - + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) - - distribution_full_id = module.params['distribution_full_id'] - - result['this_sid'] = { - 'sid': module.params['sap_sid'].upper(), - 'dbsid_uid': module.params['hdbadm_uid'], - 'sidadm_uid': module.params['asesidadm_uid'] if module.params['platform'] == 'SYSBASE' else module.params['sidadm_uid'], - 'ascs_inst_no': module.params['scs_instance_number'], - 'pas_inst_no': module.params['pas_instance_number'], - 'app_inst_no': module.params['app_instance_number'] + + distribution_full_id = module.params["distribution_full_id"] + + result["this_sid"] = { + "sid": module.params["sap_sid"].upper(), + "dbsid_uid": module.params["hdbadm_uid"], + "sidadm_uid": ( + module.params["asesidadm_uid"] + if module.params["platform"] == "SYSBASE" + else module.params["sidadm_uid"] + ), + "ascs_inst_no": 
module.params["scs_instance_number"], + "pas_inst_no": module.params["pas_instance_number"], + "app_inst_no": module.params["app_instance_number"], } - try: - if module.params['multi_sids'] is not None: - result['all_sap_mounts'] = module.params['multi_sids'] - + try: + if module.params["multi_sids"] is not None: + result["all_sap_mounts"] = module.params["multi_sids"] + else: - result['all_sap_mounts'].append(result['this_sid']) + result["all_sap_mounts"].append(result["this_sid"]) except Exception as e: - module.fail_json(msg=str(e),**result) + module.fail_json(msg=str(e), **result) - result['first_server_temp'].append(module.params['server_name']) + result["first_server_temp"].append(module.params["server_name"]) - if distribution_full_id in distro_versions: - result['mnt_options'] = { - 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', - 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys,nconnect=8', - } - else: - result['mnt_options'] = { - 'afs_mnt_options': 'noresvport,vers=4,minorversion=1,sec=sys', - 'anf_mnt_options': 'rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys', - } + result["mnt_options"] = { + "afs_mnt_options": "noresvport,vers=4,minorversion=1,sec=sys", + "anf_mnt_options": "rw,nfsvers=4.1,hard,timeo=600,rsize=262144,wsize=262144,noatime,lock,_netdev,sec=sys" + (",nconnect=8" if distribution_full_id in distro_versions else ""), + } - if distribution_full_id in ['redhat8', 'redhat9']: - result['nfs_service'] = 'nfs-server' - elif distribution_full_id == 'redhat7': - result['nfs_service'] = 'nfs' - elif distribution_full_id == 'oraclelinux8': - result['nfs_service'] = 'rpcbind' - else: - result['nfs_service'] = 'nfsserver' + nfs_service_mapping = { + "redhat8": "nfs-server", + "redhat9": "nfs-server", + "redhat7": "nfs", + "oraclelinux8": "rpcbind", + } + result["nfs_service"] = nfs_service_mapping.get(distribution_full_id, "nfsserver") module.exit_json(**result) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py index ce6dd8b230..5dd912f049 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars_oracle.py @@ -1,8 +1,15 @@ +"""Settings Vars Module for SAP Mounts Oracle Role. This uses the AnsibleModule from the Ansible module_utils to set the parameters for the SAP mounts on Oracle. +""" from ansible.module_utils.basic import AnsibleModule + def run_module(): + """ This function sets the parameters for the SAP mounts on Oracle. + Input parameters are nfs_server and NFS_provider. + The output parameters are nfs_server_temp and nfs_server. 
+ """ module_args = dict( - nfs_server_temp=dict(type="str",required=True), - NFS_provider=dict(type="str",required=True), + nfs_server_temp=dict(type="str", required=True), + NFS_provider=dict(type="str", required=True), ) result = { @@ -12,9 +19,10 @@ def run_module(): module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) - result['nfs_server_temp'].append(module.params['nfs_server_temp']) + result["nfs_server_temp"].append(module.params["nfs_server_temp"]) module.exit_json(**result) + if __name__ == "__main__": run_module() From 123204e00443fb494e9cf7e06069cbb8a94282c9 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Tue, 29 Oct 2024 10:36:32 -0400 Subject: [PATCH 163/164] removed trailing spaces --- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml | 4 ++-- .../roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml | 4 ++-- .../2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml | 4 ++-- .../2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml | 2 +- .../2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml | 2 +- .../2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml index f8efa6b34d..e36ebf366c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.0-afs-mounts.yaml @@ -5,8 +5,8 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: "calling setting variables python script" - setting_vars: +- name: "calling setting variables python script" + setting_vars: sap_sid: "{{ sap_sid }}" hdbadm_uid: "{{ hdbadm_uid }}" platform: "{{ platform }}" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml index 2752352ad0..91ff05561d 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.1-anf-mounts.yaml @@ -5,8 +5,8 @@ # +------------------------------------4--------------------------------------*/ --- -- name: "calling setting variables python script" - setting_vars: +- name: "calling setting variables python script" + setting_vars: sap_sid: "{{ sap_sid }}" hdbadm_uid: "{{ hdbadm_uid }}" platform: "{{ platform }}" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml index 6577b863f8..a00d605b60 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.2-oracle-mounts.yaml @@ -16,9 +16,9 @@ nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" register: setting_vars_output - + - name: "ORACLE: Set the NFS Server name" ansible.builtin.set_fact: nfs_server: "{{ setting_vars_output.results[0]['nfs_server_temp'] | first }}" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml index 06186194e2..73973c0e1a 100644 --- 
a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-asm-prereq.yaml @@ -13,7 +13,7 @@ nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" register: setting_vars_output - name: "2.6.3 ORACLE ASM - Prerequisites: Set the NFS Server name" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml index 8efd7ea33a..521b121131 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.3-oracle-observer.yaml @@ -13,7 +13,7 @@ nfs_server_temp: "{{ item }}" with_items: - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_SCS') }}" - - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" + - "{{ query('inventory_hostnames', '{{ sap_sid | upper }}_DB') }}" register: setting_vars_output - name: "2.6 SAP Mounts: - Set the NFS Server name" diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml index 4eccaf75e9..4d1027eea1 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.7-afs-mounts-simplemount.yaml @@ -5,8 +5,8 @@ # | | # +------------------------------------4--------------------------------------*/ -- name: "calling setting variables python script" - setting_vars: +- name: "calling setting variables python script" + setting_vars: sap_sid: "{{ sap_sid }}" hdbadm_uid: "{{ hdbadm_uid }}" platform: "{{ platform }}" From aee9d3127ec81e0f526d96bffe940649354545e8 Mon Sep 17 00:00:00 2001 From: Dhruv Aggarwal Date: Tue, 29 Oct 2024 10:58:56 -0400 Subject: [PATCH 164/164] removed trailing spaces --- .../2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml index 749e5bf141..c76a0a9b8c 100644 --- a/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml +++ b/deploy/ansible/roles-sap-os/2.6-sap-mounts/tasks/2.6.8-anf-mounts-simplemount.yaml @@ -5,8 +5,8 @@ # +------------------------------------4--------------------------------------*/ --- -- name: "calling setting variables python script" - setting_vars: +- name: "calling setting variables python script" + setting_vars: sap_sid: "{{ sap_sid }}" hdbadm_uid: "{{ hdbadm_uid }}" platform: "{{ platform }}"
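
A quick way to smoke-test the setting_vars module introduced in PATCH 162 outside of a playbook is to invoke it the way Ansible itself does, with a JSON arguments file passed as the first argument. The following Python sketch is illustrative only and not part of the patch series: it assumes Ansible is installed locally, that setting_vars.py keeps its __main__ entry point, and that all argument values (SID, UIDs, instance numbers, server name) and the /tmp path are made-up placeholders.

    import json
    import subprocess

    # Hypothetical arguments mirroring what the 2.6-sap-mounts tasks pass in.
    args = {
        "ANSIBLE_MODULE_ARGS": {
            "sap_sid": "x01",
            "hdbadm_uid": "1100",
            "platform": "HANA",
            "sidadm_uid": "1101",
            "scs_instance_number": "00",
            "pas_instance_number": "01",
            "app_instance_number": "02",
            "server_name": "x01app00l123",
            "distribution_full_id": "redhat8.4",
        }
    }

    with open("/tmp/setting_vars_args.json", "w") as handle:
        json.dump(args, handle)

    # AnsibleModule-based modules accept a JSON args file as argv[1] and print
    # a JSON result on stdout. With distribution_full_id redhat8.4, which is in
    # the module's distro_versions list, anf_mnt_options should end in
    # ",nconnect=8".
    subprocess.run(
        [
            "python3",
            "deploy/ansible/roles-sap-os/2.6-sap-mounts/library/setting_vars.py",
            "/tmp/setting_vars_args.json",
        ],
        check=True,
    )

Rerunning with a distribution_full_id outside distro_versions (for example sles_sap15.1) should drop the nconnect suffix, and an ID with no entry in nfs_service_mapping falls back to the default nfsserver, which exercises both of the table-driven replacements for the earlier if/else chains.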