diff --git a/Webapp/SDAF/Models/CustomValidators.cs b/Webapp/SDAF/Models/CustomValidators.cs index bab3b8854c..342d0bf4c3 100644 --- a/Webapp/SDAF/Models/CustomValidators.cs +++ b/Webapp/SDAF/Models/CustomValidators.cs @@ -51,22 +51,22 @@ public override bool IsValid(object value) if (addresses.Contains(",")) { bool returnValue = true; - foreach(string address in addresses.Split(',')) + foreach (string address in addresses.Split(',')) + { + if (!RegexValidation(address, pattern)) { - if(!RegexValidation(address, pattern)) - { - returnValue = false; - } + returnValue = false; } + } return returnValue; } else { - + return RegexValidation(value, pattern); } - + } } public class IpAddressValidator : ValidationAttribute @@ -133,20 +133,56 @@ public override bool IsValid(object value) return RegexValidation(value, pattern); } } + public class PrivateEndpointIdValidator : ValidationAttribute { public override bool IsValid(object value) { + if (value == null) return true; string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Network\/privateEndpoints\/[a-zA-Z0-9-_]+$"; - return RegexValidation(value, pattern); + if (value.GetType().IsArray) + { + string[] values = (string[])value; + foreach (string v in values) + { + if (!RegexValidation(v, pattern)) return false; + } + return true; + } + else if (value.GetType() == typeof(string)) + { + return RegexValidation(value, pattern); + } + else + { + return false; + } } } public class StorageAccountIdValidator : ValidationAttribute { + public override bool IsValid(object value) { + if (value == null) return true; string pattern = @"^\/subscriptions\/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\/resourceGroups\/[a-zA-Z0-9-_]+\/providers\/Microsoft.Storage\/storageAccounts\/[a-zA-Z0-9-_]+$"; - return RegexValidation(value, pattern); + if (value.GetType().IsArray) + { + string[] values = (string[])value; + foreach (string v in values) + { + if (!RegexValidation(v, pattern)) 
return false; + } + return true; + } + else if (value.GetType() == typeof(string)) + { + return RegexValidation(value, pattern); + } + else + { + return false; + } } } public class GuidValidator : ValidationAttribute @@ -224,7 +260,7 @@ public override bool IsValid(object value) } } } - + public class UserAssignedIdentityIdValidator : ValidationAttribute { public override bool IsValid(object value) @@ -268,7 +304,7 @@ public override bool IsValid(object value) return RegexValidation(value, pattern); } } - + public class SubnetRequired : ValidationAttribute { diff --git a/Webapp/SDAF/Models/LandscapeModel.cs b/Webapp/SDAF/Models/LandscapeModel.cs index a133589ab1..cd429b37c1 100644 --- a/Webapp/SDAF/Models/LandscapeModel.cs +++ b/Webapp/SDAF/Models/LandscapeModel.cs @@ -362,7 +362,9 @@ public bool IsValid() [PrivateEndpointIdValidator] public string install_private_endpoint_id { get; set; } + public bool? shared_access_key_enabled { get; set; } = false; + public bool? shared_access_key_enabled_nfs { get; set; } = true; /*---------------------------------------------------------------------------8 | | diff --git a/Webapp/SDAF/Models/SystemModel.cs b/Webapp/SDAF/Models/SystemModel.cs index ea0c638cd9..973fc66346 100644 --- a/Webapp/SDAF/Models/SystemModel.cs +++ b/Webapp/SDAF/Models/SystemModel.cs @@ -211,12 +211,17 @@ public bool IsValid() public bool? register_endpoints_with_dns { get; set; } = true; + public bool? shared_access_key_enabled { get; set; } = false; - /*---------------------------------------------------------------------------8 - | | - | Cluster information | - | | - +------------------------------------4--------------------------------------*/ + public bool? 
shared_access_key_enabled_nfs { get; set; } = true; + + + +/*---------------------------------------------------------------------------8 +| | +| Cluster information | +| | ++------------------------------------4--------------------------------------*/ public string fencing_role_name { get; set; } diff --git a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json index 5a294381a5..7cef5bba19 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeDetails.json +++ b/Webapp/SDAF/ParameterDetails/LandscapeDetails.json @@ -144,6 +144,25 @@ "Options": [], "Overrules": "", "Display": 2 + }, + { + "Name": "shared_access_key_enabled", + "Required": false, + "Description": "Storage account authorization using Shared Access Key.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + + { + "Name": "shared_access_key_enabled_nfs", + "Required": false, + "Description": "Storage account authorization using Shared Access Key (NFS shares).", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 } ] }, diff --git a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt index 4cf26cb4e7..62c36142af 100644 --- a/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/LandscapeTemplate.txt @@ -427,6 +427,12 @@ $$witness_storage_account_arm_id$$ # storage_account_replication_type defines the replication type for Azure Files for NFS storage accounts $$storage_account_replication_type$$ +# shared_access_key_enabled defines Storage account authorization using Shared Access Key. +$$shared_access_key_enabled$$ + +# shared_access_key_enabled_nfs defines Storage account used for NFS shares authorization using Shared Access Key. 
+$$shared_access_key_enabled_nfs$$ + ######################################################################################### # # diff --git a/Webapp/SDAF/ParameterDetails/SystemDetails.json b/Webapp/SDAF/ParameterDetails/SystemDetails.json index e08945110a..0aebafb0a0 100644 --- a/Webapp/SDAF/ParameterDetails/SystemDetails.json +++ b/Webapp/SDAF/ParameterDetails/SystemDetails.json @@ -313,6 +313,25 @@ "Options": [], "Overrules": "", "Display": 2 + }, + { + "Name": "shared_access_key_enabled", + "Required": false, + "Description": "Storage account authorization using Shared Access Key.", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 + }, + + { + "Name": "shared_access_key_enabled_nfs", + "Required": false, + "Description": "Storage account authorization using Shared Access Key (NFS shares).", + "Type": "checkbox", + "Options": [], + "Overrules": "", + "Display": 2 } ] diff --git a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt index 22343d7491..85dae6b1d6 100644 --- a/Webapp/SDAF/ParameterDetails/SystemTemplate.txt +++ b/Webapp/SDAF/ParameterDetails/SystemTemplate.txt @@ -965,4 +965,8 @@ $$use_prefix$$ # use_zonal_markers defines if a zonal markers will be added to the virtual machine resource names $$use_zonal_markers$$ +# shared_access_key_enabled defines Storage account authorization using Shared Access Key. +$$shared_access_key_enabled$$ +# shared_access_key_enabled_nfs defines Storage account used for NFS shares authorization using Shared Access Key. 
+$$shared_access_key_enabled_nfs$$ diff --git a/Webapp/SDAF/SDAFWebApp.csproj b/Webapp/SDAF/SDAFWebApp.csproj index 1f9dfe2068..295391fb10 100644 --- a/Webapp/SDAF/SDAFWebApp.csproj +++ b/Webapp/SDAF/SDAFWebApp.csproj @@ -31,7 +31,7 @@ - + diff --git a/Webapp/SDAF/wwwroot/js/site.js b/Webapp/SDAF/wwwroot/js/site.js index 9686190ef6..9f452873b2 100644 --- a/Webapp/SDAF/wwwroot/js/site.js +++ b/Webapp/SDAF/wwwroot/js/site.js @@ -472,7 +472,8 @@ $("#subscription").on("change", function () { "witness_storage_account_arm_id", "transport_storage_account_id", "install_storage_account_id", - "azure_files_sapmnt_id" + "azure_files_sapmnt_id", + "hanashared_id" ], controller: "/Armclient/GetStorageAccountOptions", errorMessage: "Error retrieving storage accounts for specified subscription", @@ -483,7 +484,8 @@ $("#subscription").on("change", function () { { ids: ["transport_private_endpoint_id", "install_private_endpoint_id", - "sapmnt_private_endpoint_id" + "sapmnt_private_endpoint_id", + "hanashared_private_endpoint_id" ], controller: "/Armclient/GetPrivateEndpointOptions", errorMessage: "Error retrieving private endpoints for specified subscription", @@ -503,7 +505,9 @@ $("#subscription").on("change", function () { } }, { - ids: ["proximityplacementgroup_arm_ids"], + ids: ["proximityplacementgroup_arm_ids", + "app_proximityplacementgroup_arm_ids" + ], controller: "/Armclient/GetPPGroupOptions", errorMessage: "Error retrieving proximity placement groups for specified subscription", input: { @@ -552,11 +556,12 @@ $("#network_arm_id").on("change", function () { controller: "/Armclient/GetSubnetOptions", ids: [ "admin_subnet_arm_id", - "db_subnet_arm_id", + "anf_subnet_arm_id", "app_subnet_arm_id", - "web_subnet_arm_id", + "db_subnet_arm_id", "iscsi_subnet_arm_id", - "anf_subnet_arm_id" + "storage_subnet_arm_id", + "web_subnet_arm_id" ], errorMessage: "Error retrieving subnets for specified vnet", input: { @@ -567,11 +572,12 @@ $("#network_arm_id").on("change", function () { 
controller: "/Armclient/GetNsgOptions", ids: [ "admin_subnet_nsg_arm_id", - "db_subnet_nsg_arm_id", + "anf_subnet_nsg_arm_id", "app_subnet_nsg_arm_id", - "web_subnet_nsg_arm_id", + "db_subnet_nsg_arm_id", "iscsi_subnet_nsg_arm_id", - "anf_subnet_nsg_arm_id" + "storage_subnet_nsg_arm_id", + "web_subnet_nsg_arm_id" ], errorMessage: "Error retrieving network security groups for specified vnet's resource group", input: { diff --git a/deploy/ansible/ansible.cfg b/deploy/ansible/ansible.cfg index bb35510bad..b40fd560ab 100644 --- a/deploy/ansible/ansible.cfg +++ b/deploy/ansible/ansible.cfg @@ -15,7 +15,9 @@ stdout_callback = yaml bin_ansible_callbacks = True host_key_checking = False error_on_undefined_vars = True -log_path = /var/log/ansible.log +log_path = /var/tmp/ansible.log + +allow_world_readable_tmpfiles = True [connection] # ServerAliveInternal - Coming from Achmea, keeps the connection alive and diff --git a/deploy/ansible/playbook_02_os_sap_specific_config.yaml b/deploy/ansible/playbook_02_os_sap_specific_config.yaml index 47d76d161a..bd0d9c1b1d 100644 --- a/deploy/ansible/playbook_02_os_sap_specific_config.yaml +++ b/deploy/ansible/playbook_02_os_sap_specific_config.yaml @@ -284,12 +284,6 @@ vars: tier: ha - - name: "SAP OS configuration playbook: - directory permissions" - ansible.builtin.include_role: - name: roles-sap-os/2.2-sapPermissions - tags: - - 2.2-sapPermissions - - name: "SAP OS configuration playbook: - Configurations according to SAP Notes" ansible.builtin.include_role: name: roles-sap-os/2.10-sap-notes @@ -308,6 +302,12 @@ tags: - 2.6-sap-mounts + - name: "SAP OS configuration playbook: - directory permissions" + ansible.builtin.include_role: + name: roles-sap-os/2.2-sapPermissions + tags: + - 2.2-sapPermissions + when: - ansible_os_family != "Windows" diff --git a/deploy/ansible/playbook_04_00_00_db_install.yaml b/deploy/ansible/playbook_04_00_00_db_install.yaml index 062a8b9921..08b2d6390b 100644 --- 
a/deploy/ansible/playbook_04_00_00_db_install.yaml +++ b/deploy/ansible/playbook_04_00_00_db_install.yaml @@ -324,6 +324,7 @@ become: true when: - node_tier == 'hana' + - database_scale_out block: - name: "Database Installation Playbook: - Setting the DB facts" ansible.builtin.set_fact: @@ -332,7 +333,7 @@ sapbits_location_base_path: "{{ hostvars.localhost.sapbits_location_base_path }}" sapbits_sas_token: "{{ hostvars.localhost.sapbits_sas_token }}" primary_instance_name: "{{ ansible_play_hosts_all[0] }}" # Setting up Primary Instance Name - secondary_instance_name: "{{ ansible_play_hosts_all[1] }}" # Setting up Secondary Instance Name + secondary_instance_name: "{{ ansible_play_hosts_all[1] | default('')}}" # Setting up Secondary Instance Name # Only applicable for scale out with HSR tags: - always diff --git a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml index b772bba222..ae3f261361 100644 --- a/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.0-hdb-install/tasks/main.yaml @@ -71,8 +71,12 @@ _rsp_sid: "{{ db_sid | upper }}" _rsp_number: "{{ db_instance_number }}" _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - password_copy: "{%if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + pwd_hdb_system: "{{ hana_system_user_password | default(main_password) }}" + pwd_os_sidadm: "{{ hana_os_sidadm_password | default(main_password) }}" + pwd_os_sapadm: "{{ hana_os_sapadm_password | default(main_password) }}" + + # use_master_password: "{{ hana_use_master_password }}" + # password_copy: "{{ main_password if hana_use_master_password == 'n' else '' }}" - name: "SAP HANA: Progress" ansible.builtin.debug: diff --git a/deploy/ansible/roles-db/4.0.0-hdb-install/templates/HANA_2_00_install.rsp b/deploy/ansible/roles-db/4.0.0-hdb-install/templates/HANA_2_00_install.rsp index c38c05cd70..321fc9bf33 
100644 --- a/deploy/ansible/roles-db/4.0.0-hdb-install/templates/HANA_2_00_install.rsp +++ b/deploy/ansible/roles-db/4.0.0-hdb-install/templates/HANA_2_00_install.rsp @@ -7,7 +7,7 @@ component_medium= component_dirs= # Use single master password for all users, created during installation ( Default: n ) -use_master_password= {{ use_master_password }} +use_master_password= # Directory root to search for components component_root={{ _rsp_component_root }} @@ -66,7 +66,7 @@ root_user=root root_password= # SAP Host Agent User (sapadm) Password -sapadm_password={{ password_copy }} +sapadm_password={{ pwd_os_sapadm }} # Directory containing a storage configuration storage_cfg= @@ -123,7 +123,7 @@ certificates_hostmap= master_password={{ main_password }} # System Administrator Password -password={{ password_copy }} +password={{ pwd_os_sidadm }} # System Administrator Home Directory ( Default: /usr/sap/${sid}/home ) home=/usr/sap/${sid}/home @@ -138,7 +138,7 @@ userid={{ hdbadm_uid }} groupid={{ sapsys_gid }} # Database User (SYSTEM) Password -system_user_password={{ password_copy }} +system_user_password={{ pwd_hdb_system }} # Restart system after machine reboot? 
( Default: n ) autostart=n diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml index 0c082f8062..e606c4def8 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/tasks/main.yaml @@ -118,8 +118,11 @@ _rsp_sid: "{{ db_sid | upper }}" _rsp_number: "{{ db_instance_number }}" _rsp_system_usage: "custom" - use_master_password: "{{ hana_use_master_password }}" - password_copy: "{% if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" + # use_master_password: "{{ hana_use_master_password }}" + pwd_hdb_system: "{{ hana_system_user_password | default(main_password) }}" + pwd_os_sidadm: "{{ hana_os_sidadm_password | default(main_password) }}" + pwd_os_sapadm: "{{ hana_os_sapadm_password | default(main_password) }}" + # password_copy: "{% if hana_use_master_password == 'n' %}{{ main_password }}{% else %}{% endif %}" _rsp_internal_network: "{{ subnet_cidr_db | default((subnet_address + '/' + subnet_prefix), true) }}" # This comes in from the main ansible playbook. It is the password for the root user. Must be randomized after the installation. 
_rsp_root_password: "{{ root_password }}" diff --git a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp index 85fced0f6e..5ec7b963d0 100644 --- a/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp +++ b/deploy/ansible/roles-db/4.0.3-hdb-install-scaleout/templates/HANA_2_00_install_scaleout.rsp @@ -7,7 +7,7 @@ component_medium= component_dirs= # Use single master password for all users, created during installation ( Default: n ) -use_master_password= {{ use_master_password }} +use_master_password= # Directory root to search for components component_root={{ _rsp_component_root }} @@ -66,7 +66,7 @@ root_user=root root_password={{ _rsp_root_password }} # SAP Host Agent User (sapadm) Password -sapadm_password={{ password_copy }} +sapadm_password={{ pwd_os_sapadm }} # Directory containing a storage configuration storage_cfg= @@ -123,7 +123,7 @@ certificates_hostmap= master_password={{ main_password }} # System Administrator Password -password={{ password_copy }} +password={{ pwd_os_sidadm }} # System Administrator Home Directory ( Default: /usr/sap/${sid}/home ) home=/usr/sap/${sid}/home @@ -138,7 +138,7 @@ userid={{ hdbadm_uid }} groupid={{ sapsys_gid }} # Database User (SYSTEM) Password -system_user_password={{ password_copy }} +system_user_password={{ pwd_hdb_system }} # Restart system after machine reboot? 
( Default: n ) autostart=n diff --git a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml index bf57a065c9..67fb779c0b 100644 --- a/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml +++ b/deploy/ansible/roles-os/1.1-swap/tasks/main.yaml @@ -20,9 +20,10 @@ - Restart waagent - name: "1.1 Swap: - Update WAAgent on {{ ansible_os_family }}" - ansible.builtin.package: + ansible.builtin.dnf: name: WALinuxAgent state: latest + when: - ansible_os_family == 'RedHat' tags: diff --git a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml index f831e412a9..2a6b05dd6d 100644 --- a/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml +++ b/deploy/ansible/roles-os/1.3-repository/vars/repos.yaml @@ -12,31 +12,31 @@ # For example, XX.Y where XX is the major version and Y is the minor version repos: redhat7.4: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'absent' } redhat7.6: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'absent' } redhat7.7: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'absent' } redhat7.9: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm', state: 'absent' } redhat8.1: - - { 
tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.2: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.4: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.6: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.8: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.9: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat8.10: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm', state: 'absent' } redhat9.0: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', 
state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'absent' } redhat9.2: - - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'present' } + - { tier: 'os', repo: 'epel', url: 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm', state: 'absent' } # do not have any repos that are needed for RedHat at the moment. sles_sap12.3: sles_sap12.4: diff --git a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml index b7af028a92..54c69acd3d 100644 --- a/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/tasks/1.4.1-packages.yaml @@ -1,6 +1,11 @@ - name: "1.4 Packages: - Import package list" ansible.builtin.include_vars: os-packages.yaml +- name: "1.4 Packages: - Installing packages for tier" + ansible.builtin.debug: + msg: "Installing packages for tier {{ tier }} on {{ distribution_id }}" + + # Analyse the package list for this distribution selecting only those # packages assigned to the active tier or 'all'. 
- name: "1.4 Packages: - Determine packages appropriate for tier on: {{ distribution_id }}" diff --git a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml index da3906c5bd..979d785afe 100644 --- a/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml +++ b/deploy/ansible/roles-os/1.4-packages/vars/os-packages.yaml @@ -44,7 +44,7 @@ packages: - { tier: 'os', package: 'lvm2', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'numad', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'cifs-utils', node_tier: 'all', state: 'present' } - - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } +# - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -59,13 +59,13 @@ packages: - { tier: 'os', package: 'glibc.i686', node_tier: 'sybase', state: 'present' } - { tier: 'sybase', package: 'libnsl', node_tier: 'sybase', state: 'present' } # --------------------------- End - Packages required for SYBASE -------------------------------------------8 - - { tier: sapos', package: 'autofs', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'tuned-profiles-sap*', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'compat-sap-c++-*', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libatomic', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'uuidd', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'csh', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 
'nfs4-acl-tools', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'tuned-profiles-sap*', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'compat-sap-c++-*', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libatomic', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'uuidd', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'csh', node_tier: 'all', state: 'present' } # ------------------------- Begin - Packages required for Clustering ---------------------------------------8 - { tier: 'ha', package: 'pcs', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'pacemaker', node_tier: 'all', state: 'present' } @@ -145,7 +145,7 @@ packages: - { tier: 'os', package: 'sg3_utils', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'sudo', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'tcsh', node_tier: 'all', state: 'present' } - - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } +# - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 # https://www.ibm.com/docs/en/db2/11.5?topic=servers-linux - { tier: 'os', package: 'libaio', node_tier: 'db2', state: 'present' } @@ -160,6 +160,11 @@ packages: # 2371942 - Error Executing isql or dscp on SAP ASE or SAP HANA Accelerator for SAP ASE - { tier: 'os', package: 'glibc.i686', node_tier: 'sybase', state: 'present' } # --------------------------- End - Packages required for SYBASE -------------------------------------------8 + # --------------------------- Begin - Packages required for HANA ----------------------------------------8 + - { tier: 'sapos', package: 'chkconfig', node_tier: 'hana', state: 'present' } + - { tier: 'sapos', package: 'tuned', node_tier: 'hana', state: 'present' } + - { tier: 'sapos', package: 'tuned-profiles-sap-hana', node_tier: 'hana', 
state: 'present' } + # --------------------------- End - Packages required for HANA ------------------------------------------8 - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'compat-sap-c++-*', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'csh', node_tier: 'all', state: 'present' } @@ -177,7 +182,7 @@ packages: - { tier: 'sapos', package: 'openssl', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'PackageKit-gtk3-module', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'rsyslog', node_tier: 'all', state: 'present' } - - { tier: 'sapos', package: 'tuned-profiles-sap*', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'tuned-profiles-sap', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'uuidd', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'xfsprogs', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'xorg-x11-xauth', node_tier: 'all', state: 'present' } @@ -186,6 +191,7 @@ packages: - { tier: 'ha', package: 'pacemaker', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'nmap', node_tier: 'all', state: 'present' } - { tier: 'ha', package: 'fence-agents-azure-arm', node_tier: 'all', state: 'present' } + - { tier: 'ha', package: 'resource-agents-sap-hana', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'scs', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'ers', state: 'present' } # ------------------------- End - Packages required for Clustering -----------------------------------------8 @@ -217,7 +223,7 @@ packages: - { tier: 'os', package: 'sg3_utils', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'sudo', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'tcsh', node_tier: 'all', state: 'present' } - - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } 
+# - { tier: 'os', package: 'unar', node_tier: 'scs', state: 'present' } - { tier: 'os', package: 'xfsprogs', node_tier: 'all', state: 'present' } # --------------------------- Begin - Packages required for DB2 -----------------------------------------8 @@ -234,9 +240,9 @@ packages: - { tier: 'os', package: 'glibc.i686', node_tier: 'sybase', state: 'present' } # --------------------------- End - Packages required for SYBASE -------------------------------------------8 # --------------------------- Begin - Packages required for HANA ----------------------------------------8 - - { tier: sapos', package: 'chkconfig', node_tier: 'hana', state: 'present' } - - { tier: sapos', package: 'tuned', node_tier: 'hana', state: 'present' } - - { tier: sapos', package: 'tuned-profiles-sap-hana', node_tier: 'hana', state: 'present' } + - { tier: 'sapos', package: 'chkconfig', node_tier: 'hana', state: 'present' } + - { tier: 'sapos', package: 'tuned', node_tier: 'hana', state: 'present' } + - { tier: 'sapos', package: 'tuned-profiles-sap-hana', node_tier: 'hana', state: 'present' } # --------------------------- End - Packages required for HANA ------------------------------------------8 - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } - { tier: 'sapos', package: 'compat-openssl11', node_tier: 'all', state: 'present' } @@ -270,6 +276,8 @@ packages: - { tier: 'ha', package: 'resource-agents-cloud', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'resource-agents-cloud', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'resource-agents-cloud', node_tier: 'scs', state: 'present' } + - { tier: 'ha', package: 'resource-agents-cloud', node_tier: 'db2', state: 'present' } + - { tier: 'ha', package: 'resource-agents-sap-hana', node_tier: 'hana', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'ers', state: 'present' } - { tier: 'ha', package: 'resource-agents-sap', node_tier: 'scs', state: 'present' } # 
------------------------- End - Packages required for Clustering -----------------------------------------8 @@ -318,15 +326,15 @@ packages: - { tier: 'os', package: 'libpam.so.0', node_tier: 'db2', state: 'present' } - { tier: 'db2', package: 'acl', node_tier: 'db2', state: 'present' } # --------------------------- End - Packages required for DB2 -------------------------------------------8 - - { tier: sapos', package: 'autofs', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs-utils', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs-client', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'saptune', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libgcc_s1', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libstdc++6', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libatomic1', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'uuidd', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'nfs-utils', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'nfs-client', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'saptune', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libgcc_s1', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libstdc++6', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libatomic1', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'uuidd', node_tier: 'all', state: 'present' } # ------------------------- Begin - Packages required for Clustering ------------------------------------8 - { tier: 'ha', package: 'socat', node_tier: 'all', state: 'present' 
} - { tier: 'ha', package: 'corosync', node_tier: 'all', state: 'present' } @@ -373,15 +381,15 @@ packages: - { tier: 'os', package: 'cloud-regionsrv-client-plugin-azure', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'regionServiceClientConfigAzure', node_tier: 'all', state: 'present' } - { tier: 'os', package: 'regionServiceCertsAzure', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'autofs', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs-utils', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'saptune', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libgcc_s1', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libstdc++6', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'libatomic1', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'uuidd', node_tier: 'all', state: 'present' } - - { tier: sapos', package: 'acl', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'autofs', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'nfs-utils', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'nfs4-acl-tools', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'saptune', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libgcc_s1', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libstdc++6', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'libatomic1', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'uuidd', node_tier: 'all', state: 'present' } + - { tier: 'sapos', package: 'acl', node_tier: 'all', state: 'present' } # ----------------------- Begin - Packages required for Clustering -------------------------------------8 - { tier: 'ha', package: 'acl', node_tier: 'hana', state: 'present' } - { tier: 
'ha', package: 'socat', node_tier: 'all' , state: 'present' } diff --git a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml index 59c144d58d..a83fa9f3e8 100644 --- a/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml +++ b/deploy/ansible/roles-sap-os/2.2-sapPermissions/tasks/main.yaml @@ -13,11 +13,12 @@ state: "{{ item.state }}" recurse: true loop: - - { node_tier: 'hana', path: '/hana', mode: '0755', owner: 'root', group: 'root', state: 'directory' } - - { node_tier: 'pas', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } - - { node_tier: 'app', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } - - { node_tier: 'scs', path: '/sapmnt', mode: '0755', owner: 'root', group: 'sapsys', state: 'directory' } + - { node_tier: 'pas', path: '/sapmnt', mode: '0755', owner: '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}', group: 'sapsys', state: 'directory' } + - { node_tier: 'app', path: '/sapmnt', mode: '0755', owner: '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}', group: 'sapsys', state: 'directory' } + - { node_tier: 'scs', path: '/sapmnt', mode: '0755', owner: '{% if platform == "SYBASE" %}{{ asesidadm_uid }}{% else %}{{ sidadm_uid }}{% endif %}', group: 'sapsys', state: 'directory' } + - { node_tier: 'hana', path: '/hana', mode: '0755', owner: '{{ sidadm_uid }}', group: 'sapsys', state: 'directory' } when: - item.node_tier == "all" or item.node_tier == node_tier - not users_created.stat.exists + ... 
diff --git a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml index 858422273a..3d22009c0a 100644 --- a/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml +++ b/deploy/ansible/roles-sap/3.3-bom-processing/tasks/process_exe_archives.yaml @@ -13,6 +13,13 @@ - "Extract directory: {{ tempdir.path }}" - "Extract command: {% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" +- name: "Install unar on RHEL" + ansible.builtin.dnf: + name: unar + state: present + skip_broken: true + when: (ansible_os_family | upper) == 'REDHAT' + - name: "3.3 BoM Processing: - Extract File, exe file" ansible.builtin.command : "{% if (ansible_os_family | upper) == 'REDHAT' %}unar -s -D{% else %}unrar x{% endif %} {{ target_media_location }}/{% if item.path is undefined %}downloads{% else %}{{ item.path }}{% endif %}/{% if item.filename is undefined %}{{ item.archive }}{% else %}{{ item.filename }}{% endif %}" args: diff --git a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml index f71113010a..b13910641b 100644 --- a/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.1-dbload/tasks/main.yaml @@ -362,7 +362,7 @@ - name: "DBLoad: Get hdbuserstore path" become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" when: - database_high_availability - platform == 'HANA' @@ -374,6 +374,8 @@ register: hdbuserstore_file environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" + vars: + allow_world_readable_tmpfiles: true - name: "DBLoad: Set hdbuserstore path" when: @@ -412,7 +414,7 @@ - name: "DBLoad: Set DB 
Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" when: - database_high_availability - platform == 'HANA' @@ -463,7 +465,7 @@ - name: "DBLoad: Get hdbuserstore path" become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" when: - database_high_availability - platform == 'HANA' @@ -476,7 +478,6 @@ environment: SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - - name: "DBLoad: Set hdbuserstore path" when: - database_high_availability @@ -487,7 +488,6 @@ hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" db_lb_virtual_host: "{{ sid_to_be_deployed.sid | lower }}{{ db_sid | lower }}db{{ db_instance_number }}cl.{{ sap_fqdn }}" - - name: "DBLoad: show hdbuserstore path" when: - database_high_availability @@ -496,10 +496,9 @@ var: hdbuserstore_path verbosity: 2 - - name: "DBLoad: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" when: - database_high_availability - platform == 'HANA' diff --git a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml index 9096a69054..2b1e3a2cd2 100644 --- a/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.2-pas-install/tasks/main.yaml @@ -239,37 +239,38 @@ - ansible_os_family == 'RedHat' - "distribution_id != 'oraclelinux8'" - - name: "PAS Install: progress" + - name: "PAS Install: Get hdbuserstore path" + become: true + become_user: "root" + ansible.builtin.find: + paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" + file_type: file + patterns: 'hdbuserstore' + recurse: true + register: hdbuserstore_file + vars: + 
allow_world_readable_tmpfiles: true + when: + - database_high_availability + - platform == 'HANA' + + - name: "PAS Install: Set hdbuserstore path" + ansible.builtin.set_fact: + hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" + when: + - database_high_availability + - platform == 'HANA' + - hdbuserstore_file | length > 0 + + - name: "PAS Install: show hdbuserstore path" ansible.builtin.debug: - msg: "Starting PAS installation ({{ sid_to_be_deployed.sid | upper }})" + var: hdbuserstore_path + when: + - database_high_availability + - platform == 'HANA' - name: "Starting PAS installation" block: - - name: "PAS Install: Get hdbuserstore path" - ansible.builtin.find: - paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" - file_type: file - patterns: 'hdbuserstore' - recurse: true - register: hdbuserstore_file - when: - - database_high_availability - - platform == 'HANA' - - - name: "PAS Install: Set hdbuserstore path" - ansible.builtin.set_fact: - hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" - when: - - database_high_availability - - platform == 'HANA' - - hdbuserstore_file | length > 0 - - - name: "PAS Install: show hdbuserstore path" - ansible.builtin.debug: - var: hdbuserstore_path - when: - - database_high_availability - - platform == 'HANA' - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true @@ -285,6 +286,7 @@ when: - database_high_availability - platform == 'HANA' + - hdbuserstore_path | length > 0 - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" become: true @@ -300,8 +302,24 @@ when: - database_high_availability - platform == 'HANA' + - hdbuserstore_path | length > 0 + + - name: "PAS Install: Ensure ownership" + ansible.builtin.file: + path: "{{ item.path }}" + owner: '{{ sid_to_be_deployed.sidadm_uid }}' + 
group: sapsys + recurse: true + state: directory + loop: + - { path: '/sapmnt/{{ sap_sid | upper }}' } + - { path: '/usr/sap/{{ sap_sid | upper }}' } - - name: "PAS Install" + - name: "PAS Install: progress" + ansible.builtin.debug: + msg: "Starting PAS installation ({{ sid_to_be_deployed.sid | upper }})" + + - name: "PAS Install" ansible.builtin.shell: | umask {{ custom_umask | default('022') }} ; ./sapinst SAPINST_INPUT_PARAMETERS_URL={{ dir_params }}/{{ sap_inifile }} \ @@ -369,51 +387,6 @@ - pas_installed_according_to_sapinst is defined - pas_installed_according_to_sapinst | length > 0 - # - name: "PAS Install: Get hdbuserstore path" - # ansible.builtin.find: - # paths: "/usr/sap/{{ sap_sid | upper }}" - # file_type: file - # patterns: 'hdbuserstore' - # recurse: true - # register: hdbuserstore_file - # when: - # - database_high_availability - # - platform == 'HANA' - - - # - name: "PAS Install: Set hdbuserstore path" - # ansible.builtin.set_fact: - # hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" - # when: - # - database_high_availability - # - platform == 'HANA' - # - hdbuserstore_file | length > 0 - - # - name: "PAS Install: show hdbuserstore path" - # ansible.builtin.debug: - # var: hdbuserstore_path - # when: - # - database_high_availability - # - platform == 'HANA' - - # - name: "PAS Install: Set DB Virtual Host name ({{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }})" - # become: true - # become_user: "{{ sid_to_be_deployed.sid | lower }}adm" - # ansible.builtin.shell: | - # {{ hdbuserstore_path }} -H {{ pas_virtual_hostname }} SET DEFAULT {{ custom_db_virtual_hostname | default(db_lb_virtual_host, true) }}:3{{ db_instance_number }}13@{{ db_sid | upper }} {{ schema_name }} {{ main_password }} - # environment: - # SAPSYSTEMNAME: "{{ sid_to_be_deployed.sid | upper }}" - # TMPDIR: "{{ hdbuserstore_path }}" - # ssfs_connect: "1" - # register: hdbuserstore - # vars: - # allow_world_readable_tmpfiles: true - # when: - # - 
database_high_availability - # - platform == 'HANA' - # - pas_installed_according_to_sapinst is defined - # - pas_installed_according_to_sapinst | length > 0 - - name: "PAS Install: Check if Standalone" ansible.builtin.set_fact: servers_temp: "{{ servers_temp | default([]) + [item] }}" @@ -451,12 +424,16 @@ pas_virtual_hostname: "{{ custom_pas_virtual_hostname | default(virtual_host, true) }}" - name: "PAS Install: Get hdbuserstore path" + become: true + become_user: "root" ansible.builtin.find: paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true register: hdbuserstore_file + vars: + allow_world_readable_tmpfiles: true when: - database_high_availability - platform == 'HANA' diff --git a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml index e4ef646b60..86d4f62d36 100644 --- a/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml +++ b/deploy/ansible/roles-sap/5.3-app-install/tasks/main.yaml @@ -287,15 +287,20 @@ - app_installed_according_to_sapinst | length > 0 - name: "APP Install: Get hdbuserstore path" + become: true + become_user: "root" ansible.builtin.find: paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true register: hdbuserstore_file + vars: + allow_world_readable_tmpfiles: true when: - database_high_availability - platform == 'HANA' + - name: "APP Install: Set hdbuserstore path" ansible.builtin.set_fact: hdbuserstore_path: "{{ hdbuserstore_file.files[0].path }}" @@ -354,12 +359,16 @@ app_already_installed: true - name: "APP Install: Get hdbuserstore path" + become: true + become_user: "root" ansible.builtin.find: paths: "/usr/sap/{{ sid_to_be_deployed.sid | upper }}/SYS/exe/uc/linuxx86_64/hdbclient/,/usr/sap/{{ 
sid_to_be_deployed.sid }}/hdbclient" file_type: file patterns: 'hdbuserstore' recurse: true register: hdbuserstore_file + vars: + allow_world_readable_tmpfiles: true when: - database_high_availability - platform == 'HANA' @@ -386,7 +395,7 @@ ssfs_connect: "1" register: hdbuserstore become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" vars: allow_world_readable_tmpfiles: true when: @@ -400,7 +409,7 @@ ssfs_connect: "1" register: hdbuserstore become: true - become_user: "{{ sid_to_be_deployed.sid | lower }}adm" + become_user: "root" vars: allow_world_readable_tmpfiles: true when: diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4-provision.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4-provision.yml index 5a50d05545..bf1cb105a5 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4-provision.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4-provision.yml @@ -1,5 +1,3 @@ ---- - ########################################################################################### # This file calls the OS specific tasks to configure HANA specific clustering resources #8 ########################################################################################### @@ -7,9 +5,21 @@ # Clustering commands are based on the Host OS - name: Cluster based on OS in VM - ansible.builtin.include_tasks: "5.5.4.0-clusterPrep-{{ ansible_os_family }}.yml" + ansible.builtin.include_tasks: + file: "5.5.4.0-clusterPrep-{{ ansible_os_family }}.yml" + apply: + become: true + become_user: root + tags: + - "5.5.4.0-cluster-prep" + when: not hana_cluster_existence_check - name: Cluster based on OS in VM - ansible.builtin.include_tasks: "5.5.4.1-cluster-{{ ansible_os_family }}.yml" - -... 
+ ansible.builtin.include_tasks: + file: "5.5.4.1-cluster-{{ ansible_os_family }}.yml" + apply: + become: true + become_user: root + tags: + - "5.5.4.1-cluster" + when: not hana_cluster_existence_check diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml index 39bb58e634..38e0fc9ebc 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.0-clusterPrep-RedHat.yml @@ -49,8 +49,8 @@ changed_when: false register: hana_system_stopped - - name: Stop HANA Database - ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-stop_hana.yml + # - name: Stop HANA Database + # ansible.builtin.include_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-stop_hana.yml - name: "Configure ANF hana mounts on {{ primary_instance_name }}" when: ansible_hostname == primary_instance_name @@ -322,7 +322,7 @@ changed_when: false register: hana_system_started - - name: Start HANA Database - ansible.builtin.import_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml + # - name: Start HANA Database + # ansible.builtin.include_tasks: ../../../roles-misc/0.4-helpers/tasks/04.01-start_hana.yml # End of HANA clustering resources diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml index 2f5fdf6e66..97d16b9540 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/5.5.4.1-cluster-RedHat.yml @@ -294,11 +294,11 @@ # '*' is a special character in regexp and needs to be escaped for literal matching # if we are worried about character spacing across distros we can match for '\* Online:' - name: "5.5.4.1 HANA Cluster configuration - Wait 
until cluster has stabilized on RHEL 8 or 9" - ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' + ansible.builtin.shell: "set -o pipefail && pcs status nodes | grep '^ Online: ' | cut -d ':' -f2" register: cluster_stable_check retries: 12 delay: 10 - until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" + until: "primary_instance_name in cluster_stable_check.stdout and secondary_instance_name in cluster_stable_check.stdout" when: ansible_distribution_major_version in ["8", "9"] # - name: Ensure Cluster resources are started diff --git a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml index 414e8a9d19..96ea5ba60d 100644 --- a/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml +++ b/deploy/ansible/roles-sap/5.5-hanadb-pacemaker/tasks/main.yml @@ -1,16 +1,23 @@ --- -- name: "5.5 HANADB Pacemaker - import - set_runtime_facts" - ansible.builtin.import_tasks: 5.5.1-set_runtime_facts.yml +- name: "5.5 HANADB Pacemaker - import - set_runtime_facts" + ansible.builtin.include_tasks: 5.5.1-set_runtime_facts.yml -- name: "5.5 HANADB Pacemaker - import - pre_checks" - ansible.builtin.import_tasks: 5.5.2-pre_checks.yml +- name: "5.5 HANADB Pacemaker - import - pre_checks" + ansible.builtin.include_tasks: 5.5.2-pre_checks.yml -- name: "5.5 HANADB Pacemaker - import - SAP HanaSR" - ansible.builtin.import_tasks: 5.5.3-SAPHanaSR.yml +- name: "5.5 HANADB Pacemaker - import - SAP HanaSR" + ansible.builtin.include_tasks: 5.5.3-SAPHanaSR.yml -- name: "5.5 HANADB Pacemaker - import - provision" - ansible.builtin.import_tasks: 5.5.4-provision.yml - when: not hana_cluster_existence_check +- name: "5.5 HANADB Pacemaker - import - SAP HanaSR" + ansible.builtin.include_tasks: + file: 5.5.4-provision.yml + apply: + tags: + - 5.5.4-provision + become: 
true + become_user: root -- name: "5.5 HANADB Pacemaker - import - post_provision_report" - ansible.builtin.import_tasks: 5.5.5-post_provision_report.yml +- name: "5.5 HANADB Pacemaker - import - post_provision_report" + ansible.builtin.include_tasks: 5.5.5-post_provision_report.yml + +... diff --git a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml index 166428f264..0a60acd1c9 100644 --- a/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml +++ b/deploy/ansible/roles-sap/5.6-scsers-pacemaker/tasks/5.6.6-validate.yml @@ -5,27 +5,28 @@ python_version: "python2" when: (ansible_distribution | lower ~ ansible_distribution_major_version) in ['sles_sap12'] +- name: "5.6 SCS/ERS Validation: Get sapcontrol path" + become_user: "root" + become: true + ansible.builtin.find: + paths: "/usr/sap/{{ sap_sid | upper }}/SYS/exe/uc/linuxx86_64,/usr/sap/hostctrl/exe" + file_type: file + patterns: 'sapcontrol' + recurse: true + follow: true + register: sapcontrol_file + +- name: "5.6 SCS/ERS Validation: Set sapcontrol path" + ansible.builtin.set_fact: + sapcontrol_path: "{{ sapcontrol_file.files[0].path }}" + when: sapcontrol_file | length > 0 + - name: "5.6 SCS/ERS Validation: Determine if SCS is running on {{ ansible_hostname }}" - become_user: "{{ sap_sid | lower }}adm" + become_user: "root" become: true when: - ansible_hostname == primary_instance_name block: - - name: "5.6 SCS/ERS Validation: Get sapcontrol path" - become_user: "root" - become: true - ansible.builtin.find: - paths: "/usr/sap/{{ sap_sid | upper }}/SYS/exe/uc/linuxx86_64,/usr/sap/hostctrl/exe" - file_type: file - patterns: 'sapcontrol' - recurse: true - follow: true - register: sapcontrol_file - - - name: "5.6 SCS/ERS Validation: Set sapcontrol path" - ansible.builtin.set_fact: - sapcontrol_path: "{{ sapcontrol_file.files[0].path }}" - when: sapcontrol_file | length > 0 # {{ sapcontrol_path }} -nr {{ 
scs_instance_number }} -function GetProcessList | grep MessageServer | awk '{split($0,result,", "); print result[1],result[3] }' - name: "5.6 SCS/ERS Validation: Determine if SCS is running on {{ ansible_hostname }}" @@ -62,8 +63,6 @@ - is_running.stdout | regex_search('MessageServer') - name: "Check where the cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running" - become: true - become_user: root vars: allow_world_readable_tmpfiles: true ansible_python_interpreter: "{{ python_version }}" @@ -86,8 +85,6 @@ # move cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} to primary_instance_name - name: "5.6 SCS/ERS Validation: Move cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} to {{ primary_instance_name }}" - become: true - become_user: root ansible.builtin.shell: "crm resource move g-{{ sap_sid | upper }}_{{ instance_type | upper }} {{ primary_instance_name }}" vars: allow_world_readable_tmpfiles: true @@ -102,8 +99,6 @@ # move cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} to primary_instance_name - name: "5.6 SCS/ERS Validation: Move cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} to {{ primary_instance_name }}" - become: true - become_user: root ansible.builtin.shell: "pcs resource move g-{{ sap_sid | upper }}_{{ instance_type | upper }} {{ primary_instance_name }}" vars: allow_world_readable_tmpfiles: true @@ -140,8 +135,6 @@ - name: "5.6 SCS/ERS Validation: Validate Cluster resource move and SAP start when the group g-{{ sap_sid | upper }}_{{ instance_type | upper }} has moved" block: - name: "Check if cluster group g-{{ sap_sid | upper }}_{{ instance_type | upper }} is running on {{ primary_instance_name }}" - become: true - become_user: root ansible.builtin.shell: >- set -o pipefail; crm_resource --resource g-{{ sap_sid | upper }}_{{ instance_type | upper }} --locate | cut -d ':' -f 2 | cut -d " " -f 2 diff --git 
a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml index 369ef34c2b..b0927a5c63 100644 --- a/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml +++ b/deploy/ansible/roles-sap/5.7-db2-pacemaker/tasks/5.7.3.0-cluster-RedHat.yml @@ -11,7 +11,7 @@ # if it is not started, then we do not need to stop it # +------------------------------------4--------------------------------------*/ -- name: "DB2 - Find if the databases are active" +- name: "DB2 Cluster: - Find if the databases are active" become: true become_user: db2{{ db_sid | lower }} ansible.builtin.shell: db2 list active databases @@ -22,7 +22,7 @@ register: db2_list_active_databases failed_when: db2_list_active_databases.rc not in [0,4] -- name: " DB2 - Stop the primary DB" +- name: "DB2 Cluster: - Stop the primary DB" become: true become_user: db2{{ db_sid | lower }} ansible.builtin.shell: db2stop force @@ -33,17 +33,17 @@ when: db2_list_active_databases.rc == 0 -- name: "Change to ksh Shell" +- name: "DB2 Cluster: Change to ksh Shell" ansible.builtin.user: user: db2{{ db_sid | lower }} shell: /bin/ksh -- name: "Optimise the Pacemaker cluster for SAP DB2" +- name: "DB2 Cluster: Optimise the Pacemaker cluster for SAP DB2" block: - - name: "Enable Maintenance mode for the cluster" + - name: "DB2 Cluster: Enable Maintenance mode for the cluster" ansible.builtin.command: pcs property set maintenance-mode=true - - name: "Ensure SAP DB2 resource is created" + - name: "DB2 Cluster: Ensure SAP DB2 resource is created" ansible.builtin.shell: > pcs resource create Db2_HADR_{{ db_sid | upper }} db2 instance='db2{{ db_sid | lower }}' dblist='{{ db_sid | upper }}' master meta notify=true resource-stickiness=5000 @@ -51,7 +51,7 @@ failed_when: sap_db2.rc > 1 when: ansible_distribution_major_version == "7" - - name: "Ensure SAP DB2 DB2 resource is created" + - name: "DB2 Cluster: Ensure SAP DB2 
resource is created" ansible.builtin.shell: > pcs resource create Db2_HADR_{{ db_sid | upper }} db2 instance='db2{{ db_sid | lower }}' dblist='{{ db_sid | upper }}' meta resource-stickiness=5000 promotable notify=true @@ -59,75 +59,85 @@ failed_when: sap_db2.rc > 1 when: ansible_distribution_major_version in ["8", "9"] - - name: "Ensure the Virtual IP resource for the Load Balancer Front End IP is created" + - name: "DB2 Cluster: Ensure the Virtual IP resource for the Load Balancer Front End IP is created" ansible.builtin.command: pcs resource create vip_db2{{ db_sid | lower }}_{{ db_sid | upper }} IPaddr2 ip='{{ database_loadbalancer_ip }}' register: vip failed_when: vip.rc > 1 - - name: "Ensure the netcat resource for the Load Balancer Healthprobe is created - Probe port for Azure Load Balacer" + - name: "DB2 Cluster: Ensure the netcat resource for the Load Balancer Healthprobe is created - Probe port for Azure Load Balacer" ansible.builtin.command: pcs resource create nc_db2{{ db_sid | lower }}_{{ db_sid | upper }} azure-lb port=625{{ db_instance_number }} register: netcat failed_when: netcat.rc > 1 - - name: "Ensure a group for ip and Azure loadbalancer probe port is created" + - name: "DB2 Cluster: Ensure a group for ip and Azure loadbalancer probe port is created" ansible.builtin.command: pcs resource group add g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} vip_db2{{ db_sid | lower }}_{{ db_sid | upper }} nc_db2{{ db_sid | lower }}_{{ db_sid | upper }} register: vip_g failed_when: vip_g.rc > 1 - - name: "Create colocation constraints - keep Db2 HADR Master and Group on same node - Rhel 7" + - name: "DB2 Cluster: Create colocation constraints - keep Db2 HADR Master and Group on same node - Rhel 7" ansible.builtin.command: pcs constraint colocation add g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} with master Db2_HADR_{{ db_sid | upper }}-master register: constraint failed_when: constraint.rc > 1 when: ansible_distribution_major_version == "7" - - 
name: "Create colocation constraints - keep Db2 HADR Master and Group on same node - Rhel 8" - ansible.builtin.command: pcs constraint colocation add g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} with master Db2_HADR_{{ db_sid | upper }}-clone + - name: "DB2 Cluster: Create colocation constraints - keep Db2 HADR Master and Group on same node - Rhel 8 & 9" + ansible.builtin.command: pcs constraint colocation add g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} with Promoted Db2_HADR_{{ db_sid | upper }}-clone register: constraint failed_when: constraint.rc > 1 when: ansible_distribution_major_version in ["8", "9"] - - name: "Ensure the order constraint for the SAP DB2 is configured - Rhel - 7" + - name: "DB2 Cluster: Ensure the order constraint for the SAP DB2 is configured - Rhel - 7" ansible.builtin.command: pcs constraint order promote Db2_HADR_{{ db_sid | upper }}-master then g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} register: constraint failed_when: constraint.rc > 1 when: ansible_distribution_major_version == "7" - - name: "Ensure the order constraint for the SAP DB2 is configured - Rhel - 8" + - name: "DB2 Cluster: Ensure the order constraint for the SAP DB2 is configured - Rhel - 8 & 9" ansible.builtin.command: pcs constraint order promote Db2_HADR_{{ db_sid | upper }}-clone then g_ipnc_db2{{ db_sid | lower }}_{{ db_sid | upper }} register: constraint failed_when: constraint.rc > 1 when: ansible_distribution_major_version in ["8", "9"] - - name: "Disable Maintenance mode for the cluster" + - name: "DB2 Cluster: Disable Maintenance mode for the cluster" ansible.builtin.command: pcs property set maintenance-mode=false - - name: "Wait until cluster has stabilized" + - name: "DB2 Cluster: Wait until cluster has stabilized (debug)" + ansible.builtin.shell: "set -o pipefail && pcs status nodes | grep '^ Online: ' | cut -d ':' -f2" + + register: cluster_stable_check_debug + + - name: "DB2 Cluster: Wait until cluster has stabilized output" + 
ansible.builtin.debug: + var: cluster_stable_check_debug + + + - name: "DB2 Cluster: Wait until cluster has stabilized" ansible.builtin.shell: set -o pipefail && pcs status | grep '^Online:' register: cluster_stable_check retries: 12 delay: 10 until: "(primary_instance_name + ' ' + secondary_instance_name) in cluster_stable_check.stdout or (secondary_instance_name + ' ' + primary_instance_name) in cluster_stable_check.stdout" - when: ansible_distribution_major_version != "8" + when: ansible_distribution_major_version not in ["8", "9"] # '*' is a special character in regexp and needs to be escaped for literal matching # if we are worried about character spacing across distros we can match for '\* Online:' - - name: "Wait until cluster has stabilized - RHEL 8.x" - ansible.builtin.shell: set -o pipefail && pcs status | grep '^ \* Online:' + - name: "DB2 Cluster: Wait until cluster has stabilized - RHEL 8.x and 9.x" + ansible.builtin.shell: "set -o pipefail && pcs status nodes | grep '^ Online: ' | cut -d ':' -f2" register: cluster_stable_check retries: 12 delay: 10 - until: "'{{ primary_instance_name }} {{ secondary_instance_name }}' in cluster_stable_check.stdout or '{{ secondary_instance_name }} {{ primary_instance_name }}' in cluster_stable_check.stdout" + until: "primary_instance_name in cluster_stable_check.stdout and secondary_instance_name in cluster_stable_check.stdout" when: ansible_distribution_major_version in ["8", "9"] - - name: "Ensure Cluster resources are started" + - name: "DB2 Cluster: Ensure Cluster resources are started" ansible.builtin.shell: set -o pipefail && pcs resource show | grep ' Started:' register: db2_cluster_resource_check retries: 12 delay: 10 until: "'{{ primary_instance_name }} {{ secondary_instance_name }}' in db2_cluster_resource_check.stdout or '{{ secondary_instance_name }} {{ primary_instance_name }}' in db2_cluster_resource_check.stdout" - when: ansible_distribution_major_version != "8" and ansible_distribution_major_version != 
"9" + when: ansible_distribution_major_version not in ["8", "9"] - - name: "Ensure Cluster resources are started - RHEL 8.x" + - name: "DB2 Cluster: Ensure Cluster resources are started - RHEL 8.x" ansible.builtin.shell: set -o pipefail && pcs resource status | grep 'Started' register: db2_cluster_resource_check retries: 12 diff --git a/deploy/scripts/New-SDAFDevopsProject.ps1 b/deploy/scripts/New-SDAFDevopsProject.ps1 index e23ad4f650..795ddc38af 100644 --- a/deploy/scripts/New-SDAFDevopsProject.ps1 +++ b/deploy/scripts/New-SDAFDevopsProject.ps1 @@ -148,9 +148,14 @@ else { $WebApp = $false } -$confirmation = Read-Host "Use Agent pool with name '$Pool_Name' y/n?" -if ($confirmation -ne 'y') { - $Pool_Name = Read-Host "Enter the name of the agent pool" +if ($Env:SDAF_AGENT_POOL_NAME.Length -ne 0) { + $Pool_Name = $Env:SDAF_AGENT_POOL_NAME +} +else { + $confirmation = Read-Host "Use Agent pool with name '$Pool_Name' y/n?" + if ($confirmation -ne 'y') { + $Pool_Name = Read-Host "Enter the name of the agent pool" + } } $pipeline_permission_url = "" @@ -191,14 +196,14 @@ if ($Project_ID.Length -eq 0) { Add-Content -Path $fname -Value "" Add-Content -Path $fname -Value "Using Azure DevOps Project: $ADO_PROJECT" - az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT + az devops configure --defaults organization=$ADO_ORGANIZATION project='$ADO_PROJECT' - $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) + $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --project '$ADO_PROJECT' --out tsv) Write-Host "Importing the content from GitHub" -ForegroundColor Green - az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --output none + az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --project '$ADO_PROJECT' --output none - az repos update --repository $repo_id --default-branch main --output none 
+ az repos update --repository $repo_id --default-branch main --project '$ADO_PROJECT' --output none } @@ -209,14 +214,14 @@ else { Write-Host "Using an existing project" - az devops configure --defaults organization=$ADO_ORGANIZATION project=$ADO_PROJECT + az devops configure --defaults organization=$ADO_ORGANIZATION project='$ADO_PROJECT' - $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --output tsv) + $repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --project '$ADO_PROJECT' --output tsv) if ($repo_id.Length -ne 0) { Write-Host "Using repository '$ADO_Project'" -ForegroundColor Green } - $repo_size = (az repos list --query "[?name=='$ADO_Project'].size | [0]" --output tsv) + $repo_size = (az repos list --query "[?name=='$ADO_Project'].size | [0]" --project '$ADO_PROJECT' --output tsv) if ($repo_size -eq 0) { Write-Host "Importing the repository from GitHub" -ForegroundColor Green @@ -224,12 +229,12 @@ else { Add-Content -Path $fname -Value "" Add-Content -Path $fname -Value "Terraform and Ansible code repository stored in the DevOps project (sap-automation)" - az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --output tsv + az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --project '$ADO_PROJECT' --output tsv if ($LastExitCode -eq 1) { Write-Host "The repository already exists" -ForegroundColor Yellow Write-Host "Creating repository 'SDAF Configuration'" -ForegroundColor Green $repo_id = (az repos create --name "SDAF Configuration" --query id --output tsv) - az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --output none + az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --project '$ADO_PROJECT' --output none } } @@ -237,12 +242,12 @@ else { $confirmation = Read-Host "The repository already exists, use 
it? y/n" if ($confirmation -ne 'y') { Write-Host "Creating repository 'SDAF Configuration'" -ForegroundColor Green - $repo_id = (az repos create --name "SDAF Configuration" --query id --output tsv) - az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --output none + $repo_id = (az repos create --name "SDAF Configuration" --query id --project '$ADO_PROJECT' --output tsv) + az repos import create --git-url https://github.com/Azure/SAP-automation-bootstrap --repository $repo_id --project '$ADO_PROJECT' --output none } } - az repos update --repository $repo_id --default-branch main --output none + az repos update --repository $repo_id --default-branch main --project '$ADO_PROJECT' --output none } $confirmation = Read-Host "You can optionally import the Terraform and Ansible code from GitHub into Azure DevOps, however, this should only be done if you cannot access github from the Azure DevOps agent or if you intend to customize the code. Do you want to run the code from GitHub y/n?" 
@@ -253,24 +258,24 @@ if ($confirmation -ne 'y') { $import_code = $true $repo_name = "sap-automation" Write-Host "Creating $repo_name repository" -ForegroundColor Green - az repos create --name $repo_name --query id --output none - $code_repo_id = (az repos list --query "[?name=='$repo_name'].id | [0]" --out tsv) - az repos import create --git-url https://github.com/Azure/SAP-automation --repository $code_repo_id --output none - az repos update --repository $code_repo_id --default-branch main --output none + az repos create --name $repo_name --query id --project '$ADO_PROJECT' --output none + $code_repo_id = (az repos list --query "[?name=='$repo_name'].id | [0]" --project '$ADO_PROJECT' --out tsv) + az repos import create --git-url https://github.com/Azure/SAP-automation --repository $code_repo_id --project '$ADO_PROJECT' --output none + az repos update --repository $code_repo_id --default-branch main --project '$ADO_PROJECT' --output none $import_code = $true $repo_name = "sap-samples" Write-Host "Creating $repo_name repository" -ForegroundColor Green - az repos create --name $repo_name --query id --output none - $sample_repo_id = (az repos list --query "[?name=='$repo_name'].id | [0]" --out tsv) - az repos import create --git-url https://github.com/Azure/SAP-automation-samples --repository $sample_repo_id --output none - az repos update --repository $sample_repo_id --default-branch main --output none + az repos create --name $repo_name --query id --project '$ADO_PROJECT' --output none + $sample_repo_id = (az repos list --query "[?name=='$repo_name'].id | [0]" --project '$ADO_PROJECT' --out tsv) + az repos import create --git-url https://github.com/Azure/SAP-automation-samples --repository $sample_repo_id --project '$ADO_PROJECT' --output none + az repos update --repository $sample_repo_id --default-branch main --project '$ADO_PROJECT' --output none if ($ADO_Project -ne "SAP Deployment Automation Framework") { Write-Host "Using a non standard DevOps project name, 
need to update some of the parameter files" -ForegroundColor Green - $objectId = (az devops invoke --area git --resource refs --route-parameters project=$ADO_Project repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId + $objectId = (az devops invoke --area git --resource refs --route-parameters project='$ADO_Project' repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId $templatename = "resources.yml" @@ -321,8 +326,8 @@ if ($confirmation -ne 'y') { az devops invoke ` --area git --resource pushes ` - --route-parameters project=$ADO_Project repositoryId=$repo_id ` - --http-method POST --in-file "SDAF.json" ` + --route-parameters project='$ADO_Project' repositoryId=$repo_id ` + --http-method POST --in-file $inputfile ` --api-version "6.0" --output none Remove-Item $templatename @@ -348,7 +353,7 @@ if ($confirmation -ne 'y') { Add-Content -Path $templatename " name: $ADO_Project/sap-samples" Add-Content -Path $templatename " ref: refs/heads/main" - $objectId = (az devops invoke --area git --resource refs --route-parameters project=$ADO_Project repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId + $objectId = (az devops invoke --area git --resource refs --route-parameters project='$ADO_Project' repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId Remove-Item "sdaf.json" $cont = Get-Content -Path $templatename -Raw @@ -375,14 +380,14 @@ if ($confirmation -ne 'y') { az devops invoke ` --area git --resource pushes ` - --route-parameters project=$ADO_Project repositoryId=$repo_id ` - --http-method POST --in-file "SDAF.json" ` + --route-parameters project='$ADO_Project' repositoryId=$repo_id ` + --http-method POST --in-file $inputfile ` --api-version "6.0" --output none Remove-Item $templatename } - $code_repo_id = (az repos list --query "[?name=='sap-automation'].id | 
[0]" --out tsv) + $code_repo_id = (az repos list --query "[?name=='sap-automation'].id | [0]" --project '$ADO_PROJECT' --out tsv) $queryString = "?api-version=6.0-preview" $pipeline_permission_url = "$ADO_ORGANIZATION/$projectID/_apis/pipelines/pipelinePermissions/repository/$projectID.$code_repo_id$queryString" @@ -402,9 +407,9 @@ else { Start-Process $gh_connection_url Read-Host "Please press enter when you have created the connection" - $ghConn = (az devops service-endpoint list --query "[?type=='github'].name | [0]" --out tsv) + $ghConn = (az devops service-endpoint list --query "[?type=='github'].name | [0]" --project '$ADO_PROJECT' --out tsv) - $objectId = (az devops invoke --area git --resource refs --route-parameters project=$ADO_Project repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId + $objectId = (az devops invoke --area git --resource refs --route-parameters project='$ADO_Project' repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId $templatename = "resources.yml" if (Test-Path $templatename) { @@ -455,7 +460,7 @@ else { az devops invoke ` --area git --resource pushes ` - --route-parameters project=$ADO_Project repositoryId=$repo_id ` + --route-parameters project='$ADO_Project' repositoryId=$repo_id ` --http-method POST --in-file $inputfile ` --api-version "6.0" --output none @@ -485,7 +490,7 @@ else { $cont2 = Get-Content -Path $templatename -Raw - $objectId = (az devops invoke --area git --resource refs --route-parameters project=$ADO_Project repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId + $objectId = (az devops invoke --area git --resource refs --route-parameters project='$ADO_Project' repositoryId=$repo_id --query-parameters filter=heads/main --query value[0] | ConvertFrom-Json).objectId Remove-Item "sdaf.json" @@ -511,7 +516,7 @@ else { az devops invoke ` --area git --resource pushes 
` - --route-parameters project=$ADO_Project repositoryId=$repo_id ` + --route-parameters project='$ADO_Project' repositoryId=$repo_id ` --http-method POST --in-file $inputfile ` --api-version "6.0" --output none @@ -522,18 +527,27 @@ else { #endregion -$repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --out tsv) -$repo_name = (az repos list --query "[?name=='$ADO_Project'].name | [0]" --out tsv) +$repo_id = (az repos list --query "[?name=='$ADO_Project'].id | [0]" --project '$ADO_PROJECT' --out tsv) +$repo_name = (az repos list --query "[?name=='$ADO_Project'].name | [0]" --project '$ADO_PROJECT' --out tsv) $SUserName = 'Enter your S User' $SPassword = 'Enter your S user password' -$provideSUser = Read-Host "Do you want to provide the S user details y/n?" -if ($provideSUser -eq 'y') { - $SUserName = Read-Host "Enter your S User ID" - $SPassword = Read-Host "Enter your S user password" +if ($Env:SUserName.Length -ne 0) { + $SUserName = $Env:SUserName } +if ($Env:SPassword.Length -ne 0) { + $SPassword = $Env:SPassword +} + +if ($Env:SUserName.Length -eq 0 -and $Env:SPassword.Length -eq 0) { + $provideSUser = Read-Host "Do you want to provide the S user details y/n?" 
+ if ($provideSUser -eq 'y') { + $SUserName = Read-Host "Enter your S User ID" + $SPassword = Read-Host "Enter your S user password" + } +} $groups = New-Object System.Collections.Generic.List[System.Object] $pipelines = New-Object System.Collections.Generic.List[System.Object] @@ -1022,7 +1036,7 @@ if (!$AlreadySet -or $ResetPAT ) { accessLevel = @{ accountLicenseType = "stakeholder" } - user = @{ + user = @{ origin = "aad" originId = $MSI_objectId subjectKind = "servicePrincipal" diff --git a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 index 30ab069ed0..026bb47be2 100644 --- a/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 +++ b/deploy/scripts/New-SDAFDevopsWorkloadZone.ps1 @@ -146,7 +146,7 @@ if ($Workload_zoneSubscriptionName.Length -eq 0) { } -az devops configure --defaults organization=$ADO_Organization project=$ADO_Project --output none +az devops configure --defaults organization=$ADO_Organization project="$ADO_Project" --output none if ($Workload_zone_code.Length -eq 0) { Write-Host "Workload zone code is not set (DEV, etc)" diff --git a/deploy/terraform/bootstrap/sap_deployer/providers.tf b/deploy/terraform/bootstrap/sap_deployer/providers.tf index ddfa4e92b7..ffc09b438d 100644 --- a/deploy/terraform/bootstrap/sap_deployer/providers.tf +++ b/deploy/terraform/bootstrap/sap_deployer/providers.tf @@ -82,7 +82,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git a/deploy/terraform/bootstrap/sap_library/providers.tf b/deploy/terraform/bootstrap/sap_library/providers.tf index 0c8737b0b0..62b521bc2b 100644 --- a/deploy/terraform/bootstrap/sap_library/providers.tf +++ b/deploy/terraform/bootstrap/sap_library/providers.tf @@ -100,7 +100,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git 
a/deploy/terraform/run/sap_deployer/providers.tf b/deploy/terraform/run/sap_deployer/providers.tf index 3c0f7e4e89..201ed3865b 100644 --- a/deploy/terraform/run/sap_deployer/providers.tf +++ b/deploy/terraform/run/sap_deployer/providers.tf @@ -81,7 +81,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git a/deploy/terraform/run/sap_landscape/providers.tf b/deploy/terraform/run/sap_landscape/providers.tf index ee256f7527..3492587bb4 100644 --- a/deploy/terraform/run/sap_landscape/providers.tf +++ b/deploy/terraform/run/sap_landscape/providers.tf @@ -117,7 +117,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git a/deploy/terraform/run/sap_landscape/tfvar_variables.tf b/deploy/terraform/run/sap_landscape/tfvar_variables.tf index 1db3605fb9..ff45283825 100644 --- a/deploy/terraform/run/sap_landscape/tfvar_variables.tf +++ b/deploy/terraform/run/sap_landscape/tfvar_variables.tf @@ -936,3 +936,14 @@ variable "deployer_tfstate_key" { default = "" } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." + default = false + type = bool + } + +variable "shared_access_key_enabled_nfs" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." 
+ default = true + type = bool + } diff --git a/deploy/terraform/run/sap_landscape/transform.tf b/deploy/terraform/run/sap_landscape/transform.tf index 018d6f8b4a..c7f7b77f21 100644 --- a/deploy/terraform/run/sap_landscape/transform.tf +++ b/deploy/terraform/run/sap_landscape/transform.tf @@ -179,26 +179,28 @@ locals { } temp_infrastructure = { - environment = coalesce(var.environment, try(var.infrastructure.environment, "")) - region = lower(coalesce(var.location, try(var.infrastructure.region, ""))) - codename = try(var.infrastructure.codename, var.codename) - tags = try(merge(var.resourcegroup_tags, try(var.infrastructure.tags, {})), {}) - deploy_monitoring_extension = var.deploy_monitoring_extension - deploy_defender_extension = var.deploy_defender_extension - user_assigned_identity_id = var.user_assigned_identity_id - patch_mode = var.patch_mode - patch_assessment_mode = var.patch_assessment_mode + environment = var.environment + region = lower(var.location) + codename = var.codename + tags = var.resourcegroup_tags + deploy_monitoring_extension = var.deploy_monitoring_extension + deploy_defender_extension = var.deploy_defender_extension + user_assigned_identity_id = var.user_assigned_identity_id + patch_mode = var.patch_mode + patch_assessment_mode = var.patch_assessment_mode + shared_access_key_enabled = var.shared_access_key_enabled + shared_access_key_enabled_nfs = var.shared_access_key_enabled_nfs } authentication = { - username = coalesce(var.automation_username, try(var.authentication.username, "azureadm")) - password = try(coalesce(var.automation_password, try(var.authentication.password, "")), "") - path_to_public_key = try(coalesce(var.automation_path_to_public_key, try(var.authentication.path_to_public_key, "")), "") - path_to_private_key = try(coalesce(var.automation_path_to_private_key, try(var.authentication.path_to_private_key, "")), "") + username = coalesce(var.automation_username, "azureadm") + password = var.automation_password + 
path_to_public_key = var.automation_path_to_public_key + path_to_private_key = var.automation_path_to_private_key } options = { enable_secure_transfer = true - use_spn = var.use_spn || try(var.options.use_spn, true) + use_spn = var.use_spn } key_vault_temp = { exists = length(var.user_keyvault_id) > 0 @@ -215,7 +217,7 @@ locals { ) > 0 spn_kv = local.spn_keyvault_specified ? ( - try(var.key_vault.kv_spn_id, var.spn_keyvault_id) + var.spn_keyvault_id ) : ( "" ) @@ -235,43 +237,43 @@ locals { ) diagnostics_storage_account = { - arm_id = try(coalesce(var.diagnostics_storage_account_arm_id, try(var.diagnostics_storage_account.arm_id, "")), "") + arm_id = var.diagnostics_storage_account_arm_id } witness_storage_account = { - arm_id = try(coalesce(var.witness_storage_account_arm_id, try(var.witness_storage_account.arm_id, "")), "") + arm_id = var.witness_storage_account_arm_id } vnets = { } sap = { - name = try(var.infrastructure.vnets.sap.name, var.network_name) - logical_name = coalesce(var.network_logical_name, try(var.infrastructure.vnets.sap.logical_name, "")) + name = var.network_name + logical_name = var.network_logical_name - arm_id = try(var.infrastructure.vnets.sap.arm_id, var.network_arm_id) + arm_id = var.network_arm_id address_space = tolist(split(",", var.network_address_space)) } subnet_admin = merge(( { - "name" = try(var.infrastructure.vnets.sap.subnet_admin.name, var.admin_subnet_name) + "name" = var.admin_subnet_name } ), ( local.subnet_admin_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_admin.arm_id, var.admin_subnet_arm_id) + "arm_id" = var.admin_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_admin.prefix, var.admin_subnet_address_prefix) + "prefix" = var.admin_subnet_address_prefix } ), ( local.subnet_admin_nsg_defined ? 
( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_admin.nsg.name, var.admin_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_admin.nsg.arm_id, var.admin_subnet_nsg_arm_id) + "name" = var.admin_subnet_nsg_name + "arm_id" = var.admin_subnet_nsg_arm_id } } ) : ( @@ -283,25 +285,25 @@ locals { subnet_db = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_db.name, var.db_subnet_name) + "name" = var.db_subnet_name } ), ( local.subnet_db_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_db.arm_id, var.db_subnet_arm_id) + "arm_id" = var.db_subnet_arm_id } ) : ( null) ), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_db.prefix, var.db_subnet_address_prefix) + "prefix" = var.db_subnet_address_prefix } ), ( local.subnet_db_nsg_defined ? ( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_db.nsg.name, var.db_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_db.nsg.arm_id, var.db_subnet_nsg_arm_id) + "name" = var.db_subnet_nsg_name + "arm_id" = var.db_subnet_nsg_arm_id } } ) : null @@ -311,25 +313,25 @@ locals { subnet_app = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_app.name, var.app_subnet_name) + "name" = var.app_subnet_name } ), ( local.subnet_app_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_app.arm_id, var.app_subnet_arm_id) + "arm_id" = var.app_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_app.prefix, var.app_subnet_address_prefix) + "prefix" = var.app_subnet_address_prefix } ), ( local.subnet_app_nsg_defined ? 
( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_app.nsg.name, var.app_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_app.nsg.arm_id, var.app_subnet_nsg_arm_id) + "name" = var.app_subnet_nsg_name + "arm_id" = var.app_subnet_nsg_arm_id } } ) : null @@ -339,25 +341,25 @@ locals { subnet_web = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_web.name, var.web_subnet_name) + "name" = var.web_subnet_name } ), ( local.subnet_web_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_web.arm_id, var.web_subnet_arm_id) + "arm_id" = var.web_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_web.prefix, var.web_subnet_address_prefix) + "prefix" = var.web_subnet_address_prefix } ), ( local.subnet_web_nsg_defined ? ( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_web.nsg.name, var.web_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_web.nsg.arm_id, var.web_subnet_nsg_arm_id) + "name" = var.web_subnet_nsg_name + "arm_id" = var.web_subnet_nsg_arm_id } } ) : null @@ -367,25 +369,25 @@ locals { subnet_storage = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_storage.name, var.storage_subnet_name) + "name" = var.storage_subnet_name } ), ( local.subnet_storage_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.arm_id, var.storage_subnet_arm_id) + "arm_id" = var.storage_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_storage.prefix, var.storage_subnet_address_prefix) + "prefix" = var.storage_subnet_address_prefix } ), ( local.subnet_storage_nsg_defined ? 
( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, var.storage_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, var.storage_subnet_nsg_arm_id) + "name" = var.storage_subnet_nsg_name + "arm_id" = var.storage_subnet_nsg_arm_id } } ) : null @@ -395,25 +397,25 @@ locals { subnet_anf = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_anf.name, var.anf_subnet_name) + "name" = var.anf_subnet_name } ), ( local.subnet_anf_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_anf.arm_id, var.anf_subnet_arm_id) + "arm_id" = var.anf_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_anf.prefix, var.anf_subnet_address_prefix) + "prefix" = var.anf_subnet_address_prefix } ), ( local.subnet_anf_nsg_defined ? ( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_anf.nsg.name, var.anf_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_anf.nsg.arm_id, var.anf_subnet_nsg_arm_id) + "name" = var.anf_subnet_nsg_name + "arm_id" = var.anf_subnet_nsg_arm_id } } ) : ( @@ -425,25 +427,25 @@ locals { subnet_iscsi = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_iscsi.name, var.iscsi_subnet_name) + "name" = var.iscsi_subnet_name } ), ( local.subnet_iscsi_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_iscsi.arm_id, var.iscsi_subnet_arm_id) + "arm_id" = var.iscsi_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_iscsi.prefix, var.iscsi_subnet_address_prefix) + "prefix" = var.iscsi_subnet_address_prefix } ), ( local.subnet_iscsi_nsg_defined ? 
( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.name, var.iscsi_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_iscsi.nsg.arm_id, var.iscsi_subnet_nsg_arm_id) + "name" = var.iscsi_subnet_nsg_name + "arm_id" = var.iscsi_subnet_nsg_arm_id } } ) : ( @@ -455,25 +457,25 @@ locals { subnet_ams = merge( ( { - "name" = try(var.infrastructure.vnets.sap.subnet_ams.name, var.ams_subnet_name) + "name" = var.ams_subnet_name } ), ( local.subnet_ams_arm_id_defined ? ( { - "arm_id" = try(var.infrastructure.vnets.sap.subnet_ams.arm_id, var.ams_subnet_arm_id) + "arm_id" = var.ams_subnet_arm_id } ) : ( null )), ( { - "prefix" = try(var.infrastructure.vnets.sap.subnet_ams.prefix, var.ams_subnet_address_prefix) + "prefix" = var.ams_subnet_address_prefix } ), ( local.subnet_web_nsg_defined ? ( { "nsg" = { - "name" = try(var.infrastructure.vnets.sap.subnet_ams.nsg.name, var.ams_subnet_nsg_name) - "arm_id" = try(var.infrastructure.vnets.sap.subnet_ams.nsg.arm_id, var.ams_subnet_nsg_arm_id) + "name" = var.ams_subnet_nsg_name + "arm_id" = var.ams_subnet_nsg_arm_id } } ) : ( diff --git a/deploy/terraform/run/sap_library/providers.tf b/deploy/terraform/run/sap_library/providers.tf index 6760605ed5..deb8d2ea69 100644 --- a/deploy/terraform/run/sap_library/providers.tf +++ b/deploy/terraform/run/sap_library/providers.tf @@ -100,7 +100,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git a/deploy/terraform/run/sap_system/module.tf b/deploy/terraform/run/sap_system/module.tf index dd7eeb9be1..90f12ed5a7 100644 --- a/deploy/terraform/run/sap_system/module.tf +++ b/deploy/terraform/run/sap_system/module.tf @@ -27,9 +27,9 @@ module "sap_namegenerator" { local.application_tier.scs_server_count ) : 0 - app_zones = local.enable_app_tier_deployment ? try(local.application_tier.app_zones, []) : [] - scs_zones = local.enable_app_tier_deployment ? 
try(local.application_tier.scs_zones, []) : [] - web_zones = local.enable_app_tier_deployment ? try(local.application_tier.web_zones, []) : [] + app_zones = local.enable_app_tier_deployment && var.application_server_count > 0 ? try(local.application_tier.app_zones, []) : [] + scs_zones = local.enable_app_tier_deployment && var.scs_server_count > 0 ? try(local.application_tier.scs_zones, []) : [] + web_zones = local.enable_app_tier_deployment && var.webdispatcher_server_count > 0 ? try(local.application_tier.web_zones, []) : [] db_zones = try(local.database.zones, []) resource_offset = try(var.resource_offset, 0) diff --git a/deploy/terraform/run/sap_system/providers.tf b/deploy/terraform/run/sap_system/providers.tf index ca5f975d4f..a7fcc28f1a 100644 --- a/deploy/terraform/run/sap_system/providers.tf +++ b/deploy/terraform/run/sap_system/providers.tf @@ -79,7 +79,7 @@ terraform { } azuread = { source = "hashicorp/azuread" - version = ">=2.2" + version = ">=3.0" } azurerm = { source = "hashicorp/azurerm" diff --git a/deploy/terraform/run/sap_system/tfvar_variables.tf b/deploy/terraform/run/sap_system/tfvar_variables.tf index 7ca5322630..3f2b7cab9b 100644 --- a/deploy/terraform/run/sap_system/tfvar_variables.tf +++ b/deploy/terraform/run/sap_system/tfvar_variables.tf @@ -123,6 +123,18 @@ variable "use_private_endpoint" { type = bool } +variable "shared_access_key_enabled" { + description = "Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key." + default = false + type = bool + } + +variable "shared_access_key_enabled_nfs" { + description = "Indicates whether the storage account used for NFS permits requests to be authorized with the account access key via Shared Key." 
+ default = true + type = bool + } + ######################################################################################### # # diff --git a/deploy/terraform/run/sap_system/transform.tf b/deploy/terraform/run/sap_system/transform.tf index ab3088c9db..0596891ad0 100644 --- a/deploy/terraform/run/sap_system/transform.tf +++ b/deploy/terraform/run/sap_system/transform.tf @@ -13,19 +13,22 @@ locals { deploy_defender_extension = var.deploy_defender_extension patch_mode = var.patch_mode patch_assessment_mode = var.patch_assessment_mode + shared_access_key_enabled = var.shared_access_key_enabled + shared_access_key_enabled_nfs = var.shared_access_key_enabled_nfs + } resource_group = { - name = try(coalesce(var.resourcegroup_name, try(var.infrastructure.resource_group.name, "")), "") - arm_id = try(coalesce(var.resourcegroup_arm_id, try(var.infrastructure.resource_group.arm_id, "")), "") + name = var.resourcegroup_name + arm_id = var.resourcegroup_arm_id } resource_group_defined = (length(local.resource_group.name) + length(local.resource_group.arm_id) ) > 0 ppg = { - arm_ids = distinct(concat(var.proximityplacementgroup_arm_ids, try(var.infrastructure.ppg.arm_ids, []))) - names = distinct(concat(var.proximityplacementgroup_names, try(var.infrastructure.ppg.names, []))) + arm_ids = distinct(var.proximityplacementgroup_arm_ids) + names = distinct(var.proximityplacementgroup_names) } ppg_defined = (length(local.ppg.names) + length(local.ppg.arm_ids)) > 0 @@ -39,9 +42,9 @@ locals { anchor_vms = local.deploy_anchor_vm ? 
( { - deploy = var.deploy_anchor_vm || length(try(var.infrastructure.anchor_vms, {})) > 0 - use_DHCP = var.anchor_vm_use_DHCP || try(var.infrastructure.anchor_vms.use_DHCP, false) - accelerated_networking = var.anchor_vm_accelerated_networking || try(var.infrastructure.anchor_vms.accelerated_networking, false) + deploy = var.deploy_anchor_vm + use_DHCP = var.anchor_vm_use_DHCP + accelerated_networking = var.anchor_vm_accelerated_networking sku = var.anchor_vm_sku os = var.anchor_vm_image @@ -75,14 +78,14 @@ locals { avset_arm_ids = var.database_vm_avset_arm_ids db_avset_arm_ids_defined = length(local.avset_arm_ids) > 0 - frontend_ips = try(coalesce(var.database_loadbalancer_ips, try(var.databases[0].loadbalancer.frontend_ip, [])), []) - db_tags = try(coalesce(var.database_tags, try(var.databases[0].tags, {})), {}) + frontend_ips = var.database_loadbalancer_ips + db_tags = var.database_tags databases_temp = { database_cluster_type = var.database_cluster_type database_server_count = var.database_high_availability ? 2 * var.database_server_count : var.database_server_count database_vm_sku = var.database_vm_sku - db_sizing_key = coalesce(var.db_sizing_dictionary_key, var.database_size, try(var.databases[0].size, "")) + db_sizing_key = coalesce(var.db_sizing_dictionary_key, var.database_size) deploy_v1_monitoring_extension = var.deploy_v1_monitoring_extension dual_nics = var.database_dual_nics high_availability = var.database_high_availability @@ -155,7 +158,7 @@ locals { substr(var.database_platform, 0, 3)) )) number = upper(local.databases_temp.platform) == "HANA" ? 
( - coalesce(var.database_instance_number, try(var.databases[0].instance_number, "00")) + var.database_instance_number ) : ( "00" ) @@ -167,20 +170,20 @@ locals { } app_authentication_defined = (length(local.app_authentication.type) + length(local.app_authentication.username)) > 3 - app_zones_temp = distinct(var.application_server_zones) - scs_zones_temp = distinct(var.scs_server_zones) - web_zones_temp = distinct(var.webdispatcher_server_zones) + app_zones_temp = var.application_server_count > 0 ? distinct(var.application_server_zones) : [] + scs_zones_temp = var.scs_server_count > 0 ? distinct(var.scs_server_zones) : [] + web_zones_temp = var.webdispatcher_server_count > 0 ? distinct(var.webdispatcher_server_zones) : [] application_temp = { - sid = try(coalesce(var.sid, try(var.application_tier.sid, "")), "") + sid = var.sid enable_deployment = local.enable_app_tier_deployment use_DHCP = var.app_tier_use_DHCP dual_nics = var.app_tier_dual_nics - vm_sizing_dictionary_key = try(coalesce(var.app_tier_sizing_dictionary_key, var.app_tier_vm_sizing, try(var.application_tier.vm_sizing, "")), "Optimized") - app_instance_number = coalesce(var.app_instance_number, try(var.application_tier.app_instance_number, "00")) + vm_sizing_dictionary_key = coalesce(var.app_tier_sizing_dictionary_key, "Optimized") + app_instance_number = coalesce(var.app_instance_number, "00") application_server_count = local.enable_app_tier_deployment ? ( - max(var.application_server_count, try(var.application_tier.application_server_count, 0)) + var.application_server_count ) : ( 0 ) @@ -196,18 +199,18 @@ locals { avset_arm_ids = var.application_server_vm_avset_arm_ids scs_server_count = local.enable_app_tier_deployment ? ( - max(var.scs_server_count, try(var.application_tier.scs_server_count, 0)) + var.scs_server_count ) : ( 0 ) scs_high_availability = local.enable_app_tier_deployment ? 
( - var.scs_high_availability || try(var.application_tier.scs_high_availability, false) + var.scs_high_availability ) : ( false ) scs_cluster_type = var.scs_cluster_type - scs_instance_number = coalesce(var.scs_instance_number, try(var.application_tier.scs_instance_number, "00")) - ers_instance_number = coalesce(var.ers_instance_number, try(var.application_tier.ers_instance_number, "02")) + scs_instance_number = coalesce(var.scs_instance_number, "00") + ers_instance_number = coalesce(var.ers_instance_number, "02") scs_sku = var.scs_server_sku scs_use_ppg = var.scs_server_count > 0 ? var.use_scalesets_for_deployment ? ( false) : ( @@ -222,7 +225,7 @@ locals { scs_cluster_disk_type = var.scs_cluster_disk_type webdispatcher_count = local.enable_app_tier_deployment ? ( - max(var.webdispatcher_server_count, try(var.application_tier.webdispatcher_count, 0)) + var.webdispatcher_server_count ) : ( 0 ) @@ -242,9 +245,9 @@ locals { user_assigned_identity_id = var.user_assigned_identity_id } - app_tags = try(coalesce(var.application_server_tags, try(var.application_tier.app_tags, {})), {}) - scs_tags = try(coalesce(var.scs_server_tags, try(var.application_tier.scs_tags, {})), {}) - web_tags = try(coalesce(var.webdispatcher_server_tags, try(var.application_tier.web_tags, {})), {}) + app_tags = var.application_server_tags + scs_tags = var.scs_server_tags + web_tags = var.webdispatcher_server_tags app_os = { source_image_id = try(var.application_server_image.source_image_id, "") @@ -283,13 +286,13 @@ locals { app_os_specified = (length(local.app_os.source_image_id) + length(local.app_os.publisher)) > 0 scs_os = { - os_type = try(coalesce(var.scs_server_image.os_type, var.application_server_image.os_type, "LINUX"), "LINUX") - source_image_id = try(coalesce(var.scs_server_image.source_image_id, try(var.application_tier.scs_os.source_image_id, "")), "") - publisher = try(coalesce(var.scs_server_image.publisher, try(var.application_tier.scs_os.publisher, "SUSE")), "SUSE") - offer = 
try(coalesce(var.scs_server_image.offer, try(var.application_tier.scs_os.offer, "sles-sap-15-sp5")), "sles-sap-15-sp5") - sku = try(coalesce(var.scs_server_image.sku, try(var.application_tier.scs_os.sku, "gen2")), "gen2") - version = try(coalesce(var.scs_server_image.version, try(var.application_tier.scs_os.version, "latest")), "latest") - type = try(var.database_vm_image.type, "marketplace") + os_type = coalesce(var.scs_server_image.os_type, var.application_server_image.os_type, "LINUX") + source_image_id = trimspace(coalesce(var.scs_server_image.source_image_id, var.application_server_image.source_image_id, " ")) + publisher = coalesce(var.scs_server_image.publisher, var.application_server_image.publisher, "SUSE") + offer = coalesce(var.scs_server_image.offer, var.application_server_image.offer, "sles-sap-15-sp5") + sku = coalesce(var.scs_server_image.sku, var.application_server_image.sku, "gen2") + version = coalesce(var.scs_server_image.version, var.application_server_image.version, "latest") + type = coalesce(var.database_vm_image.type, "marketplace") } scs_os_specified = (length(local.scs_os.source_image_id) + length(local.scs_os.publisher)) > 0 @@ -306,13 +309,13 @@ locals { ) web_os = { - os_type = try(coalesce(var.webdispatcher_server_image.os_type, var.application_server_image.os_type, "LINUX"), "LINUX") - source_image_id = try(coalesce(var.webdispatcher_server_image.source_image_id, try(var.application_tier.web_os.source_image_id, "")), "") - publisher = try(coalesce(var.webdispatcher_server_image.publisher, try(var.application_tier.web_os.publisher, "SUSE")), "SUSE") - offer = try(coalesce(var.webdispatcher_server_image.offer, try(var.application_tier.web_os.offer, "sles-sap-15-sp5")), "sles-sap-15-sp5") - sku = try(coalesce(var.webdispatcher_server_image.sku, try(var.application_tier.web_os.sku, "gen2")), "gen2") - version = try(coalesce(var.webdispatcher_server_image.version, try(var.application_tier.web_os.version, "latest")), "latest") - type = 
try(var.database_vm_image.type, "marketplace") + os_type = coalesce(var.webdispatcher_server_image.os_type, var.application_server_image.os_type, "LINUX") + source_image_id = coalesce(var.webdispatcher_server_image.source_image_id, var.application_server_image.source_image_id, " ") + publisher = coalesce(var.webdispatcher_server_image.publisher, var.application_server_image.publisher, "SUSE") + offer = coalesce(var.webdispatcher_server_image.offer, var.application_server_image.offer, "sles-sap-15-sp5") + sku = coalesce(var.webdispatcher_server_image.sku, var.application_server_image.sku, "gen2") + version = coalesce(var.webdispatcher_server_image.version, var.application_server_image.version, "latest") + type = coalesce(var.database_vm_image.type, "marketplace") } web_os_specified = (length(local.web_os.source_image_id) + length(local.web_os.publisher)) > 0 @@ -324,110 +327,83 @@ locals { subnet_admin_defined = ( length(var.admin_subnet_address_prefix) + - length(try(var.infrastructure.vnets.sap.subnet_admin.prefix, "")) + - length(var.admin_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_admin.arm_id, "")) + length(var.admin_subnet_arm_id) ) > 0 subnet_admin_arm_id_defined = ( - length(var.admin_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_admin.arm_id, "")) + length(var.admin_subnet_arm_id) ) > 0 subnet_admin_nsg_defined = ( length(var.admin_subnet_nsg_name) + - length(try(var.infrastructure.vnets.sap.subnet_admin.nsg.name, "")) + - length(var.admin_subnet_nsg_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_admin.nsg.arm_id, "")) + length(var.admin_subnet_nsg_arm_id) ) > 0 subnet_db_defined = ( length(var.db_subnet_address_prefix) + - length(try(var.infrastructure.vnets.sap.subnet_db.prefix, "")) + - length(var.db_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_db.arm_id, "")) + length(var.db_subnet_arm_id) ) > 0 subnet_db_arm_id_defined = ( - length(var.db_subnet_arm_id) + - 
length(try(var.infrastructure.vnets.sap.subnet_db.arm_id, "")) + length(var.db_subnet_arm_id) ) > 0 subnet_db_nsg_defined = ( length(var.db_subnet_nsg_name) + - length(try(var.infrastructure.vnets.sap.subnet_db.nsg.name, "")) + - length(var.db_subnet_nsg_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_db.nsg.arm_id, "")) + length(var.db_subnet_nsg_arm_id) ) > 0 subnet_app_defined = ( length(var.app_subnet_address_prefix) + - length(try(var.infrastructure.vnets.sap.subnet_app.prefix, "")) + - length(var.app_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_app.arm_id, "")) + length(var.app_subnet_arm_id) ) > 0 subnet_app_arm_id_defined = ( - length(var.app_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_app.arm_id, "")) + length(var.app_subnet_arm_id) ) > 0 subnet_app_nsg_defined = ( length(var.app_subnet_nsg_name) + - length(try(var.infrastructure.vnets.sap.subnet_app.nsg.name, "")) + - length(var.app_subnet_nsg_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_app.nsg.arm_id, "")) + length(var.app_subnet_nsg_arm_id) ) > 0 subnet_web_defined = ( length(var.web_subnet_address_prefix) + - length(try(var.infrastructure.vnets.sap.subnet_web.prefix, "")) + - length(var.web_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_web.arm_id, "")) + length(var.web_subnet_arm_id) ) > 0 subnet_web_arm_id_defined = ( - length(var.web_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_web.arm_id, "")) + length(var.web_subnet_arm_id) ) > 0 subnet_web_nsg_defined = ( length(var.web_subnet_nsg_name) + - length(try(var.infrastructure.vnets.sap.subnet_web.nsg.name, "")) + - length(var.web_subnet_nsg_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_web.nsg.arm_id, "")) + length(var.web_subnet_nsg_arm_id) ) > 0 subnet_storage_defined = ( length(var.storage_subnet_address_prefix) + - length(try(var.infrastructure.vnets.sap.subnet_storage.prefix, "")) + - length(var.storage_subnet_arm_id) + - 
length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) + length(var.storage_subnet_arm_id) ) > 0 - subnet_storage_arm_id_defined = ( - length(var.storage_subnet_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_storage.arm_id, "")) - ) > 0 + subnet_storage_arm_id_defined = (length(var.storage_subnet_arm_id)) > 0 subnet_storage_nsg_defined = ( length(var.storage_subnet_nsg_name) + - length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.name, "")) + - length(var.storage_subnet_nsg_arm_id) + - length(try(var.infrastructure.vnets.sap.subnet_storage.nsg.arm_id, "")) + length(var.storage_subnet_nsg_arm_id) ) > 0 - app_nic_ips = distinct(concat(var.application_server_app_nic_ips, try(var.application_tier.app_nic_ips, []))) + app_nic_ips = distinct(var.application_server_app_nic_ips) app_nic_secondary_ips = distinct(var.application_server_app_nic_ips) - app_admin_nic_ips = distinct(concat(var.application_server_admin_nic_ips, try(var.application_tier.app_admin_nic_ips, []))) + app_admin_nic_ips = distinct(var.application_server_admin_nic_ips) - scs_nic_ips = distinct(concat(var.scs_server_app_nic_ips, try(var.application_tier.scs_nic_ips, []))) - scs_admin_nic_ips = distinct(concat(var.scs_server_admin_nic_ips, try(var.application_tier.scs_admin_nic_ips, []))) - scs_server_loadbalancer_ips = distinct(concat(var.scs_server_loadbalancer_ips, try(var.application_tier.scs_server_loadbalancer_ips, []))) + scs_nic_ips = distinct(var.scs_server_app_nic_ips) + scs_admin_nic_ips = distinct(var.scs_server_admin_nic_ips) + scs_server_loadbalancer_ips = distinct(var.scs_server_loadbalancer_ips) - web_nic_ips = distinct(concat(var.webdispatcher_server_app_nic_ips, try(var.application_tier.web_nic_ips, []))) - web_admin_nic_ips = distinct(concat(var.webdispatcher_server_admin_nic_ips, try(var.application_tier.web_admin_nic_ips, []))) - webdispatcher_loadbalancer_ips = distinct(concat(var.webdispatcher_server_loadbalancer_ips, 
try(var.application_tier.webdispatcher_loadbalancer_ips, []))) + web_nic_ips = concat(var.webdispatcher_server_app_nic_ips) + web_admin_nic_ips = concat(var.webdispatcher_server_admin_nic_ips) + webdispatcher_loadbalancer_ips = concat(var.webdispatcher_server_loadbalancer_ips) subnet_admin = merge(( { @@ -610,32 +586,25 @@ locals { temp_vnet = merge(local.vnets, { sap = local.all_subnets }) - user_keyvault_specified = ( - length(var.user_keyvault_id) + - length(try(var.key_vault.kv_user_id, "")) - ) > 0 + user_keyvault_specified = (length(var.user_keyvault_id) ) > 0 user_keyvault = local.user_keyvault_specified ? ( - try(coalesce(var.user_keyvault_id, try(var.key_vault.kv_user_id, "")), "")) : ( - "" - ) + var.user_keyvault_id + ) : "" - spn_keyvault_specified = ( - length(var.spn_keyvault_id) + - length(try(var.key_vault.kv_spn_id, "")) - ) > 0 - spn_kv = local.spn_keyvault_specified ? try(coalesce(var.spn_keyvault_id, try(var.key_vault.kv_spn_id, "")), "") : "" + spn_keyvault_specified = length(var.spn_keyvault_id) > 0 + spn_kv = local.spn_keyvault_specified ? 
var.spn_keyvault_id : "" - username_specified = (length(var.automation_username) + length(try(var.authentication.username, ""))) > 0 - username = try(coalesce(var.automation_username, try(var.authentication.username, "")), "") - password_specified = (length(var.automation_password) + length(try(var.authentication.password, ""))) > 0 - password = try(coalesce(var.automation_password, try(var.authentication.password, "")), "") - path_to_public_key_specified = (length(var.automation_path_to_public_key) + length(try(var.authentication.path_to_public_key, ""))) > 0 - path_to_public_key = try(coalesce(var.automation_path_to_public_key, try(var.authentication.path_to_public_key, "")), "") - path_to_private_key_specified = (length(var.automation_path_to_private_key) + length(try(var.authentication.path_to_private_key, ""))) > 0 - path_to_private_key = try(coalesce(var.automation_path_to_private_key, try(var.authentication.path_to_private_key, "")), "") + username_specified = (length(var.automation_username)) > 0 + username = var.automation_username + password_specified = (length(var.automation_password) ) > 0 + password = var.automation_password + path_to_public_key_specified = (length(var.automation_path_to_public_key) ) > 0 + path_to_public_key = var.automation_path_to_public_key + path_to_private_key_specified = (length(var.automation_path_to_private_key)) > 0 + path_to_private_key = var.automation_path_to_private_key - disk_encryption_set_defined = (length(var.vm_disk_encryption_set_id) + length(try(var.options.disk_encryption_set_id, ""))) > 0 - disk_encryption_set_id = try(coalesce(var.vm_disk_encryption_set_id, try(var.options.disk_encryption_set_id, null)), null) + disk_encryption_set_defined = (length(var.vm_disk_encryption_set_id) ) > 0 + disk_encryption_set_id = var.vm_disk_encryption_set_id infrastructure = merge(local.temp_infrastructure, ( local.resource_group_defined ? 
{ resource_group = local.resource_group } : null), ( @@ -770,9 +739,8 @@ locals { )) privatelink_dns_subscription_id = trimspace(coalesce(var.privatelink_dns_subscription_id, try(data.terraform_remote_state.landscape.outputs.privatelink_dns_subscription_id, - try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, ""), - " " - ) + try(data.terraform_remote_state.landscape.outputs.management_dns_subscription_id, "") + ), " " )) register_storage_accounts_keyvaults_with_dns = var.register_storage_accounts_keyvaults_with_dns diff --git a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf index 15c181384f..93fb13eb2c 100644 --- a/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_landscape/storage_accounts.tf @@ -30,6 +30,7 @@ resource "azurerm_storage_account" "storage_bootdiag" { allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false tags = var.tags + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled } @@ -147,6 +148,7 @@ resource "azurerm_storage_account" "witness_storage" { allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled tags = var.tags network_rules { @@ -292,7 +294,8 @@ resource "azurerm_storage_account" "transport" { https_traffic_only_enabled = false min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false - # shared_access_key_enabled = false + + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled_nfs cross_tenant_replication_enabled = false public_network_access_enabled = var.public_network_access_enabled @@ -516,7 +519,7 @@ resource "azurerm_storage_account" "install" { cross_tenant_replication_enabled = false 
public_network_access_enabled = var.public_network_access_enabled tags = var.tags - # shared_access_key_enabled = false + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled_nfs } diff --git a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf index 552b1a617d..414a86c07c 100644 --- a/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf +++ b/deploy/terraform/terraform-units/modules/sap_namegenerator/output.tf @@ -61,9 +61,9 @@ output "naming" { ANCHOR_COMPUTERNAME = local.anchor_computer_names ANCHOR_SECONDARY_DNSNAME = local.anchor_secondary_dnsnames ANCHOR_VMNAME = local.anchor_vm_names - ANYDB_COMPUTERNAME = var.database_high_availability ? local.anydb_computer_names_ha : local.anydb_computer_names - ANYDB_SECONDARY_DNSNAME = var.database_high_availability ? local.anydb_secondary_dnsnames_ha : local.anydb_secondary_dnsnames - ANYDB_VMNAME = var.database_high_availability ? local.anydb_vm_names_ha : local.anydb_vm_names + ANYDB_COMPUTERNAME = var.database_high_availability ? concat(local.anydb_computer_names, local.anydb_computer_names_ha) : local.anydb_computer_names + ANYDB_SECONDARY_DNSNAME = concat(local.anydb_secondary_dnsnames, local.anydb_secondary_dnsnames_ha) + ANYDB_VMNAME = var.database_high_availability ? concat(local.anydb_vm_names, local.anydb_vm_names_ha) : local.anydb_vm_names DEPLOYER = local.deployer_vm_names HANA_COMPUTERNAME = var.database_high_availability ? var.scale_out ? local.hana_computer_names_scaleout : concat(local.hana_computer_names, local.hana_computer_names_ha) : local.hana_computer_names HANA_SECONDARY_DNSNAME = var.database_high_availability ? var.scale_out ? 
local.hana_secondary_dnsnames_scaleout : concat(local.hana_secondary_dnsnames, local.hana_secondary_dnsnames_ha) : local.hana_secondary_dnsnames diff --git a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf index cf0c629f9d..4785530e08 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/common_infrastructure/storage_accounts.tf @@ -38,6 +38,8 @@ resource "azurerm_storage_account" "sapmnt" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled_nfs + public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) tags = var.tags diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf index 4cda489877..7f80d56e0f 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/outputs.tf @@ -293,7 +293,7 @@ output "site_information" { output "hana_shared_afs_path" { description = "Defines the hanashared mount path" - value = compact( + value = var.database.scale_out ? 
compact( [ format("%s:/%s/%s", try(azurerm_private_endpoint.hanashared[0].private_dns_zone_configs[0].record_sets[0].fqdn, @@ -318,5 +318,5 @@ output "hana_shared_afs_path" { ), azurerm_storage_share.hanashared[1].name ) : "" - ]) + ]) : [] } diff --git a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/storage_accounts.tf b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/storage_accounts.tf index 572923312d..18bef54a11 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/hdb_node/storage_accounts.tf +++ b/deploy/terraform/terraform-units/modules/sap_system/hdb_node/storage_accounts.tf @@ -29,6 +29,7 @@ resource "azurerm_storage_account" "hanashared" { min_tls_version = "TLS1_2" allow_nested_items_to_be_public = false cross_tenant_replication_enabled = false + shared_access_key_enabled = var.infrastructure.shared_access_key_enabled_nfs public_network_access_enabled = try(var.landscape_tfstate.public_network_access_enabled, true) tags = var.tags @@ -176,7 +177,7 @@ resource "time_sleep" "wait_for_private_endpoints" { data "azurerm_private_endpoint_connection" "hanashared" { provider = azurerm.main - count = var.NFS_provider == "AFS" ? ( + count = var.NFS_provider == "AFS" && var.use_private_endpoint && var.database.scale_out ? ( length(var.hanashared_private_endpoint_id) > 0 ? 
( length(var.database.zones)) : ( 0 diff --git a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl index cba151b693..1e3116e3ef 100644 --- a/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl +++ b/deploy/terraform/terraform-units/modules/sap_system/output_files/sap-parameters.tmpl @@ -81,6 +81,15 @@ db_instance_number: "${db_instance_number}" platform: ${platform} +# Database User (SYSTEM) Password +# hana_system_user_password: + +# OS password for adm user +# hana_os_sidadm_password: + +# SAP Host Agent User (sapadm) Password +# hana_os_sapadm_password: + %{~ if scale_out } #############################################################################