Merge pull request #15 from UKCloud/feature/addMonitoring
Feature/add monitoring
stevemul authored Sep 20, 2017
2 parents eee0256 + 60b44a4 commit 7056be2
Showing 19 changed files with 115 additions and 74 deletions.
4 changes: 2 additions & 2 deletions deploy-openshift.sh
@@ -1,4 +1,4 @@
#!/bin/bash

ansible-playbook --private-key ../id_rsa_jenkins -i openshift-ansible-hosts bastion.yml
ansible-playbook --private-key ../id_rsa_jenkins -i openshift-ansible-hosts site.yml
ansible-playbook --private-key ~/id_rsa_jenkins -i openshift-ansible-hosts bastion.yml
ansible-playbook --private-key ~/id_rsa_jenkins -i openshift-ansible-hosts site.yml
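Both playbook invocations now resolve the Jenkins key from the invoking user's home directory instead of the parent of the checkout. A minimal sketch of a run under that assumption (the source location of the key is illustrative):

    # place the deploy key where the playbooks now expect it, then run the wrapper
    cp /path/to/secrets/id_rsa_jenkins ~/id_rsa_jenkins   # source path illustrative
    chmod 600 ~/id_rsa_jenkins
    ./deploy-openshift.sh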
4 changes: 4 additions & 0 deletions monitoring.yml
@@ -0,0 +1,4 @@
- hosts: all, localhost
roles:
- name: monitoring
when: setupMonitoring == True
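The new play targets every inventory host plus localhost and only fires when setupMonitoring evaluates true. A minimal sketch of enabling it, assuming the flag lives alongside the other environment details in group_vars (the file location is inferred from the include_vars paths used elsewhere in this repo):

    # /etc/ansible/group_vars/all.yml (location assumed)
    setupMonitoring: True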
4 changes: 0 additions & 4 deletions roles/common/tasks/main.yml
@@ -1,7 +1,3 @@
- name: Include environment vairables for infrastructure hosts
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Check if we have already set resolv.conf
stat:
path: .resolv_conf_set
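The same pattern repeats across the roles below: the explicit include_vars of /etc/ansible/group_vars/all.yml into an env_details dictionary is dropped, and templates now reference the keys directly. This relies on Ansible loading group_vars/all.yml for every host automatically, so the variables need to sit at the top level of that file, roughly like this (keys taken from the templates in this diff, values illustrative):

    # /etc/ansible/group_vars/all.yml (values illustrative)
    localDomainSuffix: example.local
    domainSuffix: apps.example.com
    haproxy_vip: 10.2.1.100
    master_details:
      10.2.1.11: master1.example.local
    worker_details:
      10.2.1.21: worker1.example.local
    haproxy_details:
      10.2.1.31: lb1.example.local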
2 changes: 1 addition & 1 deletion roles/common/templates/resolv.j2
@@ -1,4 +1,4 @@
search {{ env_details.localDomainSuffix }} localdomain
search {{ localDomainSuffix }} localdomain
{% for hostname in groups.dns %}
nameserver {{ hostvars[hostname].ansible_default_ipv4.address }}
{% endfor %}
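Rendered against two hosts in the dns group with localDomainSuffix set to example.local (all values illustrative), the template produces something like:

    search example.local localdomain
    nameserver 10.2.1.31
    nameserver 10.2.1.32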
4 changes: 0 additions & 4 deletions roles/dns/tasks/dns_common.yml
@@ -1,7 +1,3 @@
- name: Load vairables
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Install bind packages
yum:
name: "{{ item }}"
6 changes: 1 addition & 5 deletions roles/dns/tasks/dns_master.yml
@@ -1,7 +1,3 @@
- name: Load vairables
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: inv hostname
debug:
var: inventory_hostname
@@ -35,7 +31,7 @@
- name: Copy across zone
template:
src: templates/zonefile.j2
dest: /var/named/{{ env_details.localDomainSuffix }}.zone
dest: /var/named/{{ localDomainSuffix }}.zone
when: inventory_hostname == groups.dns[0]
- name: Restart bind
systemd:
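The zone copy above is followed by an unconditional bind restart. A more idiomatic shape, not part of this change, is to notify a handler so the service only restarts when the rendered zone actually changes; the unit name named is an assumption here, since the restart task is truncated in this view:

    - name: Copy across zone
      template:
        src: templates/zonefile.j2
        dest: /var/named/{{ localDomainSuffix }}.zone
      when: inventory_hostname == groups.dns[0]
      notify: Restart bind

    # roles/dns/handlers/main.yml (sketch)
    - name: Restart bind
      systemd:
        name: named      # unit name assumed
        state: restarted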
4 changes: 0 additions & 4 deletions roles/dns/tasks/dns_slave.yml
@@ -1,7 +1,3 @@
- name: Load vairables
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Setup slave key
template:
src: templates/slave_key_config.j2
4 changes: 2 additions & 2 deletions roles/dns/templates/master_zone_config.j2
@@ -1,6 +1,6 @@
zone "{{ env_details.localDomainSuffix }}" IN {
zone "{{ localDomainSuffix }}" IN {
type master;
file "{{ env_details.localDomainSuffix }}.zone";
file "{{ localDomainSuffix }}.zone";
allow-update { none; };
allow-transfer { key TRANSFER; };
};
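allow-transfer { key TRANSFER; } presumes a TSIG key named TRANSFER is defined elsewhere in the named configuration, presumably by the slave key template touched in dns_slave.yml above. For orientation only, such a stanza generally looks like this (algorithm and secret are placeholders, not taken from the repo):

    key "TRANSFER" {
        algorithm hmac-sha256;
        secret "base64-encoded-shared-secret==";
    };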
4 changes: 2 additions & 2 deletions roles/dns/templates/slave_zone_config.j2
@@ -1,7 +1,7 @@
zone"{{ env_details.localDomainSuffix }}" IN {
zone"{{ localDomainSuffix }}" IN {
type slave;
masters { {{ hostvars[groups.dns[0]].ansible_default_ipv4.address }}; };
file "{{ env_details.localDomainSuffix }}.zone";
file "{{ localDomainSuffix }}.zone";
allow-update { none; };
allow-transfer { none; };
};
16 changes: 8 additions & 8 deletions roles/dns/templates/zonefile.j2
@@ -1,32 +1,32 @@
$ORIGIN {{ env_details.localDomainSuffix }}.
$ORIGIN {{ localDomainSuffix }}.
$TTL 60s
@ IN SOA dns1.{{ env_details.localDomainSuffix }}. hostmaster.{{ env_details.localDomainSuffix }}. (
@ IN SOA dns1.{{ localDomainSuffix }}. hostmaster.{{ localDomainSuffix }}. (
2001062501 ; serial
21600 ; refresh after 6 hours
3600 ; retry after 1 hour
604800 ; expire after 1 week
86400 ) ; minimum TTL of 1 day


IN NS dns1.{{ env_details.localDomainSuffix }}.
IN NS dns2.{{ env_details.localDomainSuffix }}.
IN NS dns1.{{ localDomainSuffix }}.
IN NS dns2.{{ localDomainSuffix }}.

{% set count = 1 %}
{% for hostname in groups.dns %}
dns{{ count }} IN A {{ hostvars[hostname].ansible_default_ipv4.address }}
{% set count = count + 1 %}
{% endfor %}

{% for ip, hostname in env_details.haproxy_details.iteritems() %}
{% for ip, hostname in haproxy_details.iteritems() %}
{{ hostname }}. IN A {{ ip }}
{% endfor %}

{% for ip, hostname in env_details.worker_details.iteritems() %}
{% for ip, hostname in worker_details.iteritems() %}
{{ hostname }}. IN A {{ ip }}
{% endfor %}

{% for ip, hostname in env_details.master_details.iteritems() %}
{% for ip, hostname in master_details.iteritems() %}
{{ hostname }}. IN A {{ ip }}
{% endfor %}

console IN A {{ env_details.haproxy_vip }}
console IN A {{ haproxy_vip }}
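Two quirks in this template are untouched by the rename: iteritems() exists only under Python 2 (the haproxy and inventory templates in this same commit already use items()), and a {% set %} inside a Jinja2 for loop does not carry across iterations, so the dns{{ count }} counter most likely emits dns1 for every record. A sketch of the same loops written with items() and loop.index, not part of this change:

    {% for hostname in groups.dns %}
    dns{{ loop.index }} IN A {{ hostvars[hostname].ansible_default_ipv4.address }}
    {% endfor %}

    {% for ip, hostname in haproxy_details.items() %}
    {{ hostname }}. IN A {{ ip }}
    {% endfor %}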
4 changes: 0 additions & 4 deletions roles/haproxy/tasks/main.yml
@@ -1,7 +1,3 @@
- name: Include environment vairables for infrastructure hosts
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Install haproxy
yum:
name: haproxy
6 changes: 3 additions & 3 deletions roles/haproxy/templates/haproxy.j2
@@ -81,7 +81,7 @@ frontend atomic-openshift-api
backend atomic-openshift-router-http
balance source
mode tcp
{% for ip, hostname in env_details.worker_details.items() %}
{% for ip, hostname in worker_details.items() %}
server {{ hostname }} {{ ip }}:80 check
{% endfor %}

@@ -101,12 +101,12 @@ backend atomic-openshift-router-ssl
# Learn on response if server hello.
stick store-response payload_lv(43,1) if serverhello
option ssl-hello-chk
{% for ip, hostname in env_details.worker_details.items() %}
{% for ip, hostname in worker_details.items() %}
server {{ hostname }} {{ ip }}:443 check
{% endfor %}
backend atomic-openshift-api
balance source
mode tcp
{% for ip, hostname in env_details.master_details.items() %}
{% for ip, hostname in master_details.items() %}
server {{ hostname }} {{ ip }}:8443 check
{% endfor %}
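For orientation, with two entries in worker_details (hostnames and addresses illustrative) the router HTTP backend renders roughly as:

    backend atomic-openshift-router-http
        balance source
        mode tcp
        server worker1.example.local 10.2.1.21:80 check
        server worker2.example.local 10.2.1.22:80 check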
6 changes: 1 addition & 5 deletions roles/initalisation/tasks/main.yml
@@ -2,13 +2,9 @@
yum:
name: bind-utils
state: latest
- name: Include environment vairables for infrastructure hosts
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Configure ansible hosts file
template:
src: templates/ansible-hosts-multimaster.j2
dest: ./openshift-ansible-hosts
force: yes
backup: yes
backup: yes
44 changes: 22 additions & 22 deletions roles/initalisation/templates/ansible-hosts-multimaster.j2
@@ -25,16 +25,16 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
openshift_master_htpasswd_users={'admin': '$apr1$mSg5YHIt$TSWms4a24FdbZbqiGPJvc/', 'demo': '$apr1$kxv5vr/4$Y6gc7iyLEa7kvG.DaYwrc/'}

openshift_master_cluster_method=native
openshift_master_cluster_hostname=console.{{ env_details.localDomainSuffix }}
openshift_master_cluster_public_hostname=ocp.{{ env_details.domainSuffix }}
openshift_master_cluster_hostname=console.{{ localDomainSuffix }}
openshift_master_cluster_public_hostname=ocp.{{ domainSuffix }}

openshift_set_hostname=true

# Makes the openshift_ip setting in the host groups are below take effect.
# This sets the nodeIP setting in node-config on each host to the correct IP
openshift_set_node_ip=true

openshift_master_default_subdomain={{ env_details.domainSuffix }}
openshift_master_default_subdomain={{ domainSuffix }}

# Set the MTU size for the Docker0 bridge and docker native containers
openshift_docker_options="--mtu=1400 --log-driver=journald"
@@ -48,18 +48,18 @@ openshift_hosted_registry_replicas=1
#openshift_hosted_registry_storage_kind=openstack
#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_registry_storage_openstack_filesystem=ext4
#openshift_hosted_registry_storage_openstack_volumeID={{ env_details.registryVolume }}
#openshift_hosted_registry_storage_volume_size={{ env_details.registryVolumeSize }}
#openshift_hosted_registry_storage_openstack_volumeID={{ registryVolume }}
#openshift_hosted_registry_storage_volume_size={{ registryVolumeSize }}

# Any S3 service (Minio, ExoScale, ...): Basically the same as above
# but with regionendpoint configured
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ env_details.s3accesskey }}
openshift_hosted_registry_storage_s3_secretkey={{ env_details.s3secretkey }}
openshift_hosted_registry_storage_s3_regionendpoint={{ env_details.s3regionendpoint }}
openshift_hosted_registry_storage_s3_bucket={{ env_details.s3bucketname }}
openshift_hosted_registry_storage_s3_accesskey={{ s3accesskey }}
openshift_hosted_registry_storage_s3_secretkey={{ s3secretkey }}
openshift_hosted_registry_storage_s3_regionendpoint={{ s3regionendpoint }}
openshift_hosted_registry_storage_s3_bucket={{ s3bucketname }}
openshift_hosted_registry_storage_s3_region=bucket_region
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
@@ -75,49 +75,49 @@ openshift_clock_enabled=true

# openstack cinder integration for persistant volume claims
openshift_cloudprovider_kind=openstack
openshift_cloudprovider_openstack_auth_url={{ env_details.osAuthUrl }}
openshift_cloudprovider_openstack_username={{ env_details.openstackOpenshiftUsername}}
openshift_cloudprovider_openstack_password={{ env_details.openstackOpenshiftPassword }}
openshift_cloudprovider_openstack_tenant_id={{ env_details.osTenantId }}
openshift_cloudprovider_openstack_tenant_name={{ env_details.osTenantName }}
openshift_cloudprovider_openstack_region={{ env_details.osRegion }}
openshift_cloudprovider_openstack_auth_url={{ osAuthUrl }}
openshift_cloudprovider_openstack_username={{ openstackOpenshiftUsername}}
openshift_cloudprovider_openstack_password={{ openstackOpenshiftPassword }}
openshift_cloudprovider_openstack_tenant_id={{ osTenantId }}
openshift_cloudprovider_openstack_tenant_name={{ osTenantName }}
openshift_cloudprovider_openstack_region={{ osRegion }}


# openshift stats deployment - comment out if no stats deployment is required. this will run on the worker nodes
openshift_hosted_metrics_deploy=true
openshift_hosted_metrics_public_url=https://hawkular-metrics.{{ env_details.domainSuffix }}/hawkular/metrics
openshift_hosted_metrics_public_url=https://hawkular-metrics.{{ domainSuffix }}/hawkular/metrics
openshift_hosted_metrics_storage_kind=dynamic
openshift_metrics_cassandra_pvc_size=50Gi

# Create an OSEv3 group that contains the masters and nodes groups
# host group for masters
[masters]
{% for ip, hostname in env_details.master_details.items() %}
{% for ip, hostname in master_details.items() %}
{{ hostname }}
{% endfor %}

[etcd]
{% for ip, hostname in env_details.master_details.items() %}
{% for ip, hostname in master_details.items() %}
{{ hostname }}
{% endfor %}

[loadbalancers]
{% for ip, hostname in env_details.haproxy_details.items() %}
{% for ip, hostname in haproxy_details.items() %}
{{ hostname }}
{% endfor %}

[dns]
{% for ip, hostname in env_details.haproxy_details.items() %}
{% for ip, hostname in haproxy_details.items() %}
{{ hostname }}
{% endfor %}

# host group for nodes, includes region info
# Routers are placed only on first 3 workers
[nodes]
{% for ip, hostname in env_details.master_details.items() %}
{% for ip, hostname in master_details.items() %}
{{ hostname }} openshift_ip={{ ip }}
{% endfor %}
{% for ip, hostname in env_details.worker_details.items() %}
{% for ip, hostname in worker_details.items() %}
{% if loop.index <= 3 %}
{{ hostname }} openshift_ip={{ ip }} openshift_node_labels="{'router':'true','purpose':'tenant'}"
{% else %}
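Rendered, the [nodes] group lists every master with its openshift_ip and labels only the first three workers as routers; whatever label the remaining workers receive sits in the truncated else branch and is not visible in this diff. Illustrative output:

    [nodes]
    master1.example.local openshift_ip=10.2.1.11
    worker1.example.local openshift_ip=10.2.1.21 openshift_node_labels="{'router':'true','purpose':'tenant'}"
    worker2.example.local openshift_ip=10.2.1.22 openshift_node_labels="{'router':'true','purpose':'tenant'}"
    worker3.example.local openshift_ip=10.2.1.23 openshift_node_labels="{'router':'true','purpose':'tenant'}"
    worker4.example.local openshift_ip=10.2.1.24 openshift_node_labels="..."   # from the else branch, not shown here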
4 changes: 0 additions & 4 deletions roles/keepalived/tasks/main.yml
@@ -26,10 +26,6 @@
- name: Store keepalived password
set_fact:
keepalived_password: "{{ keepalived_password_output }}"
- name: Include environment vairables for infrastructure hosts
include_vars:
file: /etc/ansible/group_vars/all.yml
name: env_details
- name: Install keepalived
yum:
name: keepalived
2 changes: 1 addition & 1 deletion roles/keepalived/templates/keepalived.j2
@@ -16,7 +16,7 @@ vrrp_instance VI_1 {
auth_pass "{{ hostvars[groups.loadbalancers[0]].keepalived_password.stdout }}"
}
virtual_ipaddress {
{{ env_details.haproxy_vip }} dev eth0
{{ haproxy_vip }} dev eth0
}
track_script {
chk_haproxy
64 changes: 64 additions & 0 deletions roles/monitoring/tasks/main.yml
@@ -0,0 +1,64 @@
- name: Enable extras repo
command: subscription-manager repos --enable=rhel-7-server-extras-rpms
when: inventory_hostname in groups.loadbalancers
- name: Install docker
yum:
name: docker
state: latest
when: inventory_hostname == 'localhost' or inventory_hostname in groups.loadbalancers
become: yes
become_method: sudo
- name: Enable and start docker daemon service
systemd:
name: docker
state: started
enabled: yes
daemon_reload: yes
when: inventory_hostname == 'localhost' or inventory_hostname in groups.loadbalancers
become: yes
become_method: sudo
- name: Create a password for zabbix db
shell: openssl passwd -in /dev/urandom | head -1
register: zabbixDbPassword
when: inventory_hostname == 'localhost'
- name: create persistent storage volume for zabbix
command: docker run -d -v /var/lib/mysql --name zabbix-db-storage busybox:latest
when: inventory_hostname == 'localhost'
become: yes
become_method: sudo
args:
creates: .zabbix_persistence_created
- name: install zabbix db
command: docker run -d --name zabbix-db -v /backups:/backups -v /etc/localtime:/etc/localtime:ro --volumes-from zabbix-db-storage --env="MARIADB_USER=zabbix" --env="MARIADB_PASS={{ zabbixDbPassword.stdout }}" monitoringartist/zabbix-db-mariadb
when: inventory_hostname == 'localhost'
become: yes
become_method: sudo
args:
creates: .zabbix_db_created
- name: install zabbix server
command: docker run -d --name zabbix -p 80:80 -p 10051:10051 -v /etc/localtime:/etc/localtime:ro --link zabbix-db:zabbix.db --env="ZS_DBHost=zabbix.db" --env="ZS_DBUser=zabbix" --env="ZS_DBPassword={{ zabbixDbPassword.stdout }}" monitoringartist/zabbix-xxl:latest
when: inventory_hostname == 'localhost'
become: yes
become_method: sudo
args:
creates: .zabbix_server_created
- name: install zabbix agents
command: docker run --name=dockbix-agent-xxl --net=host --privileged -v /:/rootfs -v /var/run:/var/run --restart unless-stopped -e "ZA_Server=10.2.1.101" -e "ZA_ServerActive=10.2.1.101" -e 'ZA_Hostname={{ inventory_hostname }}' -d monitoringartist/dockbix-agent-xxl-limited:latest
when: inventory_hostname != 'localhost'
become: yes
become_method: sudo
args:
creates: .zabbix_agent_setup
- name: Setup iptables on nodes
command: iptables -I INPUT -p tcp -s 10.2.1.101 --dport 10050 -j ACCEPT
when: inventory_hostname in groups.nodes
become: yes
become_method: sudo
- name: Setup firewall for zabbix agent on loadbalancers
firewalld:
port: 10050/tcp
immediate: true
permanent: true
zone: public
state: enabled
when: inventory_hostname in groups.loadbalancers
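One caveat with the docker run tasks above: the creates: guards point at marker files (.zabbix_persistence_created, .zabbix_db_created, and so on) that none of the commands actually write, so a re-run will attempt the docker run again and fail on the duplicate container name. A hedged sketch of one way to close that gap, not part of this commit, shown for the DB container:

    - name: install zabbix db
      shell: >
        docker run -d --name zabbix-db -v /backups:/backups
        -v /etc/localtime:/etc/localtime:ro --volumes-from zabbix-db-storage
        --env="MARIADB_USER=zabbix" --env="MARIADB_PASS={{ zabbixDbPassword.stdout }}"
        monitoringartist/zabbix-db-mariadb
        && touch .zabbix_db_created
      args:
        creates: .zabbix_db_created
      when: inventory_hostname == 'localhost'
      become: yes
      become_method: sudo

The Zabbix server address 10.2.1.101 is also hard-coded in both the agent command and the iptables rule; lifting it into group_vars next to haproxy_vip would keep it with the rest of the environment detail, and the raw iptables insert on the nodes is not persisted across reboots, unlike the firewalld rule used on the loadbalancers.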
3 changes: 2 additions & 1 deletion site.yml
@@ -1,4 +1,5 @@
- include: loadbalancers.yml
- include: dns.yml
- include: all.yml
- include: openshift.yml
- include: openshift.yml
- include: monitoring.yml
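With monitoring.yml included last, the Zabbix setup runs after the OpenShift install. The play can also be run on its own against an existing inventory; because of the strict == True comparison in monitoring.yml, the flag should arrive as a real boolean, for example via JSON extra-vars rather than key=value. A sketch:

    ansible-playbook --private-key ~/id_rsa_jenkins -i openshift-ansible-hosts monitoring.yml -e '{"setupMonitoring": true}'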
4 changes: 2 additions & 2 deletions tools/create-user.sh
@@ -10,8 +10,8 @@ if [[ -z $username ]] || [[ -z $password ]]; then
exit 1
else
if [[ $debug != 0 ]]; then
ansible-playbook --private-key ../../id_rsa_jenkins -i ../openshift-ansible-hosts -e USERNAME=$username -e PASSWORD=$password playbooks/htpassword.yaml -vv
ansible-playbook --private-key ~/id_rsa_jenkins -i ../openshift-ansible-hosts -e USERNAME=$username -e PASSWORD=$password playbooks/htpassword.yaml -vv
else
ansible-playbook --private-key ../../id_rsa_jenkins -i ../openshift-ansible-hosts -e USERNAME=$username -e PASSWORD=$password playbooks/htpassword.yaml
ansible-playbook --private-key ~/id_rsa_jenkins -i ../openshift-ansible-hosts -e USERNAME=$username -e PASSWORD=$password playbooks/htpassword.yaml
fi
fi
