From c6f9ac2e4bd8c06053d2f088a52318ac0f554bcb Mon Sep 17 00:00:00 2001
From: Anton Sidelnikov
Date: Wed, 5 May 2021 11:16:22 +0300
Subject: [PATCH 1/4] job for csm playbooks

---
 .../zuul/run-csm-production-playbook.yaml     | 79 +++++++++++++++++++
 1 file changed, 79 insertions(+)
 create mode 100644 playbooks/zuul/run-csm-production-playbook.yaml

diff --git a/playbooks/zuul/run-csm-production-playbook.yaml b/playbooks/zuul/run-csm-production-playbook.yaml
new file mode 100644
index 00000000..dbd29000
--- /dev/null
+++ b/playbooks/zuul/run-csm-production-playbook.yaml
@@ -0,0 +1,79 @@
+- hosts: localhost
+  tasks:
+    - name: Add bridge to inventory
+      add_host:
+        name: bridge.eco.tsi-dev.otc-service.com
+        ansible_python_interpreter: python3
+        ansible_user: zuul
+        ansible_host: bridge.eco.tsi-dev.otc-service.com
+        ansible_port: 22
+
+- hosts: localhost
+  tasks:
+    - name: Add bridge hostkey to known hosts
+      known_hosts:
+        name: bridge.eco.tsi-dev.otc-service.com
+        key: "bridge.eco.tsi-dev.otc-service.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCihP3c2JSZG6aFFVruAh3TXlygaoVfon3aUwmpmBVwLbmlpHmuIMfG3dpdFfuEVjwRB1FNp3w510gbDZl+K/E/6trnxkZ7iNkVCL1VZoFrpFQU065QaP3uIrwGWdNeatTrI14YlM4CFIyrdsUithy28RaoKDBFOV4DTLuNZvGvogc7fR4nkTDitzeyEkGugD7v9ZNQiW7tyPiUv1nP911vTSA+R1cJkXlXx1FAxC6y1qXJuH4nKoCmPrYBGanljiUvGHt4YLCF0evYnMipuO0uSMvZG1qGjP1GtSOac1BhKnjTUNaXIYPr8WFB7N57VLDHgfa5s/zLS5P6BdC7FogPuFs9+6k9n4uko9ugYx4cXKObYzbrvvWMwEG5dIphde+Tv9uwpY66cKDpaFYapKN3FpGE3Q9wi43JSjXeySSJJIgafIskTpkmBwgEAM8L0NOqIAAjW8Q+gdxMneD3C5QlAXsb5dLPPuVylVObg5VDi4+u278ndko+DfCbGDw/rYU="
+
+- hosts: bridge.eco.tsi-dev.otc-service.com
+  tasks:
+    - name: Should we run from master
+      set_fact:
+        infra_prod_run_from_master: "{{ zuul.pipeline|default('') in ['periodic', 'otc-infra-prod-hourly'] }}"
+
+    - name: Update from master
+      when: infra_prod_run_from_master|bool
+      git:
+        repo: https://github.com/opentelekomcloud-infra/customer-service-monitoring
+        dest: /home/zuul/src/github.com/opentelekomcloud-infra/customer-service-monitoring
+        force: yes
+
+    - name: Run the csm production playbook and capture logs
+      block:
+
+        - name: Log a playbook start header
+          become: yes
+          shell: 'echo "Running {{ ansible_date_time.iso8601 }}: ansible-playbook -v -f {{ infra_prod_ansible_forks }} /home/zuul/src/github.com/opentelekomcloud-infra/customer-service-monitoring/playbooks/{{ playbook_name }}" > /var/log/ansible/{{ playbook_name }}.log'
+
+        - name: Run specified playbook on bridge and redirect output
+          become: yes
+          shell: 'ansible-playbook -vvv -f {{ infra_prod_ansible_forks }} /home/zuul/src/github.com/opentelekomcloud-infra/customer-service-monitoring/playbooks/{{ playbook_name }} >> /var/log/ansible/{{ playbook_name }}.log'
+
+      always:
+
+        # Not using normal zuul job roles as bridge.eco.tsi-dev.otc-service.com is not a
+        # test node with all the normal bits in place.
+        - name: Collect log output
+          synchronize:
+            dest: "{{ zuul.executor.log_root }}/{{ playbook_name }}.log"
+            mode: pull
+            src: "/var/log/ansible/{{ playbook_name }}.log"
+            verify_host: true
+          when: infra_prod_playbook_collect_log
+
+        - name: Return playbook log artifact to Zuul
+          when: infra_prod_playbook_collect_log
+          zuul_return:
+            data:
+              zuul:
+                artifacts:
+                  - name: "Playbook Log"
+                    url: "{{ playbook_name }}.log"
+                    metadata:
+                      type: text
+
+        # If we aren't publishing logs through zuul then keep a set on
+        # bridge directly.
+        - name: Rename playbook log on bridge
+          when: not infra_prod_playbook_collect_log
+          become: yes
+          copy:
+            remote_src: yes
+            src: "/var/log/ansible/{{ playbook_name }}.log"
+            dest: "/var/log/ansible/{{ playbook_name }}.log.{{ ansible_date_time.iso8601 }}"
+
+        - name: Cleanup old playbook logs on bridge
+          when: not infra_prod_playbook_collect_log
+          become: yes
+          shell: |
+            find /var/log/ansible -name '{{ playbook_name }}.log.*' -type f -mtime +30 -delete

From 5b9ae95ce36422de65f398550d1980bf451f21af Mon Sep 17 00:00:00 2001
From: Anton Sidelnikov
Date: Thu, 6 May 2021 11:45:51 +0300
Subject: [PATCH 2/4] inventory

---
 .../service/group_vars/csm-production.yaml    |   1 +
 inventory/service/group_vars/csm.yaml         | 371 ++++++++++++++++++
 ...on-playbook.yaml => run-csm-playbook.yaml} |   2 +-
 3 files changed, 373 insertions(+), 1 deletion(-)
 create mode 100644 inventory/service/group_vars/csm-production.yaml
 create mode 100644 inventory/service/group_vars/csm.yaml
 rename playbooks/zuul/{run-csm-production-playbook.yaml => run-csm-playbook.yaml} (98%)

diff --git a/inventory/service/group_vars/csm-production.yaml b/inventory/service/group_vars/csm-production.yaml
new file mode 100644
index 00000000..276d5628
--- /dev/null
+++ b/inventory/service/group_vars/csm-production.yaml
@@ -0,0 +1 @@
+csm_instance: production_de
diff --git a/inventory/service/group_vars/csm.yaml b/inventory/service/group_vars/csm.yaml
new file mode 100644
index 00000000..1fe2632f
--- /dev/null
+++ b/inventory/service/group_vars/csm.yaml
@@ -0,0 +1,371 @@
+csm_instances:
+  production_de:
+    variables:
+      project_name: eu-de_test_dmd
+      vpcs: { VPC_A: "192.168.200.0/24", VPC_B: "192.168.201.0/24", VPC_C: "192.168.202.0/24" }
+      vpns: []
+      vpc_peerings: { VPC_A: "VPC_B" }
+      tmp_dir: /tmp
+      home_dir: /home/linux
+      local_private_key: "{{ tmp_dir }}/{{ key_name }}"
+      key_name: infra-key
+      requirements: requirements.txt
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      container_name: csm
+      telegraf_graphite_url: localhost:2003
+      telegraf_graphite_env: production_eu-de
+      loadbalancer_private_ip: 192.168.200.5
+      watcher_image: Standard_Debian_10_latest
+      watcher_flavor: s2.medium.2
+      watcher_volume_size: 10
+      infra_eips_object: infra_eips
+      object_private_key: machine_key
+      # variables for ping script
+      statsd_host: 192.168.14.159
+      statsd_port: 8125
+      deploy_environment: production_eu-de
+      runner_environment: production_eu-de
+      # vpc a nodes list
+      vpc_a_nodes:
+        watcher_eu-de-01: [
+          {ip: 192.168.200.10, name: eu-de-01_to_eu-de-01},
+          {ip: 192.168.200.13, name: eu-de-01_to_eu-de-02},
+          {ip: 192.168.200.16, name: eu-de-01_to_eu-de-03}]
+        watcher_eu-de-02: [
+          {ip: 192.168.200.11, name: eu-de-02_to_eu-de-01},
+          {ip: 192.168.200.14, name: eu-de-02_to_eu-de-02},
+          {ip: 192.168.200.17, name: eu-de-02_to_eu-de-03}]
+        watcher_eu-de-03: [
+          {ip: 192.168.200.12, name: eu-de-03_to_eu-de-01},
+          {ip: 192.168.200.15, name: eu-de-03_to_eu-de-02},
+          {ip: 192.168.200.18, name: eu-de-03_to_eu-de-03}]
+      # vpc a watchers list
+      vpc_a_watchers: [
+        {ip: 192.168.200.2, name: eu-de-01},
+        {ip: 192.168.200.3, name: eu-de-02},
+        {ip: 192.168.200.4, name: eu-de-03}]
+      # vpc b watchers list
+      vpc_b_watchers: [
+        {ip: 192.168.201.2, name: watcher_eu-de-01-vpc-b}]
+      # vpc c nodes list
+      vpc_c_nodes: [
+        {ip: 192.168.202.2, name: instance_vpc-c}]
+      # sites list
+      snat_dnat: [
+        {ip: 'https://digital.gov.ru/', name: ru},
+        {ip: 'https://www.deutschland.de/', name: eu},
+        {ip: 'https://www.gov.cn/', name: cn},
+        {ip: 'https://www.state.gov/', name: us}]
+    watcher_eu-de-01:
+      ansible_host: 192.168.200.2
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-de-01
+      name: "watcher_eu-de-01"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "2003", "2004", "2023", "2024", "8126"]
+      secgroup_udp_ports: ["8125"]
+    watcher_eu-de-02:
+      ansible_host: 192.168.200.3
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-de-02
+      name: "watcher_eu-de-02"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    watcher_eu-de-03:
+      ansible_host: 192.168.200.4
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-de-03
+      name: "watcher_eu-de-03"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    watcher_eu-de-01-vpc-b:
+      ansible_host: 192.168.201.2
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-de-01
+      name: "watcher_eu-de-01-vpc-b"
+      role: watcher
+      vpc: VPC_B
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    instance_1_eu-de-01:
+      ansible_host: 192.168.200.10
+      az: eu-de-01
+      name: "instance_1_eu-de-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-de-01:
+      ansible_host: 192.168.200.11
+      az: eu-de-01
+      name: "instance_2_eu-de-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-de-01:
+      ansible_host: 192.168.200.12
+      az: eu-de-01
+      name: "instance_3_eu-de-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_1_eu-de-02:
+      ansible_host: 192.168.200.13
+      az: eu-de-02
+      name: "instance_1_eu-de-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-de-02:
+      ansible_host: 192.168.200.14
+      az: eu-de-02
+      name: "instance_2_eu-de-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-de-02:
+      ansible_host: 192.168.200.15
+      az: eu-de-02
+      name: "instance_3_eu-de-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_1_eu-de-03:
+      ansible_host: 192.168.200.16
+      az: eu-de-03
+      name: "instance_1_eu-de-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-de-03:
+      ansible_host: 192.168.200.17
+      az: eu-de-03
+      name: "instance_2_eu-de-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-de-03:
+      ansible_host: 192.168.200.18
+      az: eu-de-03
+      name: "instance_3_eu-de-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_vpc-c:
+      ansible_host: 192.168.202.2
+      az: eu-de-01
+      name: "instance_vpc-c"
+      role: instance
+      vpc: VPC_C
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    disabled:
+      hosts:
+
+  production_nl:
+    variables:
+      project_name: eu-nl_something
+      vpcs: { VPC_A: "192.168.200.0/24", VPC_B: "192.168.201.0/24", VPC_C: "192.168.202.0/24" }
+      vpns: []
+      vpc_peerings: { VPC_A: "VPC_B" }
+      tmp_dir: /tmp
+      home_dir: /home/linux
+      local_private_key: "{{ tmp_dir }}/{{ key_name }}"
+      key_name: infra-key
+      requirements: requirements.txt
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      container_name: csm
+      telegraf_graphite_url: localhost:2003
+      telegraf_graphite_env: production_eu-nl
+      loadbalancer_private_ip: 192.168.200.5
+      watcher_image: Standard_Debian_10_latest
+      watcher_flavor: s2.medium.2
+      watcher_volume_size: 10
+      infra_eips_object: infra_eips
+      object_private_key: machine_key
+      # variables for ping script
+      statsd_host: 192.168.14.159
+      statsd_port: 8125
+      deploy_environment: production_eu-nl
+      runner_environment: production_eu-nl
+      # vpc a nodes list
+      vpc_a_nodes:
+        watcher_eu-nl-01: [
+          {ip: 192.168.200.10, name: eu-nl-01_to_eu-nl-01},
+          {ip: 192.168.200.13, name: eu-nl-01_to_eu-nl-02},
+          {ip: 192.168.200.16, name: eu-nl-01_to_eu-nl-03}]
+        watcher_eu-nl-02: [
+          {ip: 192.168.200.11, name: eu-nl-02_to_eu-nl-01},
+          {ip: 192.168.200.14, name: eu-nl-02_to_eu-nl-02},
+          {ip: 192.168.200.17, name: eu-nl-02_to_eu-nl-03}]
+        watcher_eu-nl-03: [
+          {ip: 192.168.200.12, name: eu-nl-03_to_eu-nl-01},
+          {ip: 192.168.200.15, name: eu-nl-03_to_eu-nl-02},
+          {ip: 192.168.200.18, name: eu-nl-03_to_eu-nl-03}]
+      # vpc a watchers list
+      vpc_a_watchers: [
+        {ip: 192.168.200.2, name: eu-nl-01},
+        {ip: 192.168.200.3, name: eu-nl-02},
+        {ip: 192.168.200.4, name: eu-nl-03}]
+      # vpc b watchers list
+      vpc_b_watchers: [
+        {ip: 192.168.201.2, name: watcher_eu-nl-01-vpc-b}]
+      # vpc c nodes list
+      vpc_c_nodes: [
+        {ip: 192.168.202.2, name: instance_vpc-c}]
+      # sites list
+      snat_dnat: [
+        {ip: 'https://digital.gov.ru/', name: ru},
+        {ip: 'https://www.deutschland.de/', name: eu},
+        {ip: 'https://www.gov.cn/', name: cn},
+        {ip: 'https://www.state.gov/', name: us}]
+
+    watcher_eu-nl-01:
+      ansible_host: 192.168.200.2
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-nl-01
+      name: "watcher_eu-nl-01"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "2003", "2004", "2023", "2024", "8126"]
+      secgroup_udp_ports: ["8125"]
+    watcher_eu-nl-02:
+      ansible_host: 192.168.200.3
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-nl-02
+      name: "watcher_eu-nl-02"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    watcher_eu-nl-03:
+      ansible_host: 192.168.200.4
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-nl-03
+      name: "watcher_eu-nl-03"
+      role: watcher
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    watcher_eu-nl-01-vpc-b:
+      ansible_host: 192.168.201.2
+      ansible_ssh_user: linux
+      ansible_ssh_private_key_file: "{{ local_private_key }}"
+      az: eu-nl-01
+      name: "watcher_eu-nl-01-vpc-b"
+      role: watcher
+      vpc: VPC_B
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443"]
+      secgroup_udp_ports: []
+    instance_1_eu-nl-01:
+      ansible_host: 192.168.200.10
+      az: eu-nl-01
+      name: "instance_1_eu-nl-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-nl-01:
+      ansible_host: 192.168.200.11
+      az: eu-nl-01
+      name: "instance_2_eu-nl-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-nl-01:
+      ansible_host: 192.168.200.12
+      az: eu-nl-01
+      name: "instance_3_eu-nl-01"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_1_eu-nl-02:
+      ansible_host: 192.168.200.13
+      az: eu-nl-02
+      name: "instance_1_eu-nl-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-nl-02:
+      ansible_host: 192.168.200.14
+      az: eu-nl-02
+      name: "instance_2_eu-nl-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-nl-02:
+      ansible_host: 192.168.200.15
+      az: eu-nl-02
+      name: "instance_3_eu-nl-02"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_1_eu-nl-03:
+      ansible_host: 192.168.200.16
+      az: eu-nl-03
+      name: "instance_1_eu-nl-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: ["loadbalancer"]
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_2_eu-nl-03:
+      ansible_host: 192.168.200.17
+      az: eu-nl-03
+      name: "instance_2_eu-nl-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_3_eu-nl-03:
+      ansible_host: 192.168.200.18
+      az: eu-nl-03
+      name: "instance_3_eu-nl-03"
+      role: instance
+      vpc: VPC_A
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    instance_vpc-c:
+      ansible_host: 192.168.202.2
+      az: eu-nl-01
+      name: "instance_vpc-c"
+      role: instance
+      vpc: VPC_C
+      scenarios: []
+      secgroup_tcp_ports: ["22", "80", "443", "3333"]
+    disabled:
+      hosts:
diff --git a/playbooks/zuul/run-csm-production-playbook.yaml b/playbooks/zuul/run-csm-playbook.yaml
similarity index 98%
rename from playbooks/zuul/run-csm-production-playbook.yaml
rename to playbooks/zuul/run-csm-playbook.yaml
index dbd29000..71f844c6 100644
--- a/playbooks/zuul/run-csm-production-playbook.yaml
+++ b/playbooks/zuul/run-csm-playbook.yaml
@@ -28,7 +28,7 @@
         dest: /home/zuul/src/github.com/opentelekomcloud-infra/customer-service-monitoring
         force: yes
 
-    - name: Run the csm production playbook and capture logs
+    - name: Run the csm playbooks and capture logs
       block:
 
         - name: Log a playbook start header

From 7550d590866374551bb8c7a0b7836db1a6fb31cb Mon Sep 17 00:00:00 2001
From: Anton Sidelnikov
Date: Thu, 6 May 2021 11:46:58 +0300
Subject: [PATCH 3/4] nl inventory invocation

---
 .../group_vars/{csm-production.yaml => csm-production-de.yaml}  | 0
 inventory/service/group_vars/csm-production-nl.yaml             | 1 +
 2 files changed, 1 insertion(+)
 rename inventory/service/group_vars/{csm-production.yaml => csm-production-de.yaml} (100%)
 create mode 100644 inventory/service/group_vars/csm-production-nl.yaml

diff --git a/inventory/service/group_vars/csm-production.yaml b/inventory/service/group_vars/csm-production-de.yaml
similarity index 100%
rename from inventory/service/group_vars/csm-production.yaml
rename to inventory/service/group_vars/csm-production-de.yaml
diff --git a/inventory/service/group_vars/csm-production-nl.yaml b/inventory/service/group_vars/csm-production-nl.yaml
new file mode 100644
index 00000000..9240b25a
--- /dev/null
+++ b/inventory/service/group_vars/csm-production-nl.yaml
@@ -0,0 +1 @@
+csm_instance: production_nl
\ No newline at end of file

From 7550de34b8870789d369a2e38033ee921a6dfb4c Mon Sep 17 00:00:00 2001
From: Artem Goncharov
Date: Thu, 6 May 2021 13:39:32 +0200
Subject: [PATCH 4/4] add watchers under real hosts

Together with that add some additional playbooks and roles to further
automate infra provisioning
---
 inventory/base/hosts.yaml                     | 21 +++++++
 .../service/group_vars/cloud-launcher.yaml    | 55 +++++++++++++++++++
 inventory/service/group_vars/csm_watcher.yaml |  3 +
 inventory/service/groups.yaml                 | 27 ++++++---
 ...01.apimon.eco.tsi-dev.otc-service.com.yaml |  6 ++
 ...02.apimon.eco.tsi-dev.otc-service.com.yaml |  6 ++
 ...03.apimon.eco.tsi-dev.otc-service.com.yaml |  7 +++
 playbooks/cloud-hosts.yaml                    | 13 +++++
 playbooks/cloud-networks.yaml                 |  7 +++
 playbooks/roles/cloud_host/defaults/main.yaml |  1 +
 playbooks/roles/cloud_host/tasks/destroy.yaml |  6 ++
 playbooks/roles/cloud_host/tasks/main.yaml    |  6 ++
 .../roles/cloud_host/tasks/provision.yaml     | 25 +++++++++
 playbooks/roles/cloud_sg/defaults/main.yaml   |  1 +
 playbooks/roles/cloud_sg/tasks/destroy.yaml   |  4 ++
 playbooks/roles/cloud_sg/tasks/main.yaml      |  6 ++
 playbooks/roles/cloud_sg/tasks/provision.yaml | 18 ++++++
 .../clouds/bridge_all_clouds.yaml.j2          | 20 +++++++
 18 files changed, 224 insertions(+), 8 deletions(-)
 create mode 100644 inventory/service/group_vars/csm_watcher.yaml
 create mode 100644 inventory/service/host_vars/watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com.yaml
 create mode 100644 inventory/service/host_vars/watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com.yaml
 create mode 100644 inventory/service/host_vars/watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com.yaml
 create mode 100644 playbooks/cloud-hosts.yaml
 create mode 100644 playbooks/roles/cloud_host/defaults/main.yaml
 create mode 100644 playbooks/roles/cloud_host/tasks/destroy.yaml
 create mode 100644 playbooks/roles/cloud_host/tasks/main.yaml
 create mode 100644 playbooks/roles/cloud_host/tasks/provision.yaml
 create mode 100644 playbooks/roles/cloud_sg/defaults/main.yaml
 create mode 100644 playbooks/roles/cloud_sg/tasks/destroy.yaml
 create mode 100644 playbooks/roles/cloud_sg/tasks/main.yaml
 create mode 100644 playbooks/roles/cloud_sg/tasks/provision.yaml

diff --git a/inventory/base/hosts.yaml b/inventory/base/hosts.yaml
index 64ea1178..5d9b1342 100644
--- a/inventory/base/hosts.yaml
+++ b/inventory/base/hosts.yaml
@@ -52,3 +52,24 @@ all:
       ansible_host: 192.168.20.182
     zk2.zuul.eco.tsi-dev.otc-service.com:
       ansible_host: 192.168.20.47
+    watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com:
+      ansible_host: 192.168.204.2
+      ansible_user: linux
+      location:
+        cloud: "otcinfra-domain3-csm-nl"
+        region: "eu-nl"
+        az: "eu-nl-01"
+    watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com:
+      ansible_host: 192.168.204.3
+      ansible_user: linux
+      location:
+        cloud: "otcinfra-domain3-csm-nl"
+        region: "eu-nl"
+        az: "eu-nl-02"
+    watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com:
+      ansible_host: 192.168.204.4
+      ansible_user: linux
+      location:
+        cloud: "otcinfra-domain3-csm-nl"
+        region: "eu-nl"
+        az: "eu-nl-03"
diff --git a/inventory/service/group_vars/cloud-launcher.yaml b/inventory/service/group_vars/cloud-launcher.yaml
index e353d9bd..6dda0698 100644
--- a/inventory/service/group_vars/cloud-launcher.yaml
+++ b/inventory/service/group_vars/cloud-launcher.yaml
@@ -41,6 +41,16 @@ cloud_projects:
     cloud: "otc-tests-admin"
   - name: "eu-nl_apimon_probes4"
     cloud: "otc-tests-admin"
+  - name: "eu-nl_eco_csm"
+    cloud: "otc-domain3-admin"
+    description: "CSM Project"
+    properties:
+      parent_id: "66a5482c6f154f98a426ecb33579772d"
+  - name: "eu-de_eco_csm"
+    cloud: "otc-domain3-admin"
+    description: "CSM Project"
+    properties:
+      parent_id: "9c5d1a97b49a4715b39ccd0a7e08489c"
 
 cloud_user_groups:
   # Zuul groups
@@ -143,6 +153,51 @@ cloud_nets:
       - name: "apimon-infra-subnet"
         cidr: "192.168.151.0/24"
         dns_nameservers: ['100.125.4.25', '8.8.4.4']
+  - cloud: "otcinfra-domain3-csm-nl"
+    router: "VPC_A"
+    nets:
+      - name: "vpc_a_csm_net"
+        subnets:
+          - name: "csm-subnet"
+            cidr: "192.168.204.0/24"
+            dns_nameservers: ['100.125.4.25', '8.8.4.4']
+  - cloud: "otcinfra-domain3-csm-nl"
+    router: "VPC_B"
+    nets:
+      - name: "vpc_b_csm_net"
+        subnets:
+          - name: "csm-subnet"
+            cidr: "192.168.205.0/24"
+            dns_nameservers: ['100.125.4.25', '8.8.4.4']
+  - cloud: "otcinfra-domain3-csm-nl"
+    router: "VPC_C"
+    nets:
+      - name: "vpc_c_csm_net"
+        subnets:
+          - name: "csm-subnet"
+            cidr: "192.168.206.0/24"
+            dns_nameservers: ['100.125.4.25', '8.8.4.4']
+
+cloud_security_groups:
+  - cloud: "otcinfra-domain3-csm-nl"
+    name: "watcher-sg"
+    rules:
+      - protocol: "icmp"
+        port_range_min: -1
+        port_range_max: -1
+        remote_ip_prefix: "0.0.0.0/0"
+      - protocol: "tcp"
+        port_range_min: 22
+        port_range_max: 22
+        remote_ip_prefix: "0.0.0.0/0"
+      - protocol: "tcp"
+        port_range_min: 80
+        port_range_max: 80
+        remote_ip_prefix: "0.0.0.0/0"
+      - protocol: "tcp"
+        port_range_min: 443
+        port_range_max: 443
+        remote_ip_prefix: "0.0.0.0/0"
 
 cloud_nat_gws:
   - cloud: "otcinfra-domain3-infra-de"
diff --git a/inventory/service/group_vars/csm_watcher.yaml b/inventory/service/group_vars/csm_watcher.yaml
new file mode 100644
index 00000000..dd8dd981
--- /dev/null
+++ b/inventory/service/group_vars/csm_watcher.yaml
@@ -0,0 +1,3 @@
+image: Standard_Debian_10_latest
+flavor: s2.medium.2
+volume_size: 10
diff --git a/inventory/service/groups.yaml b/inventory/service/groups.yaml
index c1b8c768..32e20afb 100644
--- a/inventory/service/groups.yaml
+++ b/inventory/service/groups.yaml
@@ -1,6 +1,7 @@
 plugin: yamlgroup
 groups:
-  # NOTE(gtema): bridge is present in most groups to be able to manage k8 deployments of the service
+  # NOTE(gtema): bridge is present in most groups to
+  # be able to manage k8 deployments of the service
   # APImon groups:
   # general APImon values
   #
@@ -19,10 +20,10 @@
   apimon-clouds:
     - bridge.eco.tsi-dev.otc-service.com
     - scheduler1.apimon.eco.tsi-dev.otc-service.com
-    #- executor1.apimon.eco.tsi-dev.otc-service.com
+    # - executor1.apimon.eco.tsi-dev.otc-service.com
     - executor2.apimon.eco.tsi-dev.otc-service.com
-    #- executor3.apimon.eco.tsi-dev.otc-service.com
-    #- executor4.apimon.eco.tsi-dev.otc-service.com
+    # - executor3.apimon.eco.tsi-dev.otc-service.com
+    # - executor4.apimon.eco.tsi-dev.otc-service.com
     - hybrid.apimon.eco.tsi-dev.otc-service.com
     - preprod.apimon.eco.tsi-dev.otc-service.com
 
@@ -52,7 +53,7 @@
   # "production" instance of the apimon
   apimon-production:
     - executor1.apimon.eco.tsi-dev.otc-service.com
-    #- executor2.apimon.eco.tsi-dev.otc-service.com
+    # - executor2.apimon.eco.tsi-dev.otc-service.com
     - executor3.apimon.eco.tsi-dev.otc-service.com
     - executor4.apimon.eco.tsi-dev.otc-service.com
     - scheduler1.apimon.eco.tsi-dev.otc-service.com
@@ -71,10 +72,10 @@
   # Where local statsd should be deployed
   statsd:
     - scheduler1.apimon.eco.tsi-dev.otc-service.com
-    #- executor1.apimon.eco.tsi-dev.otc-service.com
+    # - executor1.apimon.eco.tsi-dev.otc-service.com
    - executor2.apimon.eco.tsi-dev.otc-service.com
-    #- executor3.apimon.eco.tsi-dev.otc-service.com
-    #- executor4.apimon.eco.tsi-dev.otc-service.com
+    # - executor3.apimon.eco.tsi-dev.otc-service.com
+    # - executor4.apimon.eco.tsi-dev.otc-service.com
     - hybrid.apimon.eco.tsi-dev.otc-service.com
     - preprod.apimon.eco.tsi-dev.otc-service.com
 
@@ -139,6 +140,11 @@
   nodepool:
     - bridge.eco.tsi-dev.otc-service.com
 
+  csm_watcher:
+    - watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com
+    - watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com
+    - watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com
+
   disabled:
     # We can not manage coreos with ansible by default
     - graphite1.apimon.eco.tsi-dev.otc-service.com
@@ -146,3 +152,8 @@
     - zk0.zuul.eco.tsi-dev.otc-service.com
     - zk1.zuul.eco.tsi-dev.otc-service.com
     - zk2.zuul.eco.tsi-dev.otc-service.com
+    # Unless we finalize infra hosts management those
+    # should not be used to provision
+    - watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com
+    - watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com
+    - watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com
diff --git a/inventory/service/host_vars/watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com.yaml b/inventory/service/host_vars/watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com.yaml
new file mode 100644
index 00000000..6e9b47cc
--- /dev/null
+++ b/inventory/service/host_vars/watcher-eu-nl-01.apimon.eco.tsi-dev.otc-service.com.yaml
@@ -0,0 +1,6 @@
+security_groups: ["watcher-sg"]
+nics:
+  - address: "192.168.204.2"
+    network: "vpc_a_csm_net"
+
+
diff --git a/inventory/service/host_vars/watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com.yaml b/inventory/service/host_vars/watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com.yaml
new file mode 100644
index 00000000..3bcc898d
--- /dev/null
+++ b/inventory/service/host_vars/watcher-eu-nl-02.apimon.eco.tsi-dev.otc-service.com.yaml
@@ -0,0 +1,6 @@
+security_groups: ["watcher-sg"]
+nics:
+  - address: "192.168.204.3"
+    network: "vpc_a_csm_net"
+
+
diff --git a/inventory/service/host_vars/watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com.yaml b/inventory/service/host_vars/watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com.yaml
new file mode 100644
index 00000000..06e58136
--- /dev/null
+++ b/inventory/service/host_vars/watcher-eu-nl-03.apimon.eco.tsi-dev.otc-service.com.yaml
@@ -0,0 +1,7 @@
+volume_size: 10
+security_groups: ["watcher-sg"]
+nics:
+  - fixed_ip: "192.168.204.4"
+    net-name: "vpc_a_csm_net"
+
+
diff --git a/playbooks/cloud-hosts.yaml b/playbooks/cloud-hosts.yaml
new file mode 100644
index 00000000..f1fd205b
--- /dev/null
+++ b/playbooks/cloud-hosts.yaml
@@ -0,0 +1,13 @@
+- hosts: cloud-launcher:!disabled
+  name: "Manage cloud hosts"
+  tasks:
+    - name: Manage OpenStack hosts
+      include_role:
+        name: cloud_host
+      loop: "{{ groups['all'] }}"
+      loop_control:
+        loop_var: host
+      when:
+        - "hostvars[host].location is defined"
+
+
diff --git a/playbooks/cloud-networks.yaml b/playbooks/cloud-networks.yaml
index 2be8e73c..25c98b71 100644
--- a/playbooks/cloud-networks.yaml
+++ b/playbooks/cloud-networks.yaml
@@ -14,3 +14,10 @@
       loop: "{{ cloud_nat_gws }}"
       loop_control:
        loop_var: natgw
+
+    - name: Manage Security Groups
+      include_role:
+        name: cloud_sg
+      loop: "{{ cloud_security_groups }}"
+      loop_control:
+        loop_var: sg
diff --git a/playbooks/roles/cloud_host/defaults/main.yaml b/playbooks/roles/cloud_host/defaults/main.yaml
new file mode 100644
index 00000000..eac2b8ec
--- /dev/null
+++ b/playbooks/roles/cloud_host/defaults/main.yaml
@@ -0,0 +1 @@
+state: present
diff --git a/playbooks/roles/cloud_host/tasks/destroy.yaml b/playbooks/roles/cloud_host/tasks/destroy.yaml
new file mode 100644
index 00000000..e8142b4c
--- /dev/null
+++ b/playbooks/roles/cloud_host/tasks/destroy.yaml
@@ -0,0 +1,6 @@
+- name: Destroy instance
+  openstack.cloud.server:
+    state: "absent"
+    cloud: "{{ hostvars[host].location.cloud }}"
+    name: "{{ hostvars[host].inventory_hostname }}"
+    delete_fip: true
diff --git a/playbooks/roles/cloud_host/tasks/main.yaml b/playbooks/roles/cloud_host/tasks/main.yaml
new file mode 100644
index 00000000..a5f29d69
--- /dev/null
+++ b/playbooks/roles/cloud_host/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include: "provision.yaml"
+  when: "state != 'absent'"
+
+- include: "destroy.yaml"
+  when: "state == 'absent'"
diff --git a/playbooks/roles/cloud_host/tasks/provision.yaml b/playbooks/roles/cloud_host/tasks/provision.yaml
new file mode 100644
index 00000000..f053277c
--- /dev/null
+++ b/playbooks/roles/cloud_host/tasks/provision.yaml
@@ -0,0 +1,25 @@
+- name: Ensure keypair exists
+  openstack.cloud.keypair:
+    state: "present"
+    cloud: "{{ hostvars[host].location.cloud }}"
+    name: "otcinfra-bridge"
+    public_key: "{{ bastion_public_key }}"
+
+- name: Create a new instance
+  openstack.cloud.server:
+    state: "present"
+    cloud: "{{ hostvars[host].location.cloud }}"
+    name: "{{ hostvars[host].inventory_hostname }}"
+    flavor: "{{ hostvars[host].flavor }}"
+    key_name: "otcinfra-bridge"
+    availability_zone: "{{ hostvars[host].location.az }}"
+    region: "{{ hostvars[host].location.region | default(omit) }}"
+    security_groups: "{{ hostvars[host].security_groups }}"
+    timeout: 600
+    nics: "{{ hostvars[host].nics }}"
+    boot_from_volume: true
+    volume_size: "{{ hostvars[host].volume_size | default(omit) }}"
+    image: "{{ hostvars[host].image }}"
+    terminate_volume: true
+    delete_fip: true
+    auto_ip: "{{ hostvars[host].auto_ip | default(omit) }}"
diff --git a/playbooks/roles/cloud_sg/defaults/main.yaml b/playbooks/roles/cloud_sg/defaults/main.yaml
new file mode 100644
index 00000000..eac2b8ec
--- /dev/null
+++ b/playbooks/roles/cloud_sg/defaults/main.yaml
@@ -0,0 +1 @@
+state: present
diff --git a/playbooks/roles/cloud_sg/tasks/destroy.yaml b/playbooks/roles/cloud_sg/tasks/destroy.yaml
new file mode 100644
index 00000000..b4c1c9b8
--- /dev/null
+++ b/playbooks/roles/cloud_sg/tasks/destroy.yaml
@@ -0,0 +1,4 @@
+- name: Destroy security group
+  openstack.cloud.security_group:
+    name: "{{ sg.name }}"
+    state: "{{ state }}"
diff --git a/playbooks/roles/cloud_sg/tasks/main.yaml b/playbooks/roles/cloud_sg/tasks/main.yaml
new file mode 100644
index 00000000..a5f29d69
--- /dev/null
+++ b/playbooks/roles/cloud_sg/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- include: "provision.yaml"
+  when: "state != 'absent'"
+
+- include: "destroy.yaml"
+  when: "state == 'absent'"
diff --git a/playbooks/roles/cloud_sg/tasks/provision.yaml b/playbooks/roles/cloud_sg/tasks/provision.yaml
new file mode 100644
index 00000000..f8df3f25
--- /dev/null
+++ b/playbooks/roles/cloud_sg/tasks/provision.yaml
@@ -0,0 +1,18 @@
+- name: Create security group
+  openstack.cloud.security_group:
+    name: "{{ sg.name }}"
+    description: "{{ sg.description | default(omit) }}"
+  register: secur_group
+
+- name: Add rules
+  openstack.cloud.security_group_rule:
+    security_group: "{{ secur_group.secgroup.id }}"
+    description: "{{ sg.description | default(omit) }}"
+    protocol: "{{ item.protocol }}"
+    port_range_min: "{{ item.port_range_min | default(omit) }}"
+    port_range_max: "{{ item.port_range_max | default(omit) }}"
+    remote_ip_prefix: "{{ item.remote_ip_prefix | default(omit) }}"
+    remote_group: "{{ item.remote_group | default(omit) }}"
+    direction: "{{ item.direction | default(omit) }}"
+
+  loop: "{{ sg.rules }}"
diff --git a/playbooks/templates/clouds/bridge_all_clouds.yaml.j2 b/playbooks/templates/clouds/bridge_all_clouds.yaml.j2
index dab4befd..4cd75b68 100644
--- a/playbooks/templates/clouds/bridge_all_clouds.yaml.j2
+++ b/playbooks/templates/clouds/bridge_all_clouds.yaml.j2
@@ -55,6 +55,26 @@ clouds:
     interface: public
     identity_api_version: 3
     region_name: eu-de
+  otcinfra-domain3-csm-nl:
+    profile: otc
+    auth:
+      user_domain_name: {{ clouds.otcinfra_domain3.auth.user_domain_name }}
+      project_name: eu-nl_eco_csm
+      username: {{ clouds.otcinfra_domain3.auth.username }}
+      password: "{{ clouds.otcinfra_domain3.auth.password }}"
+    interface: public
+    identity_api_version: 3
+    region_name: eu-nl
+  otcinfra-domain3-csm-de:
+    profile: otc
+    auth:
+      user_domain_name: {{ clouds.otcinfra_domain3.auth.user_domain_name }}
+      project_name: eu-de_eco_csm
+      username: {{ clouds.otcinfra_domain3.auth.username }}
+      password: "{{ clouds.otcinfra_domain3.auth.password }}"
+    interface: public
+    identity_api_version: 3
+    region_name: eu-de
 
   # OTC Swift
   otc-swift:
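
The run-csm-playbook.yaml playbook from the first two patches takes playbook_name, infra_prod_ansible_forks and infra_prod_playbook_collect_log from the calling Zuul job. A job definition along the following lines would drive it; this is a hypothetical sketch only (the job names, the parent relationship and the playbook_name value are assumptions, the real definitions live in this repository's Zuul configuration rather than in this series):

- job:
    name: otc-infra-prod-run-csm            # hypothetical base job name
    description: Run a customer-service-monitoring playbook on bridge.
    run: playbooks/zuul/run-csm-playbook.yaml
    vars:
      infra_prod_ansible_forks: 5
      infra_prod_playbook_collect_log: false

- job:
    name: otc-infra-prod-run-csm-production  # hypothetical child job
    parent: otc-infra-prod-run-csm
    vars:
      # hypothetical value; any playbook from the
      # customer-service-monitoring repository could be named here
      playbook_name: scenario.yaml

The csm-production-de and csm-production-nl group_vars files only set csm_instance; the matching per-region block from csm.yaml would then be picked out inside the customer-service-monitoring playbooks, presumably with a lookup similar to this sketch (the csm_config fact name is an assumption):

- hosts: csm-production-de:csm-production-nl
  tasks:
    - name: Resolve the per-region CSM configuration
      set_fact:
        csm_config: "{{ csm_instances[csm_instance] }}"

    - name: Show which cloud project the selected instance targets
      debug:
        msg: "{{ csm_config.variables.project_name }}"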