From 1ca3ed152498bdccb4aad3c84985f3ecb3f5f495 Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Tue, 12 Mar 2024 16:00:37 +0000 Subject: [PATCH 1/6] Move display_deployment_plan and validate_inventory back into crucible --- deploy_cluster.yml | 2 + deploy_day2_workers.yml | 2 + playbooks/add_day2_nodes.yml | 4 + playbooks/approve_csrs.yml | 4 + playbooks/boot_disk.yml | 2 + playbooks/boot_iso.yml | 3 + playbooks/create_cluster.yml | 3 + playbooks/create_day2_cluster.yml | 4 + playbooks/create_vms.yml | 2 + .../deploy_assisted_installer_onprem.yml | 3 + .../deploy_cluster_agent_based_installer.yml | 3 + .../deploy_cluster_assisted_installer.yml | 3 + playbooks/deploy_dns.yml | 4 + playbooks/deploy_http_store.yml | 3 + playbooks/deploy_ntp.yml | 4 + playbooks/deploy_registry.yml | 3 + playbooks/deploy_sushy_tools.yml | 4 + playbooks/deploy_tftp.yml | 4 + playbooks/destroy_vms.yml | 3 + playbooks/display_deployment_plan.yml | 2 +- playbooks/extract_agent_based_installer.yml | 3 + playbooks/generate_agent_iso.yml | 3 + playbooks/generate_discovery_iso.yml | 3 + playbooks/generate_manifests.yml | 3 + playbooks/generate_ssh_key_pair.yml | 3 + playbooks/install_cluster.yml | 3 + playbooks/monitor_agent_based_installer.yml | 4 + playbooks/monitor_cluster.yml | 3 + playbooks/monitor_hosts.yml | 4 + playbooks/mount_discovery_iso_for_pxe.yml | 4 + playbooks/populate_registry.yml | 3 + playbooks/validate_inventory.yml | 2 +- roles/display_deployment_plan/README.md | 3 + .../display_deployment_plan/defaults/main.yml | 2 + roles/display_deployment_plan/tasks/main.yml | 72 +++++++++++++ .../display_deployment_plan/templates/plan.j2 | 73 +++++++++++++ roles/validate_inventory/README.md | 3 + roles/validate_inventory/defaults/main.yml | 37 +++++++ .../agent_based_installer_feature_gates.yml | 26 +++++ .../agent_based_installer_requirements.yml | 12 +++ roles/validate_inventory/tasks/ai.yml | 87 +++++++++++++++ roles/validate_inventory/tasks/cluster.yml | 95 ++++++++++++++++ 
roles/validate_inventory/tasks/day2.yml | 5 + roles/validate_inventory/tasks/dns.yml | 21 ++++ roles/validate_inventory/tasks/main.yml | 102 ++++++++++++++++++ roles/validate_inventory/tasks/network.yml | 21 ++++ roles/validate_inventory/tasks/ntp.yml | 6 ++ roles/validate_inventory/tasks/partitions.yml | 7 ++ roles/validate_inventory/tasks/prereqs.yml | 4 + roles/validate_inventory/tasks/proxy.yml | 5 + .../tasks/required_vars.yml | 14 +++ roles/validate_inventory/tasks/secrets.yml | 36 +++++++ .../validate_inventory/tasks/validate_pxe.yml | 36 +++++++ roles/validate_inventory/tasks/vendor.yml | 19 ++++ roles/validate_inventory/tasks/vms.yml | 82 ++++++++++++++ site.yml | 2 + 56 files changed, 868 insertions(+), 2 deletions(-) create mode 100644 roles/display_deployment_plan/README.md create mode 100644 roles/display_deployment_plan/defaults/main.yml create mode 100644 roles/display_deployment_plan/tasks/main.yml create mode 100644 roles/display_deployment_plan/templates/plan.j2 create mode 100644 roles/validate_inventory/README.md create mode 100644 roles/validate_inventory/defaults/main.yml create mode 100644 roles/validate_inventory/tasks/agent_based_installer_feature_gates.yml create mode 100644 roles/validate_inventory/tasks/agent_based_installer_requirements.yml create mode 100644 roles/validate_inventory/tasks/ai.yml create mode 100644 roles/validate_inventory/tasks/cluster.yml create mode 100644 roles/validate_inventory/tasks/day2.yml create mode 100644 roles/validate_inventory/tasks/dns.yml create mode 100644 roles/validate_inventory/tasks/main.yml create mode 100644 roles/validate_inventory/tasks/network.yml create mode 100644 roles/validate_inventory/tasks/ntp.yml create mode 100644 roles/validate_inventory/tasks/partitions.yml create mode 100644 roles/validate_inventory/tasks/prereqs.yml create mode 100644 roles/validate_inventory/tasks/proxy.yml create mode 100644 roles/validate_inventory/tasks/required_vars.yml create mode 100644 
roles/validate_inventory/tasks/secrets.yml create mode 100644 roles/validate_inventory/tasks/validate_pxe.yml create mode 100644 roles/validate_inventory/tasks/vendor.yml create mode 100644 roles/validate_inventory/tasks/vms.yml diff --git a/deploy_cluster.yml b/deploy_cluster.yml index 0d9a25ee..9030e673 100644 --- a/deploy_cluster.yml +++ b/deploy_cluster.yml @@ -1,4 +1,6 @@ --- +- import_playbook: playbooks/validate_inventory.yml + - import_playbook: playbooks/deploy_cluster_agent_based_installer.yml when: (use_agent_based_installer | default(false)) | bool diff --git a/deploy_day2_workers.yml b/deploy_day2_workers.yml index 6b0341d6..c68e7849 100644 --- a/deploy_day2_workers.yml +++ b/deploy_day2_workers.yml @@ -1,4 +1,6 @@ --- +- import_playbook: playbooks/validate_inventory.yml + - import_playbook: playbooks/create_vms.yml when: groups['day2_workers'] | default([]) | length > 0 vars: diff --git a/playbooks/add_day2_nodes.yml b/playbooks/add_day2_nodes.yml index 38fddf21..60452ced 100644 --- a/playbooks/add_day2_nodes.yml +++ b/playbooks/add_day2_nodes.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Add day2 workers hosts: day2_workers gather_facts: false diff --git a/playbooks/approve_csrs.yml b/playbooks/approve_csrs.yml index e6840895..9e72878c 100644 --- a/playbooks/approve_csrs.yml +++ b/playbooks/approve_csrs.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Approve CSRs on cluster hosts: bastion gather_facts: false diff --git a/playbooks/boot_disk.yml b/playbooks/boot_disk.yml index c01dd2a6..c70d1ac7 100644 --- a/playbooks/boot_disk.yml +++ b/playbooks/boot_disk.yml @@ -1,5 +1,7 @@ --- # file: boot_disk.yml +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml - name: Unmounting Assisted Installer Discovery ISO hosts: bastion diff --git a/playbooks/boot_iso.yml 
b/playbooks/boot_iso.yml index 1f23e663..52718d44 100644 --- a/playbooks/boot_iso.yml +++ b/playbooks/boot_iso.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Mounting, Booting the Assisted Installer Discovery ISO hosts: "{{ boot_iso_hosts | default('nodes') }}" gather_facts: false diff --git a/playbooks/create_cluster.yml b/playbooks/create_cluster.yml index 895b9deb..df0f67dc 100644 --- a/playbooks/create_cluster.yml +++ b/playbooks/create_cluster.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Create cluster and generate Assisted Installer Discovery ISO hosts: bastion gather_facts: false diff --git a/playbooks/create_day2_cluster.yml b/playbooks/create_day2_cluster.yml index 84ab9626..577b359a 100644 --- a/playbooks/create_day2_cluster.yml +++ b/playbooks/create_day2_cluster.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Create day2 cluster definition hosts: bastion roles: diff --git a/playbooks/create_vms.yml b/playbooks/create_vms.yml index 42183345..20e71475 100644 --- a/playbooks/create_vms.yml +++ b/playbooks/create_vms.yml @@ -1,4 +1,6 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml - name: Process KVM nodes hosts: bastion diff --git a/playbooks/deploy_assisted_installer_onprem.yml b/playbooks/deploy_assisted_installer_onprem.yml index ffa025dd..23924c05 100644 --- a/playbooks/deploy_assisted_installer_onprem.yml +++ b/playbooks/deploy_assisted_installer_onprem.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Play to populate image_hashes for relevant images hosts: localhost vars: diff --git a/playbooks/deploy_cluster_agent_based_installer.yml b/playbooks/deploy_cluster_agent_based_installer.yml index 1be935e6..f982999c 100644 
--- a/playbooks/deploy_cluster_agent_based_installer.yml +++ b/playbooks/deploy_cluster_agent_based_installer.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Extract Agent based installer ansible.builtin.import_playbook: extract_agent_based_installer.yml when: agent_based_installer_path is not defined diff --git a/playbooks/deploy_cluster_assisted_installer.yml b/playbooks/deploy_cluster_assisted_installer.yml index b84c5414..f445585c 100644 --- a/playbooks/deploy_cluster_assisted_installer.yml +++ b/playbooks/deploy_cluster_assisted_installer.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Generate SSH Keys for node access ansible.builtin.import_playbook: generate_ssh_key_pair.yml when: (generate_ssh_keys | default(true)) | bool diff --git a/playbooks/deploy_dns.yml b/playbooks/deploy_dns.yml index dd9cac48..b2322f27 100644 --- a/playbooks/deploy_dns.yml +++ b/playbooks/deploy_dns.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Setup DNS Records hosts: dns_host gather_facts: false diff --git a/playbooks/deploy_http_store.yml b/playbooks/deploy_http_store.yml index 99ef6cfc..3fab7bbe 100644 --- a/playbooks/deploy_http_store.yml +++ b/playbooks/deploy_http_store.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Install and http_store service hosts: http_store gather_facts: false diff --git a/playbooks/deploy_ntp.yml b/playbooks/deploy_ntp.yml index 3981472e..2bf05d36 100644 --- a/playbooks/deploy_ntp.yml +++ b/playbooks/deploy_ntp.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Setup NTP hosts: ntp_host gather_facts: false diff --git a/playbooks/deploy_registry.yml b/playbooks/deploy_registry.yml index 
52de4606..f564ca2f 100644 --- a/playbooks/deploy_registry.yml +++ b/playbooks/deploy_registry.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Play to populate image_hashes for relevant images hosts: localhost gather_facts: false diff --git a/playbooks/deploy_sushy_tools.yml b/playbooks/deploy_sushy_tools.yml index ccf22d50..eac15e51 100644 --- a/playbooks/deploy_sushy_tools.yml +++ b/playbooks/deploy_sushy_tools.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Deploy sushy tools hosts: vm_hosts gather_facts: false diff --git a/playbooks/deploy_tftp.yml b/playbooks/deploy_tftp.yml index 0af5119b..3b0aad92 100644 --- a/playbooks/deploy_tftp.yml +++ b/playbooks/deploy_tftp.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Setup TFTP hosts: tftp_host vars: diff --git a/playbooks/destroy_vms.yml b/playbooks/destroy_vms.yml index f9c03eda..0442a435 100644 --- a/playbooks/destroy_vms.yml +++ b/playbooks/destroy_vms.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Process KVM nodes hosts: bastion roles: diff --git a/playbooks/display_deployment_plan.yml b/playbooks/display_deployment_plan.yml index 2dd7f5e9..8f2c9e85 100644 --- a/playbooks/display_deployment_plan.yml +++ b/playbooks/display_deployment_plan.yml @@ -3,4 +3,4 @@ hosts: localhost gather_facts: false roles: - - redhatci.ocp.display_deployment_plan + - display_deployment_plan diff --git a/playbooks/extract_agent_based_installer.yml b/playbooks/extract_agent_based_installer.yml index 2aea8e26..9f043ade 100644 --- a/playbooks/extract_agent_based_installer.yml +++ b/playbooks/extract_agent_based_installer.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Extract 
openshift installer hosts: bastion gather_facts: false diff --git a/playbooks/generate_agent_iso.yml b/playbooks/generate_agent_iso.yml index 9abe36b0..e3a77521 100644 --- a/playbooks/generate_agent_iso.yml +++ b/playbooks/generate_agent_iso.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Generate agent iso using agent_based_installer hosts: bastion roles: diff --git a/playbooks/generate_discovery_iso.yml b/playbooks/generate_discovery_iso.yml index 9eefe27a..75453d09 100644 --- a/playbooks/generate_discovery_iso.yml +++ b/playbooks/generate_discovery_iso.yml @@ -1,5 +1,8 @@ --- # file: generate_discovery_iso.yml +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Generate discovery iso hosts: bastion roles: diff --git a/playbooks/generate_manifests.yml b/playbooks/generate_manifests.yml index cd3e7e6e..b2ae6631 100644 --- a/playbooks/generate_manifests.yml +++ b/playbooks/generate_manifests.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Generate manfiests for agent_based_installer hosts: bastion vars: diff --git a/playbooks/generate_ssh_key_pair.yml b/playbooks/generate_ssh_key_pair.yml index 1101bcb6..af5c013f 100644 --- a/playbooks/generate_ssh_key_pair.yml +++ b/playbooks/generate_ssh_key_pair.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Generate ssh keys used for debug hosts: bastion roles: diff --git a/playbooks/install_cluster.yml b/playbooks/install_cluster.yml index 30a1e435..111e0d0b 100644 --- a/playbooks/install_cluster.yml +++ b/playbooks/install_cluster.yml @@ -1,5 +1,8 @@ --- # file: install_cluster.yml +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Installing the cluster hosts: bastion gather_facts: false diff --git 
a/playbooks/monitor_agent_based_installer.yml b/playbooks/monitor_agent_based_installer.yml index 877e28b2..199896d4 100644 --- a/playbooks/monitor_agent_based_installer.yml +++ b/playbooks/monitor_agent_based_installer.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Monitor install process of agent_based_installer hosts: bastion gather_facts: false diff --git a/playbooks/monitor_cluster.yml b/playbooks/monitor_cluster.yml index 6a4dd7cd..e301145c 100644 --- a/playbooks/monitor_cluster.yml +++ b/playbooks/monitor_cluster.yml @@ -1,5 +1,8 @@ --- # file: monitor_cluster.yml +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Monitoring cluster installation hosts: bastion gather_facts: false diff --git a/playbooks/monitor_hosts.yml b/playbooks/monitor_hosts.yml index 5553d9c1..8be69f2e 100644 --- a/playbooks/monitor_hosts.yml +++ b/playbooks/monitor_hosts.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Monitoring hosts installation hosts: masters, workers gather_facts: false diff --git a/playbooks/mount_discovery_iso_for_pxe.yml b/playbooks/mount_discovery_iso_for_pxe.yml index 09cc3276..7cafea0e 100644 --- a/playbooks/mount_discovery_iso_for_pxe.yml +++ b/playbooks/mount_discovery_iso_for_pxe.yml @@ -1,3 +1,7 @@ +--- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Mount discovery ISO for PXE hosts: http_store vars: diff --git a/playbooks/populate_registry.yml b/playbooks/populate_registry.yml index 3791616d..40b1758f 100644 --- a/playbooks/populate_registry.yml +++ b/playbooks/populate_registry.yml @@ -1,4 +1,7 @@ --- +- name: Validate Inventory + ansible.builtin.import_playbook: validate_inventory.yml + - name: Play to populate image_hashes for relevant images hosts: localhost gather_facts: (setup_registry_service | default(true)) | 
bool diff --git a/playbooks/validate_inventory.yml b/playbooks/validate_inventory.yml index b127416b..91e80252 100644 --- a/playbooks/validate_inventory.yml +++ b/playbooks/validate_inventory.yml @@ -3,6 +3,6 @@ hosts: localhost gather_facts: false roles: - - role: redhatci.ocp.validate_inventory + - role: validate_inventory vars: validation_host: localhost diff --git a/roles/display_deployment_plan/README.md b/roles/display_deployment_plan/README.md new file mode 100644 index 00000000..4d0e817e --- /dev/null +++ b/roles/display_deployment_plan/README.md @@ -0,0 +1,3 @@ +# display_deployment_plan + +Displays the crucible deployment plan and waits for user confirmation. diff --git a/roles/display_deployment_plan/defaults/main.yml b/roles/display_deployment_plan/defaults/main.yml new file mode 100644 index 00000000..4c465a8d --- /dev/null +++ b/roles/display_deployment_plan/defaults/main.yml @@ -0,0 +1,2 @@ +--- +pxe_node_names: [] diff --git a/roles/display_deployment_plan/tasks/main.yml b/roles/display_deployment_plan/tasks/main.yml new file mode 100644 index 00000000..682166d4 --- /dev/null +++ b/roles/display_deployment_plan/tasks/main.yml @@ -0,0 +1,72 @@ +--- +- name: Get all PXE Nodes + set_fact: + pxe_node_names: "{{ pxe_node_names + [item] }}" + when: hostvars[item]['vendor'] | lower == 'pxe' + loop: "{{ groups['nodes'] }}" + +- name: Display deployment plan if interactive prompts are allowed + when: not ((skip_interactive_prompts | default(false)) | bool) + block: + - name: Display inventory details and ask for user confirmation + pause: + prompt: "{{ lookup('template', 'plan.j2').strip('\n') }}" + vars: + # Every row consists of two columns. To ensure all items in the right column are aligned, + # a minimum width of the left column is set in the following variable. 
+ left_column_width: 36 + row_format_str: '%-{{ left_column_width }}s %s' + # To improve readability, all hosts listed in the "Groups and hosts configuration" section + # are nested under groups by the number of spaces defined in the variable below. + # For padding set to 2, the displayed output is as follows: + # group_name: + # host_name_A + # host_name_B + hosts_row_padding: 2 + hosts_row_format_str: '%-{{ (left_column_width - hosts_row_padding) }}s %s' + hosts_row_extended_format_str: '%-{{ (left_column_width - hosts_row_padding) }}s %s (%s)' + # In the sample inventory, some host groups are nested under other groups. + # To prevent the defined hosts from being displayed multiple times, selected top level + # groups are omitted from the deployment plan. + groups_to_exclude: ["all", "nodes", "ungrouped"] + messages: + value_missing: "" + hosts_missing: "" + groups_missing: "" + kubeadmin_vault_password_file_path_missing: "UNDEFINED (This means the kubeadmin credentals will be stored in plain text)" + pxe_hosts_missing: "[WARNING] No hosts are set" + cached_image_hash_file_path: "{{ image_hashes_path | default(repo_root_path + '/image_hashes.yml') }}" + + # A default error message is assigned to each variable referenced in the deployment plan. + # This prevents the prompt from not being displayed due to templating errors if a valid + # inventory file is not provided. 
+ inventory: + cluster_name: "{{ cluster_name | default(messages.value_missing) }}" + base_dns_domain: "{{ base_dns_domain | default(messages.value_missing) }}" + api_vip: "{{ api_vip | default(messages.value_missing) }}" + ingress_vip: "{{ ingress_vip | default(messages.value_missing) }}" + vip_dhcp_allocation: "{{ vip_dhcp_allocation | default(messages.value_missing) }}" + openshift_full_version: "{{ openshift_full_version | default(messages.value_missing) }}" + setup_ntp_service: "{{ setup_ntp_service | default(messages.value_missing) }}" + setup_dns_service: "{{ setup_dns_service | default(messages.value_missing) }}" + setup_registry_service: "{{ setup_registry_service | default(messages.value_missing) }}" + setup_http_store_service: "{{ setup_http_store_service | default(messages.value_missing) }}" + setup_assisted_installer: "{{ setup_assisted_installer | default(messages.value_missing) }}" + setup_pxe_service: "{{ setup_pxe_service | default(messages.value_missing) }}" + num_pxe_nodes: "{{ pxe_node_names | length }}" + discovery_iso_name: "{{ discovery_iso_name | default(messages.value_missing) }}" + discovery_iso_download_path: "{{ iso_download_dest_path | default(messages.value_missing) }}" + is_valid_single_node_openshift_config: "{{ is_valid_single_node_openshift_config | default(messages.value_missing) }}" + groups_filtered: "{{ groups | difference(groups_to_exclude) }}" + kubeadmin_vault_password_file_path: "{{ kubeadmin_vault_password_file_path | default(messages.kubeadmin_vault_password_file_path_missing) }}" + cached_image_hash_file_path: "{{ cached_image_hash_file_path }}" + cached_image_hash_file_exists: "{{ cached_image_hash_file_path is file }}" + ignore_cached_image_hash_file: "{{ ignore_cached_image_hashes | default(false) }}" + register: display_deployment_plan__confirmation + + - name: Assert the deployment plan is confirmed + assert: + that: + - (display_deployment_plan__confirmation.user_input | lower | trim) in ['y', 'yes'] + fail_msg: 
"The deployment plan must be confirmed by the user" + quiet: true diff --git a/roles/display_deployment_plan/templates/plan.j2 b/roles/display_deployment_plan/templates/plan.j2 new file mode 100644 index 00000000..620fa64a --- /dev/null +++ b/roles/display_deployment_plan/templates/plan.j2 @@ -0,0 +1,73 @@ +An OpenShift cluster is about to be deployed. Please double check the provided inventory details. + +* General configuration + + {{ row_format_str | format('Cluster name', inventory.cluster_name) }} + {{ row_format_str | format('Base DNS domain', inventory.base_dns_domain) }} + + {{ row_format_str | format('OpenShift version', inventory.openshift_full_version) }} + + {{ row_format_str | format('Kube admin vault password file', inventory.kubeadmin_vault_password_file_path) }} + +* Cluster network configuration + + {{ row_format_str | format('API Virtual IP', inventory.api_vip) }} + {{ row_format_str | format('Ingress Virtual IP', inventory.ingress_vip) }} + + {{ row_format_str | format('Allocate Virtual IPs via DHCP', inventory.vip_dhcp_allocation) }} + +* Prerequisite services configuration + + {{ row_format_str | format('Setup NTP Service', inventory.setup_ntp_service) }} + {{ row_format_str | format('Setup DNS Service', inventory.setup_dns_service) }} + {{ row_format_str | format('Setup Registry Service', inventory.setup_registry_service) }} + {{ row_format_str | format('Setup HTTP Store Service', inventory.setup_http_store_service) }} + {{ row_format_str | format('Setup Assisted Installer', inventory.setup_assisted_installer) }} + {{ row_format_str | format('Setup PXE Service', inventory.setup_pxe_service) }} + {% if inventory.setup_pxe_service | bool and inventory.num_pxe_nodes | int > 0 -%} + {{ row_format_str | format('Number of PXE Nodes', inventory.num_pxe_nodes) }} + {% elif inventory.setup_pxe_service | bool and inventory.num_pxe_nodes | int == 0 -%} + {{ row_format_str | format('Number of PXE Nodes', messages.pxe_hosts_missing) }} + {% endif -%} + {{ 
row_format_str | format('Discovery ISO Name', inventory.discovery_iso_name) }} + {{ row_format_str | format('Discovery ISO Download Path', inventory.discovery_iso_download_path) }} + +* Groups and hosts configuration + + {{ row_format_str | format('Single Node OpenShift (SNO) Mode', inventory.is_valid_single_node_openshift_config) }} + +{% for group_name in inventory.groups_filtered %} + {{ group_name }}: +{% for host_name in groups[group_name] %} +{% set host_ansible_host = hostvars[host_name]['ansible_host'] | default(messages.value_missing) %} +{% if hostvars[host_name]['vm_host'] is defined %} + {{ hosts_row_extended_format_str | format(host_name, host_ansible_host, hostvars[host_name]['vm_host']) }} +{% else %} + {{ hosts_row_format_str | format(host_name, host_ansible_host) }} +{% endif %} +{% else %} + {{ messages.hosts_missing }} +{% endfor %} +{# This adds a newline character between groups #}{{ '' }} +{% else %} + {{ messages.groups_missing }}{{ '\n' }} +{% endfor %} + +* Image hash caching + + {{ row_format_str | format('Cached image hash file path', inventory.cached_image_hash_file_path)}} + {{ row_format_str | format('Cached image hash file exists', inventory.cached_image_hash_file_exists)}} + {{ row_format_str | format('Ignoring cached image hash file', inventory.ignore_cached_image_hash_file)}} + + {% if inventory.cached_image_hash_file_exists and not inventory.ignore_cached_image_hash_file %} + NOTE: If you have changed any tags for images, the hash will NOT be updated unless you remove the + entry from (or remove the entire file) + {{ inventory.cached_image_hash_file_path }} + or set ignore_cached_image_hashes to True. + {% endif %} + +--- + +Are you sure you want to proceed with the deployment using this inventory configuration? 
+ +Enter a value [yes/NO] diff --git a/roles/validate_inventory/README.md b/roles/validate_inventory/README.md new file mode 100644 index 00000000..dcf9c648 --- /dev/null +++ b/roles/validate_inventory/README.md @@ -0,0 +1,3 @@ +# validate_inventory + +Validates a crucible inventory diff --git a/roles/validate_inventory/defaults/main.yml b/roles/validate_inventory/defaults/main.yml new file mode 100644 index 00000000..ffdbfae2 --- /dev/null +++ b/roles/validate_inventory/defaults/main.yml @@ -0,0 +1,37 @@ +node_required_vars: + - bmc_password + - bmc_user + - vendor + - role + - mac + +supported_role_values: + - worker + - master + +supported_vendor_values: + - dell + - hpe + - lenovo + - kvm + - supermicro + - pxe + - zt + +allow_custom_vendor: False + +ai_version: "{{ hostvars.assisted_installer.ai_version | default('v2.26.0') }}" +ai_version_number: "{{ ai_version | regex_replace('v(\\d+\\.\\d+\\.\\d+)', '\\1') }}" + +supported_ocp_versions: + - 4.6.16 + - 4.7.52 + - 4.8.43 + - 4.9.59 + - 4.10.67 + - 4.11.53 + - 4.12.44 + - 4.13.22 + - 4.14.2 + +single_node_openshift_enabled: "{{ is_valid_single_node_openshift_config | default(false) }}" diff --git a/roles/validate_inventory/tasks/agent_based_installer_feature_gates.yml b/roles/validate_inventory/tasks/agent_based_installer_feature_gates.yml new file mode 100644 index 00000000..d81eeaf1 --- /dev/null +++ b/roles/validate_inventory/tasks/agent_based_installer_feature_gates.yml @@ -0,0 +1,26 @@ +- name: Assert no day2 nodes + ansible.builtin.assert: + that: + - (groups['day2_workers'] | default([])) | length == 0 + fail_msg: "use_agent_based_installer does not support day2 workers" + +- name: Assert no partitions + ansible.builtin.assert: + that: + - hostvars[item]['disks'] is not defined + fail_msg: "Can not use partitions when using use_agent_based_installer" + loop: "{{ groups['nodes'] | default([]) }}" + +- name: Assert discovery is password hash is not defined + ansible.builtin.assert: + that: + - 
hashed_discovery_password is not defined + fail_msg: "hashed_discovery_password is not supported when use_agent_based_installer" + +- name: Assert no OLM Operators + ansible.builtin.assert: + that: + - install_lso is not defined + - install_odf is not defined + - install_cnv is not defined + fail_msg: "Operator installion is not supported when use_agent_based_installer, you can use extra_manifests to deploy these operators" diff --git a/roles/validate_inventory/tasks/agent_based_installer_requirements.yml b/roles/validate_inventory/tasks/agent_based_installer_requirements.yml new file mode 100644 index 00000000..bdb0d0ff --- /dev/null +++ b/roles/validate_inventory/tasks/agent_based_installer_requirements.yml @@ -0,0 +1,12 @@ +- name: Assert agent_based_installer_path is defined + ansible.builtin.assert: + that: + - openshift_full_version is version('4.12.0', '>=') + fail_msg: Agent based installation is only avaiable for openshift_full_version >= 4.12.0 + +- name: Assert nodes have network_config + ansible.builtin.assert: + that: + - hostvars[item]['network_config'] is defined + fail_msg: "use_agent_based_installer requires that hosts have a network config" + loop: "{{ groups['nodes'] | default([]) }}" diff --git a/roles/validate_inventory/tasks/ai.yml b/roles/validate_inventory/tasks/ai.yml new file mode 100644 index 00000000..f64f30ea --- /dev/null +++ b/roles/validate_inventory/tasks/ai.yml @@ -0,0 +1,87 @@ +--- +- name: Assert ai_version is valid + assert: + that: + - ai_version_number is version('2.1.0', '>=') + fail_msg: "ai_version must be >= v2.1.0 and must be of the form 'v\\d+.\\d+.\\d+'" + +- name: Assert that Openshift version is supported + assert: + that: + - openshift_full_version is version('4.6', '>=') + fail_msg: "openshift_full_version must be >= 4.6." 
+ +- name: Assert VIPs are within the machine network + assert: + that: + - hostvars['assisted_installer'][item] | ansible.utils.ipaddr(hostvars['assisted_installer']['machine_network_cidr']) | ansible.utils.ipaddr('bool') + fail_msg: "{{ item }} is not within the machine network!" + when: vip_dhcp_allocation == false + loop: + - api_vip + - ingress_vip + +- name: Assert nodes are within the machine network + assert: + that: + - hostvars[item]['ansible_host'] | ansible.utils.ipaddr(hostvars['assisted_installer']['machine_network_cidr']) | ansible.utils.ipaddr('bool') + fail_msg: "{{ item }} is not within the machine network!" + when: vip_dhcp_allocation == false + loop: "{{ groups['masters'] + (groups['workers'] | default([])) }}" # This should not include day2_workers as they can be RWNs + +- name: Validate extra VIPs for dualstack + when: + - extra_api_vip is defined + - extra_api_vip | length > 0 + - extra_ingress_vip is defined + - extra_ingress_vip | length > 0 + block: + - name: Assert that Openshift version is supported for dualstack VIPs + assert: + that: + - openshift_full_version is version('4.12', '>=') + fail_msg: "openshift_full_version must be >= 4.12. 
to support dualstack VIPs" + + - name: Assert that extra_machine_networks variable is defined and not empty + assert: + that: + - (extra_machine_networks | length) > 0 + fail_msg: "extra_machine_networks must be defined and have at least one cidr value in a list to support dualstack VIPs" + + - name: Check if extra api VIP is within the extra machine networks + vars: + extra_api_vip: "{{ hostvars['assisted_installer']['extra_api_vip'] | default ([]) }}" + extra_api_vip_tests: [] + ansible.builtin.set_fact: + extra_api_vip_tests: "{{ extra_api_vip_tests + [extra_api_vip | ansible.utils.ipaddr(item.cidr)] }}" + when: + - vip_dhcp_allocation == false + - extra_machine_networks is defined + loop: "{{ hostvars['assisted_installer']['extra_machine_networks'] }}" + + - name: Fail if extra api VIP is NOT within any of the extra machine networks + ansible.builtin.assert: + that: + - extra_api_vip in extra_api_vip_tests + fail_msg: "{{ extra_api_vip }} is not within any of the extra machine networks!" + when: + - extra_api_vip_tests is defined + + - name: Check if extra ingress VIP is within the extra machine networks + vars: + extra_ingress_vip: "{{ hostvars['assisted_installer']['extra_ingress_vip'] | default ([]) }}" + extra_ingress_vip_tests: [] + ansible.builtin.set_fact: + extra_ingress_vip_tests: "{{ extra_ingress_vip_tests + [extra_ingress_vip | ansible.utils.ipaddr(item.cidr)] }}" + when: + - vip_dhcp_allocation == false + - extra_machine_networks is defined + loop: "{{ hostvars['assisted_installer']['extra_machine_networks'] }}" + + - name: Fail if extra ingress VIP is NOT within any of the extra machine networks + ansible.builtin.assert: + that: + - extra_ingress_vip in extra_ingress_vip_tests + fail_msg: "{{ extra_ingress_vip }} is not within any of the extra machine networks!" 
+ when: + - extra_ingress_vip_tests is defined diff --git a/roles/validate_inventory/tasks/cluster.yml b/roles/validate_inventory/tasks/cluster.yml new file mode 100644 index 00000000..d807a9b3 --- /dev/null +++ b/roles/validate_inventory/tasks/cluster.yml @@ -0,0 +1,95 @@ +--- +- name: Assert Openshift version is supported + assert: + that: + - openshift_full_version in supported_ocp_versions + fail_msg: "We do not support openshift version {{ openshift_full_version }}, The supported versions are: {{ supported_ocp_versions | join(',') }}" + +- name: Assert valid master configuration (HA) + assert: + that: + - groups['masters'] | length >= 3 + fail_msg: "There must be at least three masters defined. To deploy SNO, define one master and no workers." + when: not single_node_openshift_enabled + +- name: Assert API and Ingress VIPs are set correctly (SNO) + assert: + that: + - api_vip == hostvars[sno_hostname]['ansible_host'] + - ingress_vip == hostvars[sno_hostname]['ansible_host'] + fail_msg: "For SNO deployments, API and Ingress VIPs need to match the IP address (ansible_host) of the master node." + vars: + sno_hostname: "{{ groups['masters'][0] }}" + when: single_node_openshift_enabled + +- name: Assert valid worker configuration + assert: + that: + - (groups['workers'] | length == 0) or (groups['workers'] | length >= 2) + fail_msg: "There must be either zero, or more than one, workers defined." 
+ when: groups['workers'] is defined + +- name: Assert all nodes have all required vars + assert: + that: + - hostvars[item.0][item.1] is defined + - hostvars[item.0][item.1] | trim != '' + quiet: true + fail_msg: "Node {{ item.0 }} is missing required var {{ item.1 }}" + loop: "{{ groups['nodes'] | product(node_required_vars) | list }}" + +- name: Assert all nodes have a BMC IP or address + assert: + that: + - (hostvars[item]['bmc_ip'] is defined and (setup_dns_service | default(false)) | bool) or (hostvars[item]['bmc_address'] is defined) + quiet: true + fail_msg: "Node {{ item }} must have either bmc_ip or bmc_address defined" + loop: "{{ groups['nodes'] }}" + +- name: Assert bmc_ip is correct type + assert: + that: + - hostvars[item]['bmc_ip'] | ansible.utils.ipaddr('bool') + quiet: true + fail_msg: "Node {{ item }}'s bmc_ip must be a valid IP address" + loop: "{{ groups['nodes'] }}" + when: hostvars[item]['bmc_ip'] is defined + +- name: Assert bmc_address is correct type + assert: + that: + - hostvars[item]['bmc_address'] is string + quiet: true + fail_msg: "Node {{ item }}'s bmc_address must be a string" + loop: "{{ groups['nodes'] }}" + when: hostvars[item]['bmc_address'] is defined + +- name: Assert required vars are correctly typed + assert: + that: + - (hostvars[item]['mac'] | ansible.utils.hwaddr('bool')) == true + - hostvars[item]['bmc_password'] is string + - hostvars[item]['bmc_user'] is string + - hostvars[item]['vendor'] is string + quiet: true + fail_msg: "Node {{ item }} has an incorrectly formatted var" + loop: "{{ groups['nodes'] }}" + +- name: Assert mac has linux format + assert: + that: + - ( hostvars[item]['mac'] | string | upper | regex_search('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$')) is not none + quiet: true + fail_msg: |- + The mac address for node {{ item }} needs to be in linux format XX:XX:XX:XX:XX:XX + Make sure to wrap it in quotes to make sure it is not being mangled + Current value: {{ hostvars[item]['mac'] }} + loop: "{{ groups['nodes'] }}" + +- name: Assert that
all values of 'role' are supported + assert: + that: + - hostvars[item]['role'] is in supported_role_values + quiet: true + fail_msg: "Node {{ item }} does not have a supported value for 'role'" + loop: "{{ groups['nodes'] }}" diff --git a/roles/validate_inventory/tasks/day2.yml b/roles/validate_inventory/tasks/day2.yml new file mode 100644 index 00000000..db9960c6 --- /dev/null +++ b/roles/validate_inventory/tasks/day2.yml @@ -0,0 +1,5 @@ +- name: Check for day2_discovery_iso_name if required + assert: + that: + - day2_discovery_iso_name is defined + when: (groups['day2_workers'] | default([])) | length > 0 diff --git a/roles/validate_inventory/tasks/dns.yml b/roles/validate_inventory/tasks/dns.yml new file mode 100644 index 00000000..1c91c17a --- /dev/null +++ b/roles/validate_inventory/tasks/dns.yml @@ -0,0 +1,21 @@ +--- +- name: Assert 'dhcp_range_first' and 'dhcp_range_last' are defined if needed + assert: + that: + - hostvars['dns_host'][item] is defined + - hostvars['dns_host'][item] | ansible.utils.ipaddr('bool') == True + quiet: true + when: hostvars['dns_host']['use_dhcp'] | default(false) + loop: + - dhcp_range_first + - dhcp_range_last + +- name: if DNS DHCP setup is enabled, ntp_server MUST be an IP for DNS config + assert: + that: + - hostvars['dns_host']['ntp_server'] is defined + - hostvars['dns_host']['ntp_server'] | ansible.utils.ipaddr('bool') == True + when: hostvars['dns_host']['setup_dns_service'] | default(false) and hostvars['dns_host']['use_dhcp'] | default(false) + +# All other DNS config is excluded for brevity at this time. +# It is taken from the cluster and/or AI configuration and is not DNS-specific so much is checked elsewhere. 
diff --git a/roles/validate_inventory/tasks/main.yml b/roles/validate_inventory/tasks/main.yml new file mode 100644 index 00000000..9aa47596 --- /dev/null +++ b/roles/validate_inventory/tasks/main.yml @@ -0,0 +1,102 @@ +--- +- name: Validate Inventory + block: + - include_tasks: + file: required_vars.yml + apply: + tags: validate_required_vars + tags: validate_required_vars + + - include_tasks: + file: cluster.yml + apply: + tags: validate_cluster + tags: validate_cluster + + - include_tasks: + file: vendor.yml + apply: + tags: validate_vendor + tags: validate_vendor + + - include_tasks: + file: ntp.yml + apply: + tags: validate_ntp + tags: validate_ntp + + - include_tasks: + file: vms.yml + apply: + tags: validate_vms + tags: validate_vms + + - include_tasks: + file: secrets.yml + apply: + tags: validate_secrets + tags: validate_secrets + + - include_tasks: + file: prereqs.yml + apply: + tags: validate_prereqs + tags: validate_prereqs + + - include_tasks: + file: proxy.yml + apply: + tags: validate_proxy_config + tags: validate_proxy_config + + - include_tasks: + file: network.yml + apply: + tags: validate_network + tags: validate_network + + - include_tasks: + file: day2.yml + apply: + tags: validate_day2 + tags: validate_day2 + + - include_tasks: + file: partitions.yml + apply: + tags: validate_partitions + tags: validate_partitions + + - include_tasks: + file: validate_pxe.yml + apply: + tags: validate_pxe + tags: validate_pxe + when: (setup_pxe_service | default(false)) | bool + + - include_tasks: + file: agent_based_installer_feature_gates.yml + apply: + tags: validate_agent_based_installer + when: + - (use_agent_based_installer | default(false) | bool) + - not ((ignore_agent_based_installer_feature_gates | default(false)) | bool) + tags: validate_agent_based_installer + + - include_tasks: + file: agent_based_installer_requirements.yml + apply: + tags: validate_agent_based_installer + when: + - (use_agent_based_installer | default(false) | bool) + tags: 
validate_agent_based_installer + + when: not (inventory_validated | default(False) | bool) + delegate_to: "{{ validation_host | default('bastion') }}" + +- name: Record successful validation on all hosts + set_fact: + inventory_validated: True + delegate_to: "{{ item }}" + delegate_facts: True + loop: "{{ groups['all'] + ['localhost'] }}" diff --git a/roles/validate_inventory/tasks/network.yml b/roles/validate_inventory/tasks/network.yml new file mode 100644 index 00000000..6bace421 --- /dev/null +++ b/roles/validate_inventory/tasks/network.yml @@ -0,0 +1,21 @@ +--- +# Node `ansible_host`s are not pinged. They are not required to be running at this stage. +# KVM node BMCs are not checked, the vm_host will be pinged later. +- name: Ensure baremetal node BMCs are reachable + shell: # noqa 305 + cmd: "ping -c 1 -W 2 {{ hostvars[item]['bmc_ip'] | default(hostvars[item]['bmc_address']) }}" + changed_when: False + when: hostvars[item]['vendor'] | lower != 'kvm' + loop: "{{ groups['nodes'] }}" + +- name: Ensure service hosts are reachable + shell: # noqa 305 + cmd: "ping -c 1 -W 2 {{ hostvars[item]['ansible_host'] }}" + changed_when: False + loop: "{{ groups['services'] }}" + +- name: Ensure NTP server is available if not being set up + shell: # noqa 305 + cmd: "ping -c 1 -W 2 {{ ntp_server }}" + changed_when: False + when: (setup_ntp_service | default(True)) != True diff --git a/roles/validate_inventory/tasks/ntp.yml b/roles/validate_inventory/tasks/ntp.yml new file mode 100644 index 00000000..e1e84e6c --- /dev/null +++ b/roles/validate_inventory/tasks/ntp.yml @@ -0,0 +1,6 @@ +- name: Check ntp_server is valid + assert: + that: + - hostvars['ntp_host']['ntp_server'] is defined + - hostvars['ntp_host']['ntp_server'] | ansible.utils.ipaddr('bool') == True + when: hostvars['ntp_host']['setup_ntp_service'] | default(false) diff --git a/roles/validate_inventory/tasks/partitions.yml b/roles/validate_inventory/tasks/partitions.yml new file mode 100644 index 00000000..ad08be63 
--- /dev/null +++ b/roles/validate_inventory/tasks/partitions.yml @@ -0,0 +1,7 @@ +- name: Assert that partitions can be created on supported Openshift version + assert: + that: + - openshift_full_version is version('4.8', '>=') + fail_msg: "openshift_full_version must be >= 4.8." + when: hostvars[item]['disks'] is defined + loop: "{{ groups['nodes'] | default([]) }}" diff --git a/roles/validate_inventory/tasks/prereqs.yml b/roles/validate_inventory/tasks/prereqs.yml new file mode 100644 index 00000000..4f9c1b60 --- /dev/null +++ b/roles/validate_inventory/tasks/prereqs.yml @@ -0,0 +1,4 @@ +--- +- import_tasks: ai.yml + +- import_tasks: dns.yml diff --git a/roles/validate_inventory/tasks/proxy.yml b/roles/validate_inventory/tasks/proxy.yml new file mode 100644 index 00000000..87bc0eec --- /dev/null +++ b/roles/validate_inventory/tasks/proxy.yml @@ -0,0 +1,5 @@ +- name: Check for repeated values in proxy config + assert: + that: + - "{{ (no_proxy.split(',') | unique | list | length) == (no_proxy.split(',') | list | length) }}" + when: no_proxy is defined and no_proxy != "" diff --git a/roles/validate_inventory/tasks/required_vars.yml b/roles/validate_inventory/tasks/required_vars.yml new file mode 100644 index 00000000..c469da8d --- /dev/null +++ b/roles/validate_inventory/tasks/required_vars.yml @@ -0,0 +1,14 @@ +- name: Check repo_root_path is defined + assert: + that: + - repo_root_path is defined + fail_msg: repo_root_path is required for all playbooks to function correctly + changed_when: False + +- name: Check kubeadmin_vault_password_file_path is defined and the file exists + assert: + that: + - kubeadmin_vault_password_file_path is file + fail_msg: "Kubeadmin Vault password must be stored in the location specified by the required variable 'kubeadmin_vault_password_file_path'."
+ changed_when: False + when: kubeadmin_vault_password_file_path is defined diff --git a/roles/validate_inventory/tasks/secrets.yml b/roles/validate_inventory/tasks/secrets.yml new file mode 100644 index 00000000..514f1600 --- /dev/null +++ b/roles/validate_inventory/tasks/secrets.yml @@ -0,0 +1,36 @@ +--- +- name: Assert that all credentials for the disconnected registry are set + assert: + that: + - hostvars['registry_host'][secret_var_name] is defined + - hostvars['registry_host'][secret_var_name] is string + - hostvars['registry_host'][secret_var_name] | trim != '' + fail_msg: > + The registry host requires a valid {{ secret_var_name }} variable to be set. + Please ensure a valid secret is set in the inventory vault file. + vars: + secret_vars_to_check: + - disconnected_registry_user + - disconnected_registry_password + - REGISTRY_HTTP_SECRET + loop: "{{ secret_vars_to_check }}" + loop_control: + loop_var: secret_var_name + # only for Restricted Network installations + when: "use_local_mirror_registry | default(setup_registry_service | default(true))" + +- name: Assert that all nodes have BMC credentials set + assert: + that: + - hostvars[item.0][item.1] is defined + - hostvars[item.0][item.1] is string + - hostvars[item.0][item.1] | trim != '' + fail_msg: > + Node {{ item.0 }} requires a valid {{ item.1 }} variable to be set. + Please ensure valid BMC credentials are set in the inventory vault file. 
+ vars: + secret_vars_to_check: + - bmc_user + - bmc_password + nodes_and_required_secret_vars: "{{ groups['nodes'] | product(secret_vars_to_check) | list }}" + loop: "{{ nodes_and_required_secret_vars }}" diff --git a/roles/validate_inventory/tasks/validate_pxe.yml b/roles/validate_inventory/tasks/validate_pxe.yml new file mode 100644 index 00000000..b5bd9b84 --- /dev/null +++ b/roles/validate_inventory/tasks/validate_pxe.yml @@ -0,0 +1,36 @@ +--- +- name: Assert 'dns host' and 'tftp host' is have IPs and that is the same + assert: + that: + - hostvars['dns_host']['ansible_host'] is defined + - hostvars['tftp_host']['ansible_host'] is defined + - hostvars['dns_host']['ansible_host'] == hostvars['tftp_host']['ansible_host'] + quiet: true + when: hostvars['dns_host']['use_pxe'] | default(false) + +- name: Check for ipmitool and pyghmi + delegate_to: bastion + block: + - name: "Check if ipmitool installed" + ansible.builtin.shell: + cmd: "ipmitool -V" + register: ipmitool_check + ignore_errors: True + changed_when: False + + - name: Record failures + fail: + msg: "ipmitool must be installed for pxe installation" + when: ipmitool_check.rc != 0 + + - name: "Check if pyghmi installed" + ansible.builtin.shell: + cmd: "pip3 show pyghmi" + register: pyghmi_check + ignore_errors: True + changed_when: False + + - name: Record failures + fail: + msg: "pyghmi must be installed for pxe installation" + when: pyghmi_check.rc != 0 diff --git a/roles/validate_inventory/tasks/vendor.yml b/roles/validate_inventory/tasks/vendor.yml new file mode 100644 index 00000000..d3f26f6e --- /dev/null +++ b/roles/validate_inventory/tasks/vendor.yml @@ -0,0 +1,19 @@ + +- name: Assert that all values of 'vendor' are supported + ansible.builtin.assert: + that: + - (hostvars[item]['vendor'] | lower) is in supported_vendor_values + quiet: true + fail_msg: "Node {{ item }} does not have a supported value for 'vendor'" + when: not allow_custom_vendor | bool + loop: "{{ groups['nodes'] }}" + +- name: 
"Check vendors role exists" + ansible.builtin.include_role: + name: "redhatci.ocp.vendors.{{ hostvars[item]['vendor'] | lower }}" + tasks_from: exists.yml + allow_duplicates: true + loop: "{{ groups['nodes'] }}" + args: + apply: + delegate_to: bastion diff --git a/roles/validate_inventory/tasks/vms.yml b/roles/validate_inventory/tasks/vms.yml new file mode 100644 index 00000000..64200885 --- /dev/null +++ b/roles/validate_inventory/tasks/vms.yml @@ -0,0 +1,82 @@ +- name: Get all KVM Nodes + vars: + kvm_node_names: [] + set_fact: + kvm_node_names: "{{ kvm_node_names + [item] }}" + when: hostvars[item]['vendor'] | lower == 'kvm' + loop: "{{ groups['nodes'] }}" + +- name: Check there is no vm_host in services + assert: + that: + - ('vm_host' not in groups['services']) + fail_msg: > + The structure of the inventory has changed + please put vm_host into a group called vm_hosts + and add a vm_host entry to the KVM node to point + to the host of the vm. + +- name: Check that a host is defined in the 'vm_hosts' group if needed + assert: + that: + - groups['vm_hosts'] is defined + - groups['vm_hosts'] | length > 0 + quiet: true + when: (kvm_node_names is defined) and (kvm_node_names | length > 0) + +- name: Check that each node will be on a valid host + assert: + that: + - hostvars[item]['vm_host'] in groups['vm_hosts'] + quiet: true + fail_msg: "vm_host ({{ hostvars[item]['vm_host'] }}) for {{ item }} not found in group vm_hosts" + loop: "{{ kvm_node_names | default([]) }}" + +- name: Check KVM BMC username password combinations + when: (kvm_node_names is defined) and (kvm_node_names | length > 0) + block: + - name: Get KVM BMC username password combinations + vars: + kvm_user_password_combinations: {} # Structure will be {'{{vm_host}}.{{bmc_user}}': [{{bmc_password}}, ...]} + set_fact: + kvm_user_password_combinations: "{{ + kvm_user_password_combinations | combine({ + (hostvars[item]['vm_host'] + '.' 
+ hostvars[item]['bmc_user']): ( + ( + kvm_user_password_combinations[( + hostvars[item]['vm_host'] + '.' + hostvars[item]['bmc_user'] + )] | default([]) + ) + [hostvars[item]['bmc_password']] + ) | unique + }) + }}" + loop: "{{ kvm_node_names }}" + no_log: true + + - name: Check there are not mutliple bmc passwords for the same bmc user and vm host. + assert: + that: + - (item.value | length) == 1 + fail_msg: | + For vm host {{ item.key.split('.')[0] }}: bmc_user ({{ item.key.split('.')[1] }}) + and bmc_password combinations for a single host must be unique + loop: "{{ kvm_user_password_combinations | dict2items() }}" + no_log: true + +- name: Check UUIDs are unique + when: (kvm_node_names is defined) and (kvm_node_names | length > 0) + vars: + kvm_node_uuids: [] + block: + - name: Get node UUIDs + set_fact: + kvm_node_uuids: "{{ kvm_node_uuids + [hostvars[item]['uuid']] }}" + when: "'uuid' in hostvars[item]" + loop: "{{ kvm_node_names }}" + no_log: true + + - name: Check values are unique + assert: + that: + - (kvm_node_uuids | length) == (kvm_node_uuids | unique | length) + fail_msg: "KVM node UUIDs must be unique otherwise they won't build or will collide during discovery" diff --git a/site.yml b/site.yml index 45fa8a4a..df7642c5 100644 --- a/site.yml +++ b/site.yml @@ -1,4 +1,6 @@ --- +- import_playbook: playbooks/validate_inventory.yml + - import_playbook: playbooks/display_deployment_plan.yml - import_playbook: deploy_prerequisites.yml From 705535c57388045b32d9e8129307ee984813051e Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Fri, 15 Mar 2024 11:33:01 +0000 Subject: [PATCH 2/6] Require os_images and release_images in inventory --- requirements.txt | 4 +- roles/validate_inventory/defaults/main.yml | 13 +--- roles/validate_inventory/tasks/ai.yml | 38 ++++++++--- roles/validate_inventory/tasks/cluster.yml | 26 +++----- tests/validate_inventory/suites/prereqs.yml | 34 ++++++++++ .../templates/test_inv.yml.j2 | 65 ++++++++++++++++++- 6 files changed, 141 
insertions(+), 39 deletions(-) diff --git a/requirements.txt b/requirements.txt index 5679719b..af85b1ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -netaddr==0.8.0 +jmespath==1.0.1 kubernetes==27.2.0 - +netaddr==0.8.0 diff --git a/roles/validate_inventory/defaults/main.yml b/roles/validate_inventory/defaults/main.yml index ffdbfae2..27e7fec9 100644 --- a/roles/validate_inventory/defaults/main.yml +++ b/roles/validate_inventory/defaults/main.yml @@ -18,20 +18,9 @@ supported_vendor_values: - pxe - zt -allow_custom_vendor: False +allow_custom_vendor: false ai_version: "{{ hostvars.assisted_installer.ai_version | default('v2.26.0') }}" ai_version_number: "{{ ai_version | regex_replace('v(\\d+\\.\\d+\\.\\d+)', '\\1') }}" -supported_ocp_versions: - - 4.6.16 - - 4.7.52 - - 4.8.43 - - 4.9.59 - - 4.10.67 - - 4.11.53 - - 4.12.44 - - 4.13.22 - - 4.14.2 - single_node_openshift_enabled: "{{ is_valid_single_node_openshift_config | default(false) }}" diff --git a/roles/validate_inventory/tasks/ai.yml b/roles/validate_inventory/tasks/ai.yml index f64f30ea..7a971dbb 100644 --- a/roles/validate_inventory/tasks/ai.yml +++ b/roles/validate_inventory/tasks/ai.yml @@ -1,18 +1,18 @@ --- - name: Assert ai_version is valid - assert: + ansible.builtin.assert: that: - ai_version_number is version('2.1.0', '>=') fail_msg: "ai_version must be >= v2.1.0 and must be of the form 'v\\d+.\\d+.\\d+'" - name: Assert that Openshift version is supported - assert: + ansible.builtin.assert: that: - openshift_full_version is version('4.6', '>=') fail_msg: "openshift_full_version must be >= 4.6." - name: Assert VIPs are within the machine network - assert: + ansible.builtin.assert: that: - hostvars['assisted_installer'][item] | ansible.utils.ipaddr(hostvars['assisted_installer']['machine_network_cidr']) | ansible.utils.ipaddr('bool') fail_msg: "{{ item }} is not within the machine network!" 
@@ -22,7 +22,7 @@ - ingress_vip - name: Assert nodes are within the machine network - assert: + ansible.builtin.assert: that: - hostvars[item]['ansible_host'] | ansible.utils.ipaddr(hostvars['assisted_installer']['machine_network_cidr']) | ansible.utils.ipaddr('bool') fail_msg: "{{ item }} is not within the machine network!" @@ -37,20 +37,20 @@ - extra_ingress_vip | length > 0 block: - name: Assert that Openshift version is supported for dualstack VIPs - assert: + ansible.builtin.assert: that: - openshift_full_version is version('4.12', '>=') fail_msg: "openshift_full_version must be >= 4.12. to support dualstack VIPs" - name: Assert that extra_machine_networks variable is defined and not empty - assert: + ansible.builtin.assert: that: - (extra_machine_networks | length) > 0 fail_msg: "extra_machine_networks must be defined and have at least one cidr value in a list to support dualstack VIPs" - name: Check if extra api VIP is within the extra machine networks vars: - extra_api_vip: "{{ hostvars['assisted_installer']['extra_api_vip'] | default ([]) }}" + extra_api_vip: "{{ hostvars['assisted_installer']['extra_api_vip'] | default([]) }}" extra_api_vip_tests: [] ansible.builtin.set_fact: extra_api_vip_tests: "{{ extra_api_vip_tests + [extra_api_vip | ansible.utils.ipaddr(item.cidr)] }}" @@ -69,7 +69,7 @@ - name: Check if extra ingress VIP is within the extra machine networks vars: - extra_ingress_vip: "{{ hostvars['assisted_installer']['extra_ingress_vip'] | default ([]) }}" + extra_ingress_vip: "{{ hostvars['assisted_installer']['extra_ingress_vip'] | default([]) }}" extra_ingress_vip_tests: [] ansible.builtin.set_fact: extra_ingress_vip_tests: "{{ extra_ingress_vip_tests + [extra_ingress_vip | ansible.utils.ipaddr(item.cidr)] }}" @@ -85,3 +85,25 @@ fail_msg: "{{ extra_ingress_vip }} is not within any of the extra machine networks!" 
when: - extra_ingress_vip_tests is defined + + +- name: Check OS and release image vars + vars: + ocp_ver_query: "[?openshift_version=='{{ openshift_version }}']" + ver_query: "[?version=='{{ openshift_full_version }}']" + block: + - name: Assert os_images exists + # version queries are defined in the enclosing block's vars + ansible.builtin.assert: + that: + - os_images is defined + - os_images | json_query(ocp_ver_query) | length > 0 + fail_msg: "os_images is not defined or does not contain the required version" + + - name: Assert release_images exists + ansible.builtin.assert: + that: + - release_images is defined + - release_images | json_query(ver_query) | length > 0 + - release_images | json_query(ocp_ver_query) | length > 0 + fail_msg: "release_images is not defined or does not contain the required versions" diff --git a/roles/validate_inventory/tasks/cluster.yml b/roles/validate_inventory/tasks/cluster.yml index d807a9b3..51bdad18 100644 --- a/roles/validate_inventory/tasks/cluster.yml +++ b/roles/validate_inventory/tasks/cluster.yml @@ -1,19 +1,13 @@ --- -- name: Assert Openshift version is supported - assert: - that: - - openshift_full_version in supported_ocp_versions - fail_msg: "We do not support openshift version {{ openshift_full_version }}, The supported versions are: {{ supported_ocp_versions | join(',') }}" - - name: Assert valid master configuration (HA) - assert: + ansible.builtin.assert: that: - groups['masters'] | length >= 3 fail_msg: "There must be at least three masters defined. To deploy SNO, define one master and no workers."
when: not single_node_openshift_enabled - name: Assert API and Ingress VIPs are set correctly (SNO) - assert: + ansible.builtin.assert: that: - api_vip == hostvars[sno_hostname]['ansible_host'] - ingress_vip == hostvars[sno_hostname]['ansible_host'] @@ -23,14 +17,14 @@ when: single_node_openshift_enabled - name: Assert valid worker configuration - assert: + ansible.builtin.assert: that: - (groups['workers'] | length == 0) or (groups['workers'] | length >= 2) fail_msg: "There must be either zero, or more than one, workers defined." when: groups['workers'] is defined - name: Assert all nodes have all required vars - assert: + ansible.builtin.assert: that: - hostvars[item.0][item.1] is defined - hostvars[item.0][item.1] | trim != '' @@ -39,7 +33,7 @@ loop: "{{ groups['nodes'] | product(node_required_vars) | list }}" - name: Assert all nodes have a BMC IP or address - assert: + ansible.builtin.assert: that: - (hostvars[item]['bmc_ip'] is defined and (setup_dns_service | default(false)) | bool) or (hostvars[item]['bmc_address'] is defined) quiet: true @@ -47,7 +41,7 @@ loop: "{{ groups['nodes'] }}" - name: Assert bmc_ip is correct type - assert: + ansible.builtin.assert: that: - hostvars[item]['bmc_ip'] | ansible.utils.ipaddr('bool') quiet: true @@ -56,7 +50,7 @@ when: hostvars[item]['bmc_ip'] is defined - name: Assert bmc_address is correct type - assert: + ansible.builtin.assert: that: - hostvars[item]['bmc_address'] is string quiet: true @@ -65,7 +59,7 @@ when: hostvars[item]['bmc_address'] is defined - name: Assert required vars are correctly typed - assert: + ansible.builtin.assert: that: - (hostvars[item]['mac'] | ansible.utils.hwaddr('bool')) == true - hostvars[item]['bmc_password'] is string @@ -76,7 +70,7 @@ loop: "{{ groups['nodes'] }}" - name: Assert mac has linux format - assert: + ansible.builtin.assert: that: - ( hostvars[item]['mac'] | string | upper | regex_search('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$')) is not none quiet: true @@ -87,7 +81,7 @@ loop: "{{ 
groups['nodes'] }}" - name: Assert that all values of 'role' are supported - assert: + ansible.builtin.assert: that: - hostvars[item]['role'] is in supported_role_values quiet: true diff --git a/tests/validate_inventory/suites/prereqs.yml b/tests/validate_inventory/suites/prereqs.yml index 06fb4b2a..d42a3ca2 100644 --- a/tests/validate_inventory/suites/prereqs.yml +++ b/tests/validate_inventory/suites/prereqs.yml @@ -3,6 +3,40 @@ tags: validate_prereqs template_file: test_inv.yml.j2 tests: + - test_name: valid_prereqs + expected: 0 + + - test_name: invalid_missing_os_images + expected: 2 + template: + dont_define_os_images: true + + - test_name: invalid_missing_release_images + expected: 2 + template: + dont_define_release_images: true + + - test_name: invalid_os_images_wrong_version + expected: 2 + template: + os_images: + - cpu_architecture: x86_64 + openshift_version: '4.11' + rootfs_url: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.11/4.11.9/rhcos-live-rootfs.x86_64.img + url: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.11/4.11.9/rhcos-4.11.9-x86_64-live.x86_64.iso + version: 411.86.202210072320-0 + + - test_name: invalid_release_images_wrong_version + expected: 2 + template: + release_images: + - cpu_architecture: x86_64 + cpu_architectures: + - x86_64 + openshift_version: '4.11' + url: quay.io/openshift-release-dev/ocp-release:4.11.12-x86_64 + version: 4.11.12 + - test_name: invalid_dhcp_first_last_missing expected: 2 template: diff --git a/tests/validate_inventory/templates/test_inv.yml.j2 b/tests/validate_inventory/templates/test_inv.yml.j2 index 58e62674..297b591e 100644 --- a/tests/validate_inventory/templates/test_inv.yml.j2 +++ b/tests/validate_inventory/templates/test_inv.yml.j2 @@ -1,9 +1,40 @@ all: vars: + openshift_full_version: {{ item.template.openshift_full_version | default('4.10.67') }} + repo_root_path: {{ playbook_dir + "/.." 
}} + + api_vip: 10.60.0.96 + ingress_vip: 10.60.0.97 + machine_network_cidr: 10.60.0.0/24 + + vip_dhcp_allocation: {{ item.template.vip_dhcp_allocation | default(false) }} + {% if item.template.os_images is defined %} + os_images: {{ item.template.os_images | to_json}} + {% elif (item.template.dont_define_os_images | default(false)) %} + {% else %} + os_images: + - cpu_architecture: x86_64 + openshift_version: '4.10' + rootfs_url: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.10/4.10.37/rhcos-live-rootfs.x86_64.img + url: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.10/4.10.37/rhcos-4.10.37-x86_64-live.x86_64.iso + version: 410.84.202210061459-0 + {% endif %} + + {% if item.template.release_images is defined %} + release_images: {{ item.template.release_images | to_json}} + {% elif (item.template.dont_define_release_images | default(false)) %} + {% else %} + release_images: + - cpu_architecture: x86_64 + cpu_architectures: + - x86_64 + openshift_version: '4.10' + url: quay.io/openshift-release-dev/ocp-release:4.10.67-x86_64 + version: 4.10.67 + {% endif %} {% if item.template.day2_discovery_iso_name is defined %} day2_discovery_iso_name: {{ item.template.day2_discovery_iso_name }} {% endif %} - openshift_full_version: {{ item.template.openshift_full_version | default('4.10.67') }} setup_dns_service: {{ item.template.setup_dns_service | default(False)}} {% if item.template.allow_custom_vendor is defined %} allow_custom_vendor: {{ item.template.allow_custom_vendor }} @@ -11,6 +42,35 @@ all: {% if item.template.no_proxy is defined %} no_proxy: {{ item.template.no_proxy }} {% endif %} + + ############################ + # LOGIC: DO NOT TOUCH # + # vvvvvvvvvvvvvvvvvvvvvvvv # + ############################ + {% raw %} + # pull secret logic, no need to change. 
Configure above + local_pull_secret_path: "{{ lookup('first_found', pull_secret_lookup_paths) }}" + pull_secret: "{{ lookup('file', local_pull_secret_path) }}" + + # ssh key logic, no need to change. Configure above + local_ssh_public_key_path: "{{ lookup('first_found', ssh_public_key_lookup_paths) }}" + ssh_public_key: "{{ lookup('file', local_ssh_public_key_path) }}" + + # provided mirror certificate logic, no need to change. + local_mirror_certificate_path: "{{ (setup_registry_service == true) | ternary( + fetched_dest + '/' + (hostvars['registry_host']['cert_file_prefix'] | default('registry')) + '.crt', + repo_root_path + '/mirror_certificate.txt') + }}" + mirror_certificate: "{{ lookup('file', local_mirror_certificate_path) }}" + + openshift_version: "{{ openshift_full_version.split('.')[:2] | join('.') }}" + + is_valid_single_node_openshift_config: "{{ (groups['nodes'] | length == 1) and (groups['masters'] | length == 1) }}" + {% endraw %} + ############################ + # ^^^^^^^^^^^^^^^^^^^^^^^^ # + # LOGIC: DO NOT TOUCH # + ############################ children: bastions: hosts: @@ -60,6 +120,7 @@ all: hosts: {% for n in range(item.template.num_masters | default(3)) %} master{{ n }}: + ansible_host: 10.60.0.{{n}} {% endfor %} workers: vars: @@ -67,6 +128,7 @@ all: hosts: {% for n in range(item.template.num_workers | default(2)) %} worker{{ n }}: + ansible_host: 10.60.0.{{n+50}} {% endfor %} {% if item.template.num_day2_workers is defined %} day2_workers: @@ -75,5 +137,6 @@ all: hosts: {% for n in range(item.template.num_day2_workers) %} day2_worker{{ n }}: + ansible_host: 10.60.0.{{n+100}} {% endfor %} {% endif %} From f796a8cf58ff2217911e4efd5d81514f178f32bc Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Mon, 18 Mar 2024 16:30:18 +0000 Subject: [PATCH 3/6] Set use_agent_based_installer default to true --- deploy_cluster.yml | 4 ++-- deploy_prerequisites.yml | 2 +- roles/validate_inventory/tasks/main.yml | 4 ++-- site.yml | 2 +- 4 files changed, 6 
insertions(+), 6 deletions(-) diff --git a/deploy_cluster.yml b/deploy_cluster.yml index 9030e673..7d140666 100644 --- a/deploy_cluster.yml +++ b/deploy_cluster.yml @@ -2,7 +2,7 @@ - import_playbook: playbooks/validate_inventory.yml - import_playbook: playbooks/deploy_cluster_agent_based_installer.yml - when: (use_agent_based_installer | default(false)) | bool + when: (use_agent_based_installer | default(true)) | bool - import_playbook: playbooks/deploy_cluster_assisted_installer.yml - when: not ((use_agent_based_installer | default(false)) | bool) + when: not ((use_agent_based_installer | default(true)) | bool) diff --git a/deploy_prerequisites.yml b/deploy_prerequisites.yml index a243025a..55d27936 100644 --- a/deploy_prerequisites.yml +++ b/deploy_prerequisites.yml @@ -21,6 +21,6 @@ - import_playbook: playbooks/deploy_registry.yml - import_playbook: playbooks/deploy_assisted_installer_onprem.yml - when: not ((use_agent_based_installer | default(false)) | bool) + when: not ((use_agent_based_installer | default(true)) | bool) - import_playbook: playbooks/deploy_sushy_tools.yml diff --git a/roles/validate_inventory/tasks/main.yml b/roles/validate_inventory/tasks/main.yml index 9aa47596..efe1b6b3 100644 --- a/roles/validate_inventory/tasks/main.yml +++ b/roles/validate_inventory/tasks/main.yml @@ -79,7 +79,7 @@ apply: tags: validate_agent_based_installer when: - - (use_agent_based_installer | default(false) | bool) + - (use_agent_based_installer | default(true) | bool) - not ((ignore_agent_based_installer_feature_gates | default(false)) | bool) tags: validate_agent_based_installer @@ -88,7 +88,7 @@ apply: tags: validate_agent_based_installer when: - - (use_agent_based_installer | default(false) | bool) + - (use_agent_based_installer | default(true) | bool) tags: validate_agent_based_installer when: not (inventory_validated | default(False) | bool) diff --git a/site.yml b/site.yml index df7642c5..9e3dc679 100644 --- a/site.yml +++ b/site.yml @@ -10,4 +10,4 @@ - 
import_playbook: post_install.yml - import_playbook: deploy_day2_workers.yml - when: not ((use_agent_based_installer | default(false)) | bool) + when: not ((use_agent_based_installer | default(true)) | bool) From 002addf5fc1bf7e6a2dbf4c585a449cd0c36d1ef Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Thu, 21 Mar 2024 15:43:18 +0000 Subject: [PATCH 4/6] Add a script to help users generate os_images and release_images --- docs/inventory.md | 123 +++++++++++++++----- hack/README.md | 15 +++ hack/generate_os_release_images.py | 179 +++++++++++++++++++++++++++++ 3 files changed, 286 insertions(+), 31 deletions(-) create mode 100644 hack/README.md create mode 100755 hack/generate_os_release_images.py diff --git a/docs/inventory.md b/docs/inventory.md index 7342edac..a8fa23be 100644 --- a/docs/inventory.md +++ b/docs/inventory.md @@ -18,6 +18,63 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ## Inventory Validation +### OS Image and Release Image requirements +You are now required to provide the os_image and release_image for the openshift version you want to deploy. + + Note: We have provided a script which automates steps 1 to 8 in the hack directory however there are some dependancies to it. + +The os_image for a relase can be generated by: +1. Navigating to this url https://mirror.openshift.com/pub/openshift-v4//dependencies/rhcos/. Where arch is the architecture you wish to deploy onto. + - For example we will use `x86_64` producing: + [https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/](https://mirror.openshift.com/pub/openshift-v4/amd64/dependencies/rhcos/) +2. Selecing the Y stream you wish to deploy e.g. 4.15 +3. Then selecting the Z stream latest version <= to the version you wish to deploy e.g. 4.15.5 you could select 4.15.0 +4. Navigating into that director you can find the `live iso` and the `live rootfs image` files note down there urls. +5. You then go to following URL replacing the place holders. 
Where arch is the same as before and OS_VERSION is the version you selected in the previous step. + https://mirror.openshift.com/pub/openshift-v4/<arch>/clients/ocp/<OS_VERSION>/release.txt + + - For example using arch as before and `4.15.0` producing: [https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/4.15.0/release.txt](https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/4.15.0/release.txt) +6. You then need to gather the machine-os version from the release.txt, in this case `415.92.202402201450-0` +7. You can now produce the os_image using the following template: + ```yaml + os_images: + - openshift_version: <y stream version>, + cpu_architecture: <arch>, + url: <live iso url>, + rootfs_url: <live rootfs url>, + version: <machine-os version>, + ``` + For the 4.15.5 example this would look like: + ```yaml + os_images: + - openshift_version: "4.15" + cpu_architecture: "x86_64" + url: "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.15/4.15.0/rhcos-4.15.0-x86_64-live.x86_64.iso" + rootfs_url: "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.15/4.15.0/rhcos-live-rootfs.x86_64.img" + version: "415.92.202402201450-0" + ``` +8. You can build your release image using the template: + ```yaml + release_images: + - openshift_version: <y stream version>, + cpu_architecture: <arch>, + cpu_architectures: + - <arch>, + url: "quay.io/openshift-release-dev/ocp-release:<ocp version>-<arch>", + version: <ocp version>, + ``` + For the 4.15.5 example this would look like: + ```yaml + release_images: + - openshift_version: "4.15" + cpu_architecture: "x86_64" + cpu_architectures: + - "x86_64" + url: "quay.io/openshift-release-dev/ocp-release:4.15.5-x86_64" + version: "4.15.5" + ``` +9. Insert `os_images` and `release_images` into the all section of your inventory.
+ ### Cluster config checks: #### Highly Available OpenShift cluster node checks @@ -46,32 +103,7 @@ In addition to that, the following checks must be met for both HA and SNO deploy - All values of `role` are supported - If any nodes are virtual (vendor = KVM) then a vm_host is defined -There three possible groups of nodes are `masters`, `workers` and `day2_workers`. - -#### Day 2 nodes - -Day 2 nodes are added to an existing cluster. The reason why the installation of day 2 nodes is built into the main path of our automation, is that for assisted installer day 2 nodes can be on a different L2 network which the main flow does not allow. - -Add a second ISO name parameter to the inventory to avoid conflict with the original: - -```yaml -# day2 workers require custom parameter -day2_discovery_iso_name: "discovery/day2_discovery-image.iso" -``` - -Then add the stanza for day2 workers: - -```yaml -day2_workers: - vars: - role: worker - vendor: HPE - hosts: - worker3: # Ensure this does not conflict with any existing workers - ansible_host: 10.60.0.106 - bmc_address: 172.28.11.26 - mac: 3C:FD:FE:78:AB:05 -``` +There three possible groups of nodes are `masters`, `workers` and `day2_workers` (day2_workers are onprem assisted installer only) . ### Network checks @@ -195,7 +227,7 @@ network_config: - name: ens1f0 type: ethernet mac: "40:A6:B7:3D:B3:70" - state: down + state: down - name: ens1f1 type: ethernet mac: "40:A6:B7:3D:B3:71" @@ -651,7 +683,7 @@ You must have these services when using PXE deployment vendor: pxe bmc_address: "nfvpe-21.oot.lab.eng.bos.redhat.com" bmc_port: 8082 - + ``` > **Note**: that the BMCs of the nodes in the cluster must be routable from the bastion host and the HTTP Store must be routable from the BMCs @@ -727,12 +759,41 @@ all: ansible_host: 192.168.10.17 bmc_ip: 172.30.10.7 ``` -# Defining a password for the discovery iso. + +# On prem assisted installer only +These features require that the onprem assisted installer option. 
+To use them set `use_agent_based_installer: false` in the all section of the inventory. + +## Defining a password for the discovery ISO. If users wish to provide password for the discovery ISO, they must define `hashed_discovery_password` in the `all` section inventory. The value provided in `hashed_discovery_password` can be created by using `mkpasswd --method=SHA-512 MyAwesomePassword`. - -# Operators +## Operators It is possible to install a few operators as part of the cluster installtion. These operators are Local Storage Operator (`install_lso: True`), Open Data Fabric (`install_odf: True`) and Openshift Virtualization (`install_cnv: True`) + +## Day 2 nodes + +Day 2 nodes are added to an existing cluster. The reason why the installation of day 2 nodes is built into the main path of our automation, is that for assisted installer day 2 nodes can be on a different L2 network which the main flow does not allow. + +Add a second ISO name parameter to the inventory to avoid conflict with the original: + +```yaml +# day2 workers require custom parameter +day2_discovery_iso_name: "discovery/day2_discovery-image.iso" +``` + +Then add the stanza for day2 workers: + +```yaml +day2_workers: + vars: + role: worker + vendor: HPE + hosts: + worker3: # Ensure this does not conflict with any existing workers + ansible_host: 10.60.0.106 + bmc_address: 172.28.11.26 + mac: 3C:FD:FE:78:AB:05 +``` diff --git a/hack/README.md b/hack/README.md new file mode 100644 index 00000000..496751ae --- /dev/null +++ b/hack/README.md @@ -0,0 +1,15 @@ +# generate_os_release_images.py + +## Requirements + +```shell +pip install semver beautifulsoup4 +``` + +## Usage +Can be used to generate `os_images` and `release_images`.
+ +Here's an example for multiple different ocp versions: +```shell +./generate_os_release_images.py -a x86_64 -v 4.12.29 -v 4.11.30 -v 4.13.2 -v 4.14.12 -v 4.15.1 +``` diff --git a/hack/generate_os_release_images.py b/hack/generate_os_release_images.py new file mode 100755 index 00000000..a866249b --- /dev/null +++ b/hack/generate_os_release_images.py @@ -0,0 +1,179 @@ +#! /usr/bin/env python3 + +try: + from BeautifulSoup import BeautifulSoup +except ImportError: + from bs4 import BeautifulSoup +try: + from semver import Version as VersionInfo +except ImportError: + from semver import VersionInfo + +import yaml + +import requests +import re +import argparse + +DEBUG = False + +def generate_image_values(ocp_version, arch): + rhcos = requests.get( + f"https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/{ocp_version.major}.{ocp_version.minor}" + ) + if not rhcos.ok: + raise ValueError( + f"Failed to find rhcos dependencies for version: {ocp_version.major}.{ocp_version.minor}" + ) + + page = BeautifulSoup(rhcos.content, "lxml") + versions = map(lambda p: p["href"].strip("/"), page.find_all("a")[1:-1]) + + os_version = None + for v in versions: + ver = VersionInfo.parse(v) + if ver.compare(ocp_version) < 1 and ( + os_version is None or os_version.compare(ver) == -1 + ): + os_version = ver + + if os_version is None: + raise ValueError( + f"Failed to find a version <= {ocp_version} in {versions.join(', ')}" + ) + + release_info = requests.get( + f"https://mirror.openshift.com/pub/openshift-v4/{arch}/clients/ocp/{os_version}/release.txt" + ) + if not release_info.ok: + raise ValueError(f"Failed to find release.txt for version: {os_version}") + + rhcos_version_match = re.search( + r"^\s+machine-os (?P.+) Red Hat Enterprise Linux CoreOS$", + release_info.content.decode(), + re.MULTILINE, + ) + rhcos_version = rhcos_version_match.groupdict()["rhcos_version"] + + if DEBUG: + print(arch) + print(ocp_version) + print(os_version) + print(rhcos_version) + + result = 
{ + "os_images": { + str(os_version): { + "openshift_version": f"{os_version.major}.{os_version.minor}", + "cpu_architecture": f"{arch}", + "url": f"https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{os_version.major}.{os_version.minor}/{os_version}/rhcos-{os_version}-{arch}-live.{arch}.iso", + "rootfs_url": f"https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{os_version.major}.{os_version.minor}/{os_version}/rhcos-live-rootfs.{arch}.img", + "version": f"{rhcos_version}", + }, + }, + "release_images": [ + { + "openshift_version": f"{ocp_version.major}.{ocp_version.minor}", + "cpu_architecture": arch, + "cpu_architectures": [arch], + "url": f"quay.io/openshift-release-dev/ocp-release:{ocp_version}-{arch}", + "version": str(ocp_version), + }, + ], + } + + return result + + +def merge_results(results): + merged = { + "os_images": {}, + "release_images": [], + } + + for r in results: + for os_v, os in r["os_images"].items(): + merged["os_images"][os_v] = os + for os in r["release_images"]: + merged["release_images"].append(os) + + res = { + "os_images": [], + "release_images": merged["release_images"], + } + + for os in merged["os_images"].values(): + res["os_images"].append(os) + + return res + + +def verify_urls(merged): + for os in merged["os_images"]: + url_head = requests.head(os["url"]) + if not url_head.ok: + raise ValueError(f"file not found at expected url {os['url']}") + rootfs_url_head = requests.head(os["rootfs_url"]) + if not rootfs_url_head.ok: + raise ValueError(f"file not found at expected url {os['rootfs_url']}") + + for release in merged["release_images"]: + url_head = requests.head(os["url"]) + if not url_head.ok: + raise ValueError(f"file not found at expected url {os['url']}") + + +def main(ocp_versions, arch, verify): + results = [] + for v in ocp_versions: + results.append(generate_image_values(v, arch)) + + if DEBUG: + print(results) + + merged_results = merge_results(results) + if DEBUG: + 
print(merged_results) + + class IndentDumper(yaml.Dumper): + def increase_indent(self, flow=False, indentless=False): + return super(IndentDumper, self).increase_indent(flow, False) + + if verify: + verify_urls(merged_results) + + print(yaml.dump(merged_results, Dumper=IndentDumper)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-a", + "--arch", + help="target archictecture", + ) + parser.add_argument( + "-v", + "--version", + action="append", + ) + parser.add_argument( + "--skip-verify", + action="store_false", + default=True, + ) + parser.add_argument( + "--debug", + action="store_true", + default=False, + ) + + args = parser.parse_args() + + DEBUG = args.debug + + ocp_versions = [] + for v in args.version: + ocp_versions.append(VersionInfo.parse(v)) + + main(ocp_versions=ocp_versions, arch=args.arch, verify=args.skip_verify) From 4f7e4cd3b830d5cd952b1fd5aed4959f1cfcad5c Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Thu, 21 Mar 2024 16:35:03 +0000 Subject: [PATCH 5/6] Remove test for custom_vendors As we don't host the vendor role anymore it doesn't make sense to have this test --- .../vendors/test_custom_vendor/main/disk.yml | 1 - .../test_custom_vendor/main/exists.yml | 4 --- .../vendors/test_custom_vendor/main/iso.yml | 1 - tests/validate_inventory/suites/vendor.yml | 25 ------------------- tests/validate_inventory/tests.yml | 1 - 5 files changed, 32 deletions(-) delete mode 100644 tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/disk.yml delete mode 100644 tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/exists.yml delete mode 100644 tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/iso.yml delete mode 100644 tests/validate_inventory/suites/vendor.yml diff --git a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/disk.yml 
b/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/disk.yml deleted file mode 100644 index ed97d539..00000000 --- a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/disk.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/exists.yml b/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/exists.yml deleted file mode 100644 index 0f1e886f..00000000 --- a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/exists.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- debug: # noqa: unnamed-task - msg: "I exist" - verbosity: 1 diff --git a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/iso.yml b/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/iso.yml deleted file mode 100644 index ed97d539..00000000 --- a/tests/validate_inventory/roles_for_vaildating_role_fetching/vendors/test_custom_vendor/main/iso.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/tests/validate_inventory/suites/vendor.yml b/tests/validate_inventory/suites/vendor.yml deleted file mode 100644 index e098643d..00000000 --- a/tests/validate_inventory/suites/vendor.yml +++ /dev/null @@ -1,25 +0,0 @@ -tags: validate_vendor -env: - ANSIBLE_ROLES_PATH: roles:tests/validate_inventory/roles_for_vaildating_role_fetching -template_file: test_inv.yml.j2 - -tests: - - test_name: valid_vendor - expected: 0 - - - test_name: invalid_test_vendor - expected: 2 - template: - vendor: test_custom_vendor - - - test_name: invalid_test_custom_vendor_doesnt_exist - expected: 2 - template: - vendor: test_custom_vendor_doesnt_exist - allow_custom_vendor: true - - - test_name: valid_test_custom_vendor - expected: 0 - template: - vendor: test_custom_vendor - allow_custom_vendor: true diff --git 
a/tests/validate_inventory/tests.yml b/tests/validate_inventory/tests.yml index 5e458b71..af52694f 100644 --- a/tests/validate_inventory/tests.yml +++ b/tests/validate_inventory/tests.yml @@ -11,7 +11,6 @@ - vms.yml - ntp.yml - day2.yml - - vendor.yml - proxy.yml tasks: - name: Run Inventory Validation test suites From 4a5b8b2a855ea6c51a07051758675c7d81f69e22 Mon Sep 17 00:00:00 2001 From: Michele Costa Date: Thu, 28 Mar 2024 12:45:28 +0000 Subject: [PATCH 6/6] Move Partitioning docs to assisted installer onprem only section --- docs/inventory.md | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/docs/inventory.md b/docs/inventory.md index a8fa23be..88432a0f 100644 --- a/docs/inventory.md +++ b/docs/inventory.md @@ -627,17 +627,6 @@ The basic network configuration of the inventory for the fully bare metal deploy bmc_address: 172.30.10.7 # ... ``` -## Additional Partition Deployment - -For OCP 4.8+ deployments you can set partitions if required on the nodes. You do this by adding the snippet below to the node definition. Please ensure you provide the correct label and size(MiB) for the additional partitions you want to create. The device can either be the drive in which RHCOS image needs to be installed or it can be any additional drive on the node that requires partitioning. In the case that the device is equal to the host's `installation_disk_path` then a partition will be added defined by `disks_rhcos_root`. All additional partitions must be added under `extra_partitions` key as per the example below. 
- -```yaml -disks: - - device: "{{ installation_disk_path }}" - extra_partitions: - partition_1: 1024 - partition_2: 1024 - ``` ## PXE Deployment You must have these services when using PXE deployment @@ -685,7 +674,7 @@ You must have these services when using PXE deployment bmc_port: 8082 ``` -> **Note**: that the BMCs of the nodes in the cluster must be routable from the bastion host and the HTTP Store must be routable from the BMCs +> **Note**: that the BMCs of the nodes in the cluster must be routable from the bastion host and the HTTP Store must be routable from the BMCs. These two examples are not the only type of clusters that can be deployed using Crucible. A hybrid cluster can be created by mixing virtual and bare metal nodes. @@ -775,7 +764,8 @@ It is possible to install a few operators as part of the cluster installtion. Th ## Day 2 nodes -Day 2 nodes are added to an existing cluster. The reason why the installation of day 2 nodes is built into the main path of our automation, is that for assisted installer day 2 nodes can be on a different L2 network which the main flow does not allow. +Day 2 nodes are added to an existing cluster. +The reason why the installation of day 2 nodes is built into the main path of our automation, is that for assisted installer day 2 nodes can be on a different L2 network which the main flow does not allow. Add a second ISO name parameter to the inventory to avoid conflict with the original: @@ -797,3 +787,20 @@ day2_workers: bmc_address: 172.28.11.26 mac: 3C:FD:FE:78:AB:05 ``` + +## Additional Partition Deployment + +For OCP 4.8+ deployments you can set partitions if required on the nodes. +You do this by adding the snippet below to the node definition. +Please ensure you provide the correct label and size(MiB) for the additional partitions you want to create. +The device can either be the drive in which RHCOS image needs to be installed or it can be any additional drive on the node that requires partitioning. 
+In the case that the device is equal to the host's `installation_disk_path`, then a partition defined by `disks_rhcos_root` will be added. +All additional partitions must be added under the `extra_partitions` key, as per the example below. + +```yaml +disks: + - device: "{{ installation_disk_path }}" + extra_partitions: + partition_1: 1024 + partition_2: 1024 + ```