diff --git a/kubeinit/hosts/cdk/inventory b/kubeinit/hosts/cdk/inventory index 6e272a82e..7b62b97c2 100644 --- a/kubeinit/hosts/cdk/inventory +++ b/kubeinit/hosts/cdk/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=ubuntu @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=ubuntu -disk=150G -ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus [extra_nodes:vars] @@ -59,7 +52,6 @@ disk=20G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 # # Hosts definitions diff --git a/kubeinit/hosts/eks/inventory b/kubeinit/hosts/eks/inventory index 0d0477c6f..37399a9b1 100644 --- a/kubeinit/hosts/eks/inventory +++ b/kubeinit/hosts/eks/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=centos @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=centos -disk=150G -ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus # diff --git a/kubeinit/hosts/k8s/inventory b/kubeinit/hosts/k8s/inventory index 7c79b9b4f..d87fa48a6 100644 --- a/kubeinit/hosts/k8s/inventory +++ b/kubeinit/hosts/k8s/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=centos @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=centos -disk=150G -ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus # diff --git a/kubeinit/hosts/kid/inventory b/kubeinit/hosts/kid/inventory index 534731d5a..119450a8f 100644 --- a/kubeinit/hosts/kid/inventory +++ b/kubeinit/hosts/kid/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=debian @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=debian -disk=150G 
-ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus # diff --git a/kubeinit/hosts/okd/inventory b/kubeinit/hosts/okd/inventory index 56cdf4f47..2cc2490c2 100644 --- a/kubeinit/hosts/okd/inventory +++ b/kubeinit/hosts/okd/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=coreos @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=centos -disk=150G -ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus [extra_nodes:vars] @@ -59,7 +52,6 @@ disk=20G ram=16777216 vcpus=8 maxvcpus=16 -prefix=24 # # Hosts definitions diff --git a/kubeinit/hosts/rke/inventory b/kubeinit/hosts/rke/inventory index f56374a73..44731a890 100644 --- a/kubeinit/hosts/rke/inventory +++ b/kubeinit/hosts/rke/inventory @@ -34,7 +34,6 @@ disk=25G ram=25165824 vcpus=8 maxvcpus=16 -prefix=24 [compute_nodes:vars] os=ubuntu @@ -42,15 +41,9 @@ disk=30G ram=8388608 vcpus=8 maxvcpus=16 -prefix=24 [service_nodes:vars] os=ubuntu -disk=150G -ram=12582912 -vcpus=8 -maxvcpus=16 -prefix=24 services="bind,dnsmasq,haproxy,apache,registry" # nexus # diff --git a/kubeinit/roles/kubeinit_cdk/tasks/main.yml b/kubeinit/roles/kubeinit_cdk/tasks/main.yml index efb246daf..0262d8904 100644 --- a/kubeinit/roles/kubeinit_cdk/tasks/main.yml +++ b/kubeinit/roles/kubeinit_cdk/tasks/main.yml @@ -41,7 +41,6 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: cluster when: kubeinit_cluster_nodes_deployed is not defined or not kubeinit_cluster_nodes_deployed - name: Add cluster authorized keys in cluster nodes diff --git a/kubeinit/roles/kubeinit_cdk/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_cdk/tasks/post_deployment_tasks.yml index 756bd12d6..7003e093f 100644 --- 
a/kubeinit/roles/kubeinit_cdk/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_cdk/tasks/post_deployment_tasks.yml @@ -32,4 +32,5 @@ echo "Finished" args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" diff --git a/kubeinit/roles/kubeinit_cdk/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_cdk/tasks/prepare_cluster.yml index 297cf6eaf..91d598e16 100644 --- a/kubeinit/roles/kubeinit_cdk/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_cdk/tasks/prepare_cluster.yml @@ -38,8 +38,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Add task-prepare-cluster to tasks_completed ansible.builtin.add_host: diff --git a/kubeinit/roles/kubeinit_eks/defaults/main.yml b/kubeinit/roles/kubeinit_eks/defaults/main.yml index cce8ab487..8bd9f3803 100644 --- a/kubeinit/roles/kubeinit_eks/defaults/main.yml +++ b/kubeinit/roles/kubeinit_eks/defaults/main.yml @@ -23,5 +23,3 @@ kubeinit_eks_hide_sensitive_logs: true kubeinit_eks_pod_network_cidr: 10.244.0.0/16 kubeinit_eks_service_network_cidr: 10.96.0.0/12 - -kubeinit_eks_kubernetes_version: 1.21.4 diff --git a/kubeinit/roles/kubeinit_eks/tasks/main.yml b/kubeinit/roles/kubeinit_eks/tasks/main.yml index 88e5558c9..9f33c9e3e 100644 --- a/kubeinit/roles/kubeinit_eks/tasks/main.yml +++ b/kubeinit/roles/kubeinit_eks/tasks/main.yml @@ -41,7 +41,6 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: cluster when: kubeinit_cluster_nodes_deployed is not defined or not kubeinit_cluster_nodes_deployed - name: Add cluster authorized keys in cluster nodes diff --git 
a/kubeinit/roles/kubeinit_eks/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_eks/tasks/post_deployment_tasks.yml index 1eb3ce181..1a3fab6a6 100644 --- a/kubeinit/roles/kubeinit_eks/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_eks/tasks/post_deployment_tasks.yml @@ -17,7 +17,7 @@ - name: Fetch the kubeconfig from the first controller node ansible.builtin.slurp: src: ~/.kube/config - register: kubeinit_eks_cluster_kubeconfig + register: _result_cluster_kubeconfig delegate_to: "{{ kubeinit_first_controller_node }}" - name: Create kube directory @@ -27,21 +27,21 @@ mode: '0644' delegate_to: "{{ kubeinit_provision_service_node }}" -- name: Storing the master kubeconfig to the provision services machine. +- name: Store the kubeconfig to the provision services machine. ansible.builtin.copy: - content: "{{ kubeinit_eks_cluster_kubeconfig.content | default('Empty file') | b64decode }}" + content: "{{ _result_cluster_kubeconfig.content | default('Empty file') | b64decode }}" dest: ~/.kube/config mode: '0644' delegate_to: "{{ kubeinit_provision_service_node }}" -- name: Label worker nodes +- name: Label compute nodes ansible.builtin.shell: | - kubectl label node {{ item }}.{{ kubeinit_cluster_fqdn }} node-role.kubernetes.io/worker= + kubectl label node {{ hostvars[item].fqdn }} node-role.kubernetes.io/worker= args: executable: /bin/bash - changed_when: false - with_items: - - "{{ groups['all_compute_nodes'] | list }}" + register: _result + changed_when: "_result.rc == 0" + loop: "{{ groups['all_compute_nodes'] }}" delegate_to: "{{ kubeinit_provision_service_node }}" - name: Deploy EKS manifests. 
@@ -52,7 +52,8 @@ kubectl get release kubernetes-1-18-eks-1 -o yaml args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" # diff --git a/kubeinit/roles/kubeinit_eks/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_eks/tasks/prepare_cluster.yml index bf17c3fd9..bfcda9ee7 100644 --- a/kubeinit/roles/kubeinit_eks/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_eks/tasks/prepare_cluster.yml @@ -38,8 +38,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Configure the service node block: @@ -72,6 +70,34 @@ # Kubernetes config # + - name: Set EKS kubernetes major-minor fact + ansible.builtin.set_fact: + kubeinit_eks_kubernetes_major_minor: "{{ kubeinit_inventory_kubernetes_version.split('.')[0] + '-' + kubeinit_inventory_kubernetes_version.split('.')[1] }}" + + - name: Discover the revision number for eks + ansible.builtin.command: curl -s https://raw.githubusercontent.com/aws/eks-distro/main/release/{{ kubeinit_eks_kubernetes_major_minor }}/production/RELEASE + register: _result_revision + changed_when: "_result_revision.rc == 0" + + - name: Set EKS release number fact + ansible.builtin.set_fact: + kubeinit_eks_revision: "{{ _result_revision.stdout | trim }}" + + - name: Discover the git tag for eks + ansible.builtin.command: curl -s https://raw.githubusercontent.com/aws/eks-distro/main/projects/kubernetes/release/{{ kubeinit_eks_kubernetes_major_minor }}/GIT_TAG + register: _result_gittag + changed_when: "_result_gittag.rc == 0" + + - name: Set EKS string facts + ansible.builtin.set_fact: + kubeinit_eks_kubernetes_dirname: "kubernetes-{{ 
kubeinit_eks_kubernetes_major_minor }}" + kubeinit_eks_kubernetes_filename: "kubernetes-{{ kubeinit_eks_kubernetes_major_minor + '-eks-' + kubeinit_eks_revision }}.yaml" + kubeinit_eks_kubernetes_gittag: "{{ (_result_gittag.stdout | trim) + '-eks-' + kubeinit_eks_kubernetes_major_minor + '-' + kubeinit_eks_revision }}" + + - name: Set complete EKS kubernetes version + ansible.builtin.set_fact: + kubeinit_eks_kubernetes_version: "{{ kubeinit_inventory_kubernetes_version + '.' + kubeinit_eks_revision }}" + - name: Install requirements ansible.builtin.command: dnf install -y kubectl-{{ kubeinit_eks_kubernetes_version }} --disableexcludes=kubernetes @@ -92,10 +118,11 @@ - name: Get the required container images ansible.builtin.shell: | - set -o pipefail - curl -s https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-4.yaml | sed -n -e "s|^.*uri: \(public.ecr.aws/eks-distro\)|\1|p" > ~/kubeinit_deployment_images.txt - echo public.ecr.aws/eks-distro/kubernetes/kube-proxy-base:$(curl -s https://raw.githubusercontent.com/aws/eks-distro/main/projects/kubernetes/release/1-21/GIT_TAG)-eks-1-21-4 >> ~/kubeinit_deployment_images.txt - echo public.ecr.aws/eks-distro/kubernetes/go-runner:$(curl -s https://raw.githubusercontent.com/aws/eks-distro/main/projects/kubernetes/release/1-21/GIT_TAG)-eks-1-21-4 >> ~/kubeinit_deployment_images.txt + set -eo pipefail + curl -s https://distro.eks.amazonaws.com/{{ kubeinit_eks_kubernetes_dirname }}/{{ kubeinit_eks_kubernetes_filename }} | \ + sed -n -e "s|^.*uri: \(public.ecr.aws/eks-distro\)|\1|p" > ~/kubeinit_deployment_images.txt + echo public.ecr.aws/eks-distro/kubernetes/kube-proxy-base:{{ kubeinit_eks_kubernetes_gittag }} >> ~/kubeinit_deployment_images.txt + echo public.ecr.aws/eks-distro/kubernetes/go-runner:{{ kubeinit_eks_kubernetes_gittag }} >> ~/kubeinit_deployment_images.txt args: executable: /bin/bash register: _result diff --git a/kubeinit/roles/kubeinit_k8s/tasks/main.yml 
b/kubeinit/roles/kubeinit_k8s/tasks/main.yml index d120302c2..3098decff 100644 --- a/kubeinit/roles/kubeinit_k8s/tasks/main.yml +++ b/kubeinit/roles/kubeinit_k8s/tasks/main.yml @@ -41,7 +41,6 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: cluster when: kubeinit_cluster_nodes_deployed is not defined or not kubeinit_cluster_nodes_deployed - name: Setup the first controller node @@ -173,18 +172,13 @@ # The kubeconfig file is on the controller nodes so we run kubectl label on the first controller - name: Label node - ansible.builtin.shell: | - kubectl label node {{ kubeinit_deployment_node_name }}.{{ kubeinit_cluster_fqdn }} node-role.kubernetes.io/worker= - args: - executable: /bin/bash + ansible.builtin.command: | + kubectl label node {{ hostvars[compute_node].fqdn }} node-role.kubernetes.io/worker= register: _result changed_when: "_result.rc == 0" loop: "{{ groups['all_compute_nodes'] }}" loop_control: - loop_var: cluster_role_item - vars: - kubeinit_deployment_node_name: "{{ cluster_role_item }}" - kubeinit_deployment_role: compute + loop_var: compute_node delegate_to: "{{ kubeinit_first_controller_node }}" - name: Add task-deploy-cluster to tasks_completed diff --git a/kubeinit/roles/kubeinit_k8s/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_k8s/tasks/post_deployment_tasks.yml index 19690ad3c..920f76145 100644 --- a/kubeinit/roles/kubeinit_k8s/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_k8s/tasks/post_deployment_tasks.yml @@ -18,7 +18,7 @@ - name: Copying the kubeconfig to a variable ansible.builtin.slurp: src: ~/.kube/config - register: kubeinit_k8s_cluster_kubeconfig + register: _result_cluster_kubeconfig delegate_to: "{{ kubeinit_first_controller_node }}" - name: Create kube directory @@ -30,7 +30,7 @@ - name: Storing the master kubeconfig to the services machine. 
ansible.builtin.copy: - content: "{{ kubeinit_k8s_cluster_kubeconfig.content | default('Empty file') | b64decode }}" + content: "{{ _result_cluster_kubeconfig.content | default('Empty file') | b64decode }}" dest: ~/.kube/config mode: '0644' delegate_to: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_k8s/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_k8s/tasks/prepare_cluster.yml index 458f8e37f..50b531321 100644 --- a/kubeinit/roles/kubeinit_k8s/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_k8s/tasks/prepare_cluster.yml @@ -38,8 +38,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Configure the service node block: diff --git a/kubeinit/roles/kubeinit_kid/tasks/main.yml b/kubeinit/roles/kubeinit_kid/tasks/main.yml index a44a61819..9b8f48b97 100644 --- a/kubeinit/roles/kubeinit_kid/tasks/main.yml +++ b/kubeinit/roles/kubeinit_kid/tasks/main.yml @@ -41,7 +41,7 @@ vars: kubeinit_deployment_node_name: "{{ cluster_node }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_node].target }}" - kubeinit_deployment_role: cluster + when: kubeinit_cluster_nodes_deployed is not defined or not kubeinit_cluster_nodes_deployed - name: Add cluster authorized keys in all cluster nodes ansible.posix.authorized_key: diff --git a/kubeinit/roles/kubeinit_kid/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_kid/tasks/post_deployment_tasks.yml index 846aedb70..986f862e4 100644 --- a/kubeinit/roles/kubeinit_kid/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_kid/tasks/post_deployment_tasks.yml @@ -32,6 +32,6 @@ touch ~/.kube/config args: executable: /bin/bash - register: touch_kubeconfig - changed_when: "touch_kubeconfig.rc == 0" 
+ register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_kid/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_kid/tasks/prepare_cluster.yml index 4012e8d3c..e035b2e2f 100644 --- a/kubeinit/roles/kubeinit_kid/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_kid/tasks/prepare_cluster.yml @@ -38,8 +38,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Configure the service node block: @@ -51,8 +49,8 @@ echo "{{ kubeinit_kid_service_cidr }}" > ~/service_cidr args: executable: /bin/bash - register: render_net_info - changed_when: "render_net_info.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/10_ovn_cleanup.yml b/kubeinit/roles/kubeinit_libvirt/tasks/10_ovn_cleanup.yml index c18752fe3..ab62b03b2 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/10_ovn_cleanup.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/10_ovn_cleanup.yml @@ -30,8 +30,8 @@ ovs-dpctl del-dp ovs-system || true args: executable: /bin/bash - register: clean_ovn - changed_when: "clean_ovn.rc == 0" + register: _result + changed_when: "_result.rc == 0" with_items: - "{{ groups['all_hosts'] | list }}" loop_control: @@ -47,8 +47,8 @@ name: "{{ service_name }}" state: stopped enabled: false - register: libvirt_clean_ovn - failed_when: libvirt_clean_ovn is not defined + register: _result_stop_service + failed_when: _result_stop_service is not defined with_items: - "{{ groups['all_hosts'] | product(['openvswitch', 'ovn-northd', 'ovn-controller']) | list }}" loop_control: @@ -66,8 +66,8 @@ name: "{{ 
service_name }}" state: stopped enabled: false - register: libvirt_clean_ovn - failed_when: libvirt_clean_ovn is not defined + register: _result_stop_service + failed_when: _result_stop_service is not defined with_items: - "{{ groups['all_hosts'] | product(['openvswitch', 'ovn-controller']) | list }}" loop_control: @@ -85,8 +85,8 @@ name: "{{ service_name }}" state: stopped enabled: false - register: libvirt_clean_ovn - failed_when: libvirt_clean_ovn is not defined + register: _result_stop_service + failed_when: _result_stop_service is not defined with_items: - "{{ groups['all_hosts'] | product(['ovs-vswitchd', 'ovn-central', 'ovn-controller']) | list }}" loop_control: @@ -104,8 +104,8 @@ name: "{{ service_name }}" state: stopped enabled: false - register: libvirt_clean_ovn - failed_when: libvirt_clean_ovn is not defined + register: _result_stop_service + failed_when: _result_stop_service is not defined with_items: - "{{ groups['all_hosts'] | product(['ovs-vswitchd', 'ovn-controller']) | list }}" loop_control: diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/20_ovn_install.yml b/kubeinit/roles/kubeinit_libvirt/tasks/20_ovn_install.yml index 7e4704897..a86826f14 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/20_ovn_install.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/20_ovn_install.yml @@ -38,8 +38,8 @@ (hostvars[kubeinit_deployment_node_name].distribution_family == 'CentOS') args: executable: /bin/bash - register: ovn_install - changed_when: "ovn_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install OVN packages in Fedora (Master Hypervisor) ansible.builtin.shell: | @@ -53,8 +53,8 @@ (hostvars[kubeinit_deployment_node_name].distribution_family == 'Fedora') args: executable: /bin/bash - register: ovn_install - changed_when: "ovn_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install OVN packages in Ubuntu/Debian (Master Hypervisor) ansible.builtin.shell: | @@ -68,8 +68,8 @@ 
(hostvars[kubeinit_deployment_node_name].distribution_family == 'Debian') args: executable: /bin/bash - register: ovn_install - changed_when: "ovn_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" # # We DO NOT install ovn-central (OVN requirement) in the other hypervisors @@ -87,8 +87,8 @@ (hostvars[kubeinit_deployment_node_name].distribution_family == 'CentOS') args: executable: /bin/bash - register: ovn_install_slaves - changed_when: "ovn_install_slaves.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install OVN packages in Fedora (Slave Hypervisor) ansible.builtin.shell: | @@ -101,8 +101,8 @@ (hostvars[kubeinit_deployment_node_name].distribution_family == 'Fedora') args: executable: /bin/bash - register: ovn_install_slaves - changed_when: "ovn_install_slaves.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install OVN packages in Ubuntu/Debian (Slave Hypervisor) ansible.builtin.shell: | @@ -115,14 +115,14 @@ (hostvars[kubeinit_deployment_node_name].distribution_family == 'Debian') args: executable: /bin/bash - register: ovn_install_slaves - changed_when: "ovn_install_slaves.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Refresh firewalld services list to pick up ovn services ansible.builtin.command: | firewall-cmd --reload - register: refresh_firewalld - changed_when: "refresh_firewalld.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: > hostvars[kubeinit_deployment_node_name].firewalld_is_active @@ -158,8 +158,8 @@ - name: Refresh firewalld services list ansible.builtin.command: | firewall-cmd --reload - register: reload_firewalld - changed_when: "reload_firewalld.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: > hostvars[kubeinit_deployment_node_name].firewalld_is_active diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/40_ovn_setup.yml b/kubeinit/roles/kubeinit_libvirt/tasks/40_ovn_setup.yml index 96619a2e4..0ca0e8203 100644 --- 
a/kubeinit/roles/kubeinit_libvirt/tasks/40_ovn_setup.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/40_ovn_setup.yml @@ -20,25 +20,22 @@ - name: Configure OVS on the Hypervisors ansible.builtin.shell: | CENTRAL_IP={{ hostvars[kubeinit_ovn_central_host].ssh_connection_address }} # This is the IP of the ovn-central HV - LOCAL_IP={{ hostvars[kubeinit_deployment_node_name].ssh_connection_address }} # This is the IP of the current HV + LOCAL_IP={{ hostvars[ovn_host].ssh_connection_address }} # This is the IP of the current HV ENCAP_TYPE={{ kubeinit_libvirt_ovn_encapsulation }} ovs-vsctl set Open_vSwitch . \ external_ids:ovn-remote="tcp:$CENTRAL_IP:{{ kubeinit_libvirt_ovn_southbound_port }}" \ external_ids:ovn-nb="tcp:$CENTRAL_IP:{{ kubeinit_libvirt_ovn_northbound_port }}" \ external_ids:ovn-encap-ip=$LOCAL_IP \ external_ids:ovn-encap-type="$ENCAP_TYPE" \ - external_ids:system-id="{{ kubeinit_deployment_node_name }}" + external_ids:system-id="{{ ovn_host }}" # On each HV lets create a virtual bridge br-int # This bridge will be used when we create the VMs ovs-vsctl --may-exist add-br br-int args: executable: /bin/bash - register: ovs_setup - changed_when: "ovs_setup.rc == 0" - with_items: - - "{{ groups['all_ovn_hosts'] }}" + register: _result + changed_when: "_result.rc == 0" + loop: "{{ groups['all_ovn_hosts'] }}" loop_control: - loop_var: cluster_role_item - vars: - kubeinit_deployment_node_name: "{{ cluster_role_item }}" - delegate_to: "{{ kubeinit_deployment_node_name }}" + loop_var: ovn_host + delegate_to: "{{ ovn_host }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/50_ovn_post_setup.yml b/kubeinit/roles/kubeinit_libvirt/tasks/50_ovn_post_setup.yml index 60f386673..1e22bcdd7 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/50_ovn_post_setup.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/50_ovn_post_setup.yml @@ -28,8 +28,8 @@ ovn-sbctl set-connection ptcp:{{ kubeinit_libvirt_ovn_southbound_port }} args: executable: /bin/bash - register: 
config_ovn_connection - changed_when: "config_ovn_connection.rc == 0" + register: _result + changed_when: "_result.rc == 0" # # We create the OVN switch that will be binded to each chassis (hypervisor) @@ -44,9 +44,9 @@ ovn-nbctl --wait=hv ls-add sw0 args: executable: /bin/bash - register: config_ovn_switch + register: _result + changed_when: "_result.rc == 0" when: kubeinit_libvirt_destroy_nets|bool and not kubeinit_libvirt_multicluster_keep_predefined_networks - changed_when: "config_ovn_switch.rc == 0" - name: Create OVS/OVN bindings for the VMs ports ansible.builtin.shell: | @@ -61,9 +61,9 @@ ovn-nbctl --wait=sb lsp-set-port-security {{ hostvars[item].interfaceid }} "{{ hostvars[item].mac }} {{ hostvars[item].ansible_host }}" args: executable: /bin/bash - with_items: "{{ groups['all_nodes'] }}" - register: config_ovn_bindings - changed_when: "config_ovn_bindings.rc == 0" + loop: "{{ groups['all_nodes'] }}" + register: _result + changed_when: "_result.rc == 0" - name: Configuring a router connected to the guests switch ansible.builtin.shell: | @@ -123,8 +123,8 @@ sysctl net.ipv4.conf.all.rp_filter=2 args: executable: /bin/bash - register: config_ovn_routing - changed_when: "config_ovn_routing.rc == 0" + register: _result + changed_when: "_result.rc == 0" # # The NAT rules are mandatory to allow guests to have external connectivity @@ -154,8 +154,8 @@ iptables -P OUTPUT ACCEPT args: executable: /bin/bash - register: config_iptables - changed_when: "config_iptables.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: not hostvars[kubeinit_deployment_node_name].firewalld_is_active delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/70_check_nodes_up.yml b/kubeinit/roles/kubeinit_libvirt/tasks/70_check_nodes_up.yml index 79e81f331..e7305c5e3 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/70_check_nodes_up.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/70_check_nodes_up.yml @@ -30,11 +30,11 @@ 
root@{{ hostvars[kubeinit_deployment_node_name].ansible_host }} 'echo connected' || true args: executable: /bin/bash - register: cmd_boot_ok_res + register: _result retries: 30 delay: 10 - until: "'connected' in cmd_boot_ok_res.stdout" - changed_when: "cmd_boot_ok_res.rc == 0" + until: "'connected' in _result.stdout" + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_delegate }}" - name: Wait for {{ kubeinit_deployment_node_name }} to boot diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml index 2449800e0..783b8f211 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_centos_guest.yml @@ -46,8 +46,8 @@ qemu-img resize {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2 +{{ hostvars[kubeinit_deployment_node_name].disk }} args: executable: /bin/bash - register: grow_img - changed_when: "grow_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" # This will inject the VM configuration in the case of a CentOS machine - name: "Inject virt-customize assets in {{ kubeinit_deployment_node_name }}" @@ -72,8 +72,8 @@ --selinux-relabel args: executable: /bin/bash - register: virt_inject - changed_when: "virt_inject.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: "Create VM definition for {{ kubeinit_deployment_node_name }}" ansible.builtin.shell: | @@ -97,17 +97,17 @@ --disk {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2,format=qcow2,bus=virtio args: executable: /bin/bash - register: virt_install - changed_when: "virt_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Wait until {{ kubeinit_deployment_node_name }} is running community.libvirt.virt: command: list_vms state: running - register: running_vms + register: _result 
retries: 30 delay: 10 - until: hostvars[kubeinit_deployment_node_name].guest_name in running_vms.list_vms + until: hostvars[kubeinit_deployment_node_name].guest_name in _result.list_vms delegate_to: "{{ kubeinit_deployment_delegate }}" @@ -134,20 +134,20 @@ - name: Get resolv lines ansible.builtin.slurp: src: /etc/resolv.conf - register: count_resolv_lines + register: _result_resolv_conf - name: Add the local DNS server as a local resolver when not empty ansible.builtin.lineinfile: path: /etc/resolv.conf line: "nameserver {{ kubeinit_bind_service_address }}" insertbefore: nameserver.* - when: (count_resolv_lines.content | b64decode).splitlines() | length > 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length > 0 - name: Add the local DNS server as a local resolver when empty ansible.builtin.lineinfile: path: /etc/resolv.conf line: "nameserver {{ kubeinit_bind_service_address }}" - when: (count_resolv_lines.content | b64decode).splitlines() | length == 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length == 0 - name: Add the local DNS server as a local resolver ansible.builtin.lineinfile: @@ -160,7 +160,8 @@ swapoff -a args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Resize root partition ansible.builtin.shell: | @@ -169,7 +170,8 @@ xfs_growfs /dev/vda1 args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Disable SELinux ansible.posix.selinux: @@ -187,7 +189,8 @@ sysctl -p args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Perform any distro-specific post-deployment guest configuration ansible.builtin.include_role: @@ -199,7 +202,7 @@ ansible.builtin.package: name: "*" state: latest - register: update_packages + register: _result_update_packages - name: Reboot immediately after the package update ansible.builtin.shell: "sleep 5 && reboot" @@ 
-207,7 +210,7 @@ executable: /bin/bash async: 1 poll: 0 - when: update_packages is changed and kubeinit_libvirt_reboot_guests_after_package_update + when: _result_update_packages is changed and kubeinit_libvirt_reboot_guests_after_package_update delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_coreos_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_coreos_guest.yml index bba456a77..ead7280e9 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_coreos_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_coreos_guest.yml @@ -36,7 +36,7 @@ kubeinit_libvirt_okd_image_rootfs: "{{ kubeinit_libvirt_source_images.rhcos.rootfs }}" when: kubeinit_okd_openshift_deploy | default(False) - - name: "Create VM definition for the {{ kubeinit_deployment_role }} nodes" + - name: "Create VM definition for {{ kubeinit_deployment_node_name }}" ansible.builtin.shell: | set -o pipefail # If you use the kernel args to deploy the machine @@ -53,10 +53,10 @@ coreos.inst.insecure=yes coreos.inst.install_dev=/dev/vda coreos.inst.image_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_raw }} - coreos.inst.ignition_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_deployment_role }}.ign + coreos.inst.ignition_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_ignition_name }}.ign coreos.live.rootfs_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_rootfs }}' - kernel_args='initrd=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_initrd }} ip=dhcp nameserver={{ kubeinit_bind_service_address }} rd.neednet=1 console=tty0 console=ttyS0 coreos.inst=yes coreos.inst.insecure=yes coreos.inst.install_dev=/dev/vda coreos.inst.image_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ 
kubeinit_libvirt_okd_image_raw }} coreos.inst.ignition_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_deployment_role }}.ign coreos.live.rootfs_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_rootfs }}' + kernel_args='initrd=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_initrd }} ip=dhcp nameserver={{ kubeinit_bind_service_address }} rd.neednet=1 console=tty0 console=ttyS0 coreos.inst=yes coreos.inst.insecure=yes coreos.inst.install_dev=/dev/vda coreos.inst.image_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_raw }} coreos.inst.ignition_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_ignition_name }}.ign coreos.live.rootfs_url=http://{{ kubeinit_apache_service_address }}:8080/kubeinit/okd4/{{ kubeinit_libvirt_okd_image_rootfs }}' virt-install \ --connect qemu:///system \ @@ -77,15 +77,16 @@ --extra-args "${kernel_args}" args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Wait until {{ kubeinit_deployment_node_name }} is running community.libvirt.virt: command: list_vms state: running - register: running_vms + register: _result retries: 30 delay: 10 - until: hostvars[kubeinit_deployment_node_name].guest_name in running_vms.list_vms + until: hostvars[kubeinit_deployment_node_name].guest_name in _result.list_vms delegate_to: "{{ hostvars[kubeinit_deployment_node_name].target }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml index 3be1b3c33..61b2756e1 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml @@ -51,8 +51,8 @@ qemu-img resize {{ kubeinit_libvirt_target_image_dir }}/{{ 
hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2 +{{ hostvars[kubeinit_deployment_node_name].disk }} args: executable: /bin/bash - register: grow_img - changed_when: "grow_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: "Inject virt-customize assets" ansible.builtin.shell: | @@ -76,10 +76,10 @@ --copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/cloud.cfg:/etc/cloud args: executable: /bin/bash - register: virt_inject - changed_when: "virt_inject.rc == 0" + register: _result + changed_when: "_result.rc == 0" - - name: "Create VM definition for the {{ kubeinit_deployment_role }} nodes" + - name: "Create VM definition for {{ kubeinit_deployment_node_name }}" ansible.builtin.shell: | virt-install \ --connect qemu:///system \ @@ -101,24 +101,24 @@ --disk {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2,format=qcow2,bus=virtio args: executable: /bin/bash - register: virt_install - changed_when: "virt_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Wait until {{ kubeinit_deployment_node_name }} is running community.libvirt.virt: command: list_vms state: running - register: running_vms + register: _result retries: 30 delay: 10 - until: hostvars[kubeinit_deployment_node_name].guest_name in running_vms.list_vms + until: hostvars[kubeinit_deployment_node_name].guest_name in _result.list_vms delegate_to: "{{ kubeinit_deployment_delegate }}" - name: Check cluster nodes are up and running ansible.builtin.include_tasks: 70_check_nodes_up.yml -- name: Configure common requirements in guests +- name: Configure common requirements in Debian guests block: - name: Make sure base file exists @@ -131,20 +131,20 @@ - name: Get resolv lines ansible.builtin.slurp: src: /etc/resolvconf/resolv.conf.d/tail - register: count_resolv_lines + register: _result_resolv_conf - name: Add the local DNS server as a local 
resolver when not empty ansible.builtin.lineinfile: path: /etc/resolvconf/resolv.conf.d/tail line: "nameserver {{ kubeinit_bind_service_address }}" insertbefore: nameserver.* - when: (count_resolv_lines.content | b64decode).splitlines() | length > 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length > 0 - name: Add the local DNS server as a local resolver when empty ansible.builtin.lineinfile: path: /etc/resolvconf/resolv.conf.d/tail line: "nameserver {{ kubeinit_bind_service_address }}" - when: (count_resolv_lines.content | b64decode).splitlines() | length == 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length == 0 - name: Add the local DNS server as a local resolver ansible.builtin.lineinfile: @@ -170,8 +170,8 @@ resolvconf -u args: executable: /bin/bash - register: render_resolv - changed_when: "render_resolv.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Force apt-get update ansible.builtin.shell: | @@ -179,7 +179,8 @@ apt-get update args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Add the Podman debian package repository to Apt ansible.builtin.shell: | @@ -190,21 +191,21 @@ apt-get update args: executable: /bin/bash - register: add_podman_repo - changed_when: "add_podman_repo.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Update packages ansible.builtin.package: name: "*" state: latest - register: update_packages - name: Disable SWAP ansible.builtin.shell: | swapoff -a args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Resize root partition ansible.builtin.shell: | @@ -213,7 +214,8 @@ resize2fs /dev/vda1 args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Enable cloud init ansible.builtin.shell: | @@ -221,7 +223,8 @@ systemctl start cloud-init args: executable: /bin/bash - changed_when: 
false + register: _result + changed_when: "_result.rc == 0" - name: Perform any distro-specific post-deployment guest configuration ansible.builtin.include_role: @@ -233,7 +236,7 @@ ansible.builtin.package: name: "*" state: latest - register: update_packages + register: _result_update_packages - name: Reboot immediately after the package update ansible.builtin.shell: "sleep 5 && reboot" @@ -241,7 +244,7 @@ executable: /bin/bash async: 1 poll: 0 - when: update_packages is changed and kubeinit_libvirt_reboot_guests_after_package_update + when: _result_update_packages is changed and kubeinit_libvirt_reboot_guests_after_package_update delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_network.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_network.yml index b247d956b..bc4c464af 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_network.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_network.yml @@ -25,7 +25,6 @@ loop_var: cluster_role_item vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" - kubeinit_deployment_role: network - name: Include the OVN setup ansible.builtin.include_tasks: 40_ovn_setup.yml @@ -38,7 +37,6 @@ loop_var: cluster_role_item vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" - kubeinit_deployment_role: network when: (kubeinit_deployment_node_name in kubeinit_ovn_central_host) - name: Include the linux bridge tasks @@ -49,5 +47,4 @@ loop_var: cluster_role_item vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" - kubeinit_deployment_role: network when: (kubeinit_deployment_node_name in groups['all_hosts']) diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml index 5a4645f89..4bb96598f 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/deploy_ubuntu_guest.yml @@ -51,8 +51,8 @@ qemu-img 
resize {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2 +{{ hostvars[kubeinit_deployment_node_name].disk }} args: executable: /bin/bash - register: grow_img - changed_when: "grow_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: "Inject virt-customize assets" ansible.builtin.shell: | @@ -76,10 +76,10 @@ --copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/cloud.cfg:/etc/cloud args: executable: /bin/bash - register: virt_inject - changed_when: "virt_inject.rc == 0" + register: _result + changed_when: "_result.rc == 0" - - name: "Create VM definition for the {{ kubeinit_deployment_role }} nodes" + - name: "Create VM definition for {{ kubeinit_deployment_node_name }}" ansible.builtin.shell: | virt-install \ --connect qemu:///system \ @@ -101,17 +101,17 @@ --disk {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2,format=qcow2,bus=virtio args: executable: /bin/bash - register: virt_install - changed_when: "virt_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Wait until {{ kubeinit_deployment_node_name }} is running community.libvirt.virt: command: list_vms state: running - register: running_vms + register: _result retries: 30 delay: 10 - until: hostvars[kubeinit_deployment_node_name].guest_name in running_vms.list_vms + until: hostvars[kubeinit_deployment_node_name].guest_name in _result.list_vms delegate_to: "{{ kubeinit_deployment_delegate }}" @@ -160,20 +160,20 @@ - name: Get resolv lines ansible.builtin.slurp: src: /etc/resolvconf/resolv.conf.d/tail - register: count_resolv_lines + register: _result_resolv_conf - name: Add the local DNS server as a local resolver when not empty ansible.builtin.lineinfile: path: /etc/resolvconf/resolv.conf.d/tail line: "nameserver {{ kubeinit_bind_service_address }}" insertbefore: nameserver.* - when: 
(count_resolv_lines.content | b64decode).splitlines() | length > 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length > 0 - name: Add the local DNS server as a local resolver when empty ansible.builtin.lineinfile: path: /etc/resolvconf/resolv.conf.d/tail line: "nameserver {{ kubeinit_bind_service_address }}" - when: (count_resolv_lines.content | b64decode).splitlines() | length == 0 + when: (_result_resolv_conf.content | b64decode).splitlines() | length == 0 - name: Add the local DNS server as a local resolver ansible.builtin.lineinfile: @@ -199,22 +199,24 @@ resolvconf -u args: executable: /bin/bash - register: render_resolv - changed_when: "render_resolv.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Force apt-get update ansible.builtin.shell: | apt-get update args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Disable SWAP ansible.builtin.shell: | swapoff -a args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Resize root partition ansible.builtin.shell: | @@ -223,7 +225,8 @@ resize2fs /dev/vda1 args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Enable cloud init ansible.builtin.shell: | @@ -231,7 +234,8 @@ systemctl start cloud-init args: executable: /bin/bash - changed_when: false + register: _result + changed_when: "_result.rc == 0" - name: Perform any distro-specific post-deployment guest configuration ansible.builtin.include_role: @@ -243,7 +247,7 @@ ansible.builtin.package: name: "*" state: latest - register: update_packages + register: _result_update_packages - name: Reboot immediately after the package update ansible.builtin.shell: "sleep 5 && reboot" @@ -251,7 +255,7 @@ executable: /bin/bash async: 1 poll: 0 - when: update_packages is changed and kubeinit_libvirt_reboot_guests_after_package_update + when: _result_update_packages is 
changed and kubeinit_libvirt_reboot_guests_after_package_update delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml b/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml index 8d8796e1f..67892022a 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/download_cloud_images.yml @@ -88,8 +88,8 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item[0] }}" guest_os: "{{ cluster_role_item[1] }}" - register: update_cloud_image_packages - changed_when: "update_cloud_image_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Add task-download-images to tasks_completed diff --git a/kubeinit/roles/kubeinit_libvirt/tasks/download_coreos_libvirt_images.yml b/kubeinit/roles/kubeinit_libvirt/tasks/download_coreos_libvirt_images.yml index c98bf43ba..fb002c541 100644 --- a/kubeinit/roles/kubeinit_libvirt/tasks/download_coreos_libvirt_images.yml +++ b/kubeinit/roles/kubeinit_libvirt/tasks/download_coreos_libvirt_images.yml @@ -55,8 +55,8 @@ EOF args: executable: /bin/bash - register: treeinfo - changed_when: "treeinfo.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: not kubeinit_okd_openshift_deploy @@ -98,8 +98,8 @@ EOF args: executable: /bin/bash - register: treeinfo - changed_when: "treeinfo.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_okd_openshift_deploy diff --git a/kubeinit/roles/kubeinit_libvirt/templates/00-installer-ubuntu-netconfig.yaml.j2 b/kubeinit/roles/kubeinit_libvirt/templates/00-installer-ubuntu-netconfig.yaml.j2 index 822614e5d..de3635bdf 100644 --- a/kubeinit/roles/kubeinit_libvirt/templates/00-installer-ubuntu-netconfig.yaml.j2 +++ b/kubeinit/roles/kubeinit_libvirt/templates/00-installer-ubuntu-netconfig.yaml.j2 @@ -11,7 +11,7 @@ network: dhcp4: false dhcp6: false 
link-local: [ ] - addresses: [{{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ hostvars[kubeinit_deployment_node_name].prefix }}] + addresses: [{{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ kubeinit_inventory_network_cidr }}] gateway4: {{ kubeinit_inventory_network_gateway }} match: macaddress: {{ hostvars[kubeinit_deployment_node_name].mac }} diff --git a/kubeinit/roles/kubeinit_libvirt/templates/debian-network-config-enp1s0.j2 b/kubeinit/roles/kubeinit_libvirt/templates/debian-network-config-enp1s0.j2 index 2cc1accb1..9c5f2819a 100644 --- a/kubeinit/roles/kubeinit_libvirt/templates/debian-network-config-enp1s0.j2 +++ b/kubeinit/roles/kubeinit_libvirt/templates/debian-network-config-enp1s0.j2 @@ -1,6 +1,6 @@ auto enp1s0 iface enp1s0 inet static -address {{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ hostvars[kubeinit_deployment_node_name].prefix }} +address {{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ kubeinit_inventory_network_cidr }} gateway {{ kubeinit_inventory_network_gateway }} mtu 1442 diff --git a/kubeinit/roles/kubeinit_okd/tasks/20_configure_controller_nodes.yml b/kubeinit/roles/kubeinit_okd/tasks/20_configure_controller_nodes.yml deleted file mode 100644 index 91c45e6a5..000000000 --- a/kubeinit/roles/kubeinit_okd/tasks/20_configure_controller_nodes.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -# Copyright kubeinit contributors -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -- name: "Verify that controller nodes are ok" - ansible.builtin.shell: | - set -o pipefail - export KUBECONFIG=~/install_dir/auth/kubeconfig; \ - oc get nodes | grep master | grep " Ready" - args: - executable: /bin/bash - register: cmd_res_nodes_ok - changed_when: "cmd_res_nodes_ok.rc == 0" - retries: 60 - delay: 60 - until: cmd_res_nodes_ok.stdout_lines | default([]) | list | count == groups['all_controller_nodes'] | count - delegate_to: "{{ kubeinit_deployment_node_name }}" - -- name: "Use single node cluster" - ansible.builtin.shell: | - set -o pipefail - export KUBECONFIG=~/install_dir/auth/kubeconfig - oc get nodes - - oc patch clusterversion/version --type='merge' -p "$(cat <<- EOF - spec: - overrides: - - group: apps/v1 - kind: Deployment - name: etcd-quorum-guard - namespace: openshift-machine-config-operator - unmanaged: true - EOF - )" - oc scale --replicas=1 deployment/etcd-quorum-guard -n openshift-machine-config-operator || true - oc scale --replicas=1 ingresscontroller/default -n openshift-ingress-operator || true - oc scale --replicas=1 deployment.apps/console -n openshift-console || true - oc scale --replicas=1 deployment.apps/downloads -n openshift-console || true - oc scale --replicas=1 deployment.apps/oauth-openshift -n openshift-authentication || true - oc scale --replicas=1 deployment.apps/packageserver -n openshift-operator-lifecycle-manager || true - # Optional - oc scale --replicas=1 deployment.apps/prometheus-adapter -n openshift-monitoring || true - oc scale --replicas=1 deployment.apps/thanos-querier -n openshift-monitoring || true - oc scale --replicas=1 statefulset.apps/prometheus-k8s -n openshift-monitoring || true - oc scale --replicas=1 statefulset.apps/alertmanager-main -n openshift-monitoring || true - - oc patch etcd cluster -p='{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true }}}' --type=merge - oc patch authentications.operator.openshift.io cluster -p='{"spec": 
{"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableOAuthServer": true }}}' --type=merge - args: - executable: /bin/bash - register: single_node_cluster - changed_when: "single_node_cluster.rc == 0" - when: (groups['all_controller_nodes'] | count) == 1 - delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_okd/tasks/main.yml b/kubeinit/roles/kubeinit_okd/tasks/main.yml index 6ef9a41b6..4437b6282 100644 --- a/kubeinit/roles/kubeinit_okd/tasks/main.yml +++ b/kubeinit/roles/kubeinit_okd/tasks/main.yml @@ -42,9 +42,9 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: bootstrap + kubeinit_ignition_name: bootstrap -- name: Deploy the cluster master nodes +- name: Deploy the cluster controller nodes ansible.builtin.include_role: name: kubeinit.kubeinit.kubeinit_libvirt tasks_from: deploy_coreos_guest.yml @@ -56,7 +56,7 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: master + kubeinit_ignition_name: master - name: Verify that controller nodes are ok ansible.builtin.shell: | @@ -65,12 +65,12 @@ oc get nodes | grep master | grep " Ready" args: executable: /bin/bash - register: cmd_res_nodes_ok - changed_when: "cmd_res_nodes_ok.rc == 0" + register: _result + changed_when: "_result.rc == 0" retries: 60 delay: 60 - until: cmd_res_nodes_ok.stdout_lines | default([]) | list | count == groups['all_controller_nodes'] | count delegate_to: "{{ kubeinit_provision_service_node }}" + until: _result.stdout_lines | default([]) | list | count == groups['all_controller_nodes'] | count - name: Use single node cluster ansible.builtin.shell: | @@ -102,8 +102,8 @@ oc patch authentications.operator.openshift.io cluster -p='{"spec": {"unsupportedConfigOverrides": 
{"useUnsupportedUnsafeNonHANonProductionUnstableOAuthServer": true }}}' --type=merge args: executable: /bin/bash - register: single_node_cluster - changed_when: "single_node_cluster.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" when: (groups['all_controller_nodes'] | count) == 1 @@ -116,18 +116,18 @@ openshift-install --dir=install_dir/ wait-for bootstrap-complete --log-level info args: executable: /bin/bash - register: result + register: _result + changed_when: "_result.rc == 0" retries: 5 delay: 20 - until: result.rc == 0 - changed_when: "result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" + until: _result.rc == 0 - name: Remove bootstrap node from haproxy config ansible.builtin.command: | podman --remote --connection {{ kubeinit_haproxy_service_node }} exec {{ kubeinit_haproxy_service_name }} sed -i '/bootstrap/s/^/#/' /usr/local/etc/haproxy/haproxy.cfg - register: remove_bootstrap_from_haproxy - changed_when: "remove_bootstrap_from_haproxy.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: localhost - name: Restart haproxy container @@ -161,7 +161,7 @@ kubeinit_deployment_node_name: "{{ groups['extra_nodes'][0] }}" delegate_to: "{{ hostvars[kubeinit_deployment_node_name].target }}" -- name: Deploy the cluster worker nodes +- name: Deploy the cluster compute nodes ansible.builtin.include_role: name: kubeinit.kubeinit.kubeinit_libvirt tasks_from: deploy_coreos_guest.yml @@ -173,7 +173,7 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: worker + kubeinit_ignition_name: worker - name: Add task-deploy-cluster to tasks_completed ansible.builtin.add_host: diff --git a/kubeinit/roles/kubeinit_okd/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_okd/tasks/post_deployment_tasks.yml index 17f9f99e0..099ec428d 100644 --- 
a/kubeinit/roles/kubeinit_okd/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_okd/tasks/post_deployment_tasks.yml @@ -25,12 +25,12 @@ oc get csr | grep Approved args: executable: /bin/bash - register: cmd_res - changed_when: "cmd_res.rc == 0" + register: _result + changed_when: "_result.rc == 0" retries: 60 delay: 60 # Per each node we have 2 certs one per kubelet-serving and another per kube-apiserver-client-kubelet - until: cmd_res.stdout_lines | default([]) | list | count == ( groups['all_controller_nodes'] | count + groups['all_compute_nodes'] | count ) * 2 + until: _result.stdout_lines | default([]) | list | count == ( groups['all_controller_nodes'] | count + groups['all_compute_nodes'] | count ) * 2 - name: "wait until all nodes are ready" ansible.builtin.shell: | @@ -39,32 +39,19 @@ oc get nodes | grep " Ready" args: executable: /bin/bash - register: cmd_res - changed_when: "cmd_res.rc == 0" + register: _result + changed_when: "_result.rc == 0" retries: 60 delay: 60 - until: cmd_res.stdout_lines | default([]) | list | count == ( (groups['all_controller_nodes'] | count) + (groups['all_compute_nodes'] | count) ) + until: _result.stdout_lines | default([]) | list | count == ( (groups['all_controller_nodes'] | count) + (groups['all_compute_nodes'] | count) ) - name: Copy the kubeconfig ansible.builtin.shell: | cp ~/install_dir/auth/kubeconfig ~/.kube/config args: executable: /bin/bash - register: copy_kubeconfig - changed_when: "copy_kubeconfig.rc == 0" - - - name: Get some final cluster information - ansible.builtin.shell: | - set -o pipefail - set -e - echo "show stat" | socat unix-connect:/var/lib/haproxy/stats stdio - export KUBECONFIG=~/install_dir/auth/kubeconfig - oc get nodes - args: - executable: /bin/bash - ignore_errors: yes - register: final_output_info - changed_when: "final_output_info.rc == 0" + register: _result + changed_when: "_result.rc == 0" # # Configure NFS @@ -75,9 +62,30 @@ public: true when: "'nfs' in 
kubeinit_cluster_hostvars.services" + # + # Deploy KubeVirt + # + - name: Deploy KubeVirt + ansible.builtin.include_role: + name: kubeinit.kubeinit.kubeinit_kubevirt + public: yes + when: not kubeinit_okd_openshift_deploy | default(False) + + - name: Get some final cluster information + ansible.builtin.shell: | + set -eo pipefail + echo "show stat" | socat unix-connect:/var/lib/haproxy/stats stdio + export KUBECONFIG=~/install_dir/auth/kubeconfig + oc get nodes + args: + executable: /bin/bash + ignore_errors: yes + register: _result_cluster_info + changed_when: "_result_cluster_info.rc == 0" + - name: Display final debug info ansible.builtin.debug: - var: final_output_info + var: _result_cluster_info - name: Print some final data vars: @@ -89,13 +97,6 @@ ansible.builtin.debug: msg: "{{ msg.split('\n') }}" - - name: Deploy KubeVirt - ansible.builtin.include_role: - name: kubeinit.kubeinit.kubeinit_kubevirt - tasks_from: main.yml - public: yes - when: not kubeinit_okd_openshift_deploy | default(False) - vars: kubeinit_deployment_node_name: "{{ kubeinit_provision_service_node }}" delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_okd/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_okd/tasks/prepare_cluster.yml index 1607eafd1..39b4aa89c 100644 --- a/kubeinit/roles/kubeinit_okd/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_okd/tasks/prepare_cluster.yml @@ -40,7 +40,7 @@ - name: Read openshift pullsecret ansible.builtin.slurp: src: "{{ kubeinit_okd_openshift_pullsecret }}" - register: openshift_pullsecret + register: _result_openshift_pullsecret delegate_to: localhost when: | kubeinit_okd_openshift_pullsecret is defined and @@ -61,28 +61,28 @@ ansible.builtin.shell: | set -eo pipefail {% if kubeinit_okd_openshift_pullsecret is defined and kubeinit_okd_openshift_pullsecret %} - echo '{{ openshift_pullsecret.content | b64decode | trim }}' | jq -c ".auths" + echo '{{ _result_openshift_pullsecret.content | b64decode | trim 
}}' | jq -c ".auths" {% else %} echo '{{ ocp_registry_token }}' | jq -c ".auths" {% endif %} args: executable: /bin/bash - register: ocp_registry_pullsecret - changed_when: "ocp_registry_pullsecret.rc == 0" + register: _result_registry_pullsecret + changed_when: "_result_registry_pullsecret.rc == 0" - name: Append openshift auth to pullsecret ansible.builtin.shell: | set -o pipefail echo '{{ kubeinit_okd_registry_pullsecret }}' | jq -c \ - '.auths += {{ ocp_registry_pullsecret.stdout }}' + '.auths += {{ _result_registry_pullsecret.stdout }}' args: executable: /bin/bash - register: ocp_new_pullsecret - changed_when: "ocp_new_pullsecret.rc == 0" + register: _result_new_pullsecret + changed_when: "_result_new_pullsecret.rc == 0" - name: Override final openshift kubeinit_okd_registry_pullsecret with both auths ansible.builtin.set_fact: - kubeinit_okd_registry_pullsecret: ' {{ ocp_new_pullsecret.stdout }}' + kubeinit_okd_registry_pullsecret: ' {{ _result_new_pullsecret.stdout }}' when: kubeinit_okd_openshift_deploy @@ -109,8 +109,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Delegate to the provision service node block: @@ -122,8 +120,8 @@ echo "{{ kubeinit_okd_service_cidr }}" > ~/service_cidr args: executable: /bin/bash - register: render_net_info - changed_when: "render_net_info.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install services requirements ansible.builtin.yum: @@ -158,8 +156,8 @@ openshift-install version args: executable: /bin/bash - register: install_client_and_installer - changed_when: "install_client_and_installer.rc == 0" + register: _result + changed_when: "_result.rc == 0" # # Configure local registry @@ -180,8 +178,8 @@ cat 
release.txt | grep quay | grep -v "Pull From" | tr -s ' ' | cut -d ' ' -f 3 > ~/kubeinit_deployment_images.txt args: executable: /bin/bash - register: render_images_list - changed_when: "render_images_list.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Mirror OKD remote registry to local ansible.builtin.shell: | @@ -207,8 +205,8 @@ openshift-install version args: executable: /bin/bash - register: mirror_registry - changed_when: "mirror_registry.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: "'registry' in kubeinit_cluster_hostvars.services" delegate_to: "{{ kubeinit_provision_service_node }}" @@ -268,16 +266,16 @@ cp ~/install_dir/install-config.yaml ~/install-config.yaml args: executable: /bin/bash - register: backup_install_config - changed_when: "backup_install_config.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Render the bootstrap details ansible.builtin.shell: | openshift-install create manifests --dir=install_dir/ args: executable: /bin/bash - register: render_bootstrap_details - changed_when: "render_bootstrap_details.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Enable master schedulable if there are no worker nodes ansible.builtin.shell: | @@ -289,8 +287,8 @@ sed -r "s/^(\s*${key}\s*:\s*).*/\1${new_value}/" -i "$yaml_file" args: executable: /bin/bash - register: enable_masters_sched - changed_when: "enable_masters_sched.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_compute_count | int == 0 - name: Disable master schedulable if there is at least one worker node @@ -303,8 +301,8 @@ sed -r "s/^(\s*${key}\s*:\s*).*/\1${new_value}/" -i "$yaml_file" args: executable: /bin/bash - register: disable_masters_sched - changed_when: "disable_masters_sched.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_compute_count | int > 0 - name: Render ignition files @@ -315,8 +313,8 @@ cp -R install_dir/* 
"/var/kubeinit/html/okd4/" args: executable: /bin/bash - register: render_ignition_files - changed_when: "render_ignition_files.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Apply permissions to the apache folder ansible.builtin.file: @@ -337,7 +335,7 @@ args: executable: /bin/bash ignore_errors: yes - register: autoload - changed_when: "autoload.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml b/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml index f3cbe79d9..e8fa2c206 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/cleanup_hypervisors.yml @@ -40,18 +40,15 @@ podman --remote system connection list | sed -e 1d -e 's/[* ].*//' args: executable: /bin/bash - register: list_remote_connections - changed_when: "list_remote_connections.rc == 0" + register: _result_connections + changed_when: "_result_connections.rc == 0" - name: Remove any existing remote system connection definition for bastion hypervisor - ansible.builtin.shell: | - set -eo pipefail + ansible.builtin.command: | podman --remote system connection remove {{ item }} - args: - executable: /bin/bash - loop: "{{ list_remote_connections.stdout_lines | list }}" - register: remove_remote_connection - changed_when: "remove_remote_connection.rc == 0" + loop: "{{ _result_connections.stdout_lines | list }}" + register: _result + changed_when: "_result.rc == 0" - name: Reset local ssh keys ansible.builtin.known_hosts: @@ -66,8 +63,8 @@ for host in $hosts; do ssh -O exit -S "~/.ssh/cm-%r@%h:%p" $host || true; done args: executable: /bin/bash - register: remove_ssh_tunnels - changed_when: "remove_ssh_tunnels.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_bastion_host }}" - name: Find any service pods from previous deployments @@ 
-77,14 +74,14 @@ loop_var: service_node vars: kubeinit_deployment_node_name: "{{ hostvars[service_node].container_host }}" - register: podman_pod_info + register: _result_podinfo delegate_to: "{{ kubeinit_deployment_node_name }}" when: hostvars[kubeinit_deployment_node_name].podman_is_installed is defined and hostvars[kubeinit_deployment_node_name].podman_is_installed - name: Set facts about those pods ansible.builtin.set_fact: orphaned_pods: "{{ (orphaned_pods | default([])) + ([service_node] | product(pods)) }}" - loop: "{{ podman_pod_info.results }}" + loop: "{{ _result_podinfo.results }}" loop_control: loop_var: pod_info_result vars: @@ -114,14 +111,14 @@ vars: service_node: "{{ orphaned_cluster_pod[0] }}" pod: "{{ orphaned_cluster_pod[1] }}" - register: pod_infra_container_info + register: _result_containerinfo delegate_to: "{{ hostvars[service_node].target }}" when: orphaned_cluster_pods is defined - name: Set facts about container netns ansible.builtin.set_fact: infra_container_sandbox_keys: "{{ (infra_container_sandbox_keys | default([])) + ([service_node] | product([sandbox_key])) }}" - loop: "{{ pod_infra_container_info.results }}" + loop: "{{ _result_containerinfo.results }}" loop_control: loop_var: infra_container_info vars: @@ -136,14 +133,14 @@ loop_var: service_node vars: kubeinit_deployment_node_name: "{{ hostvars[service_node].container_host }}" - register: podman_net_info + register: _result_netinfo delegate_to: "{{ kubeinit_deployment_node_name }}" when: hostvars[kubeinit_deployment_node_name].podman_is_installed is defined and hostvars[kubeinit_deployment_node_name].podman_is_installed - name: Set facts about those pod networks ansible.builtin.set_fact: orphaned_pod_nets: "{{ (orphaned_pod_nets | default([])) + ([service_node] | product(nets)) }}" - loop: "{{ podman_net_info.results }}" + loop: "{{ _result_netinfo.results }}" loop_control: loop_var: net_info_result vars: @@ -173,8 +170,8 @@ scope: user state: stopped enabled: false - register: 
service_stop - failed_when: service_stop is not defined + register: _result_stop_service + failed_when: _result_stop_service is not defined with_items: - "{{ groups['all_hosts'] | product(kubeinit_cluster_hostvars.services) | list }}" loop_control: @@ -194,7 +191,6 @@ vars: service_node: "{{ orphaned_cluster_pod[0] }}" pod: "{{ orphaned_cluster_pod[1] }}" - register: stop_podman_pods delegate_to: "{{ hostvars[service_node].target }}" when: orphaned_cluster_pods is defined @@ -208,7 +204,6 @@ vars: service_node: "{{ orphaned_cluster_pod_net[0] }}" network: "{{ orphaned_cluster_pod_net[1] }}" - register: stop_podman_nets delegate_to: "{{ hostvars[service_node].target }}" when: orphaned_cluster_pod_nets is defined @@ -222,7 +217,6 @@ vars: service_node: "{{ infra_container_sandbox_key[0] }}" sandbox_key: "{{ infra_container_sandbox_key[1] }}" - register: remove_netns delegate_to: "{{ hostvars[service_node].target }}" when: infra_container_sandbox_keys is defined @@ -233,14 +227,14 @@ loop_var: service_node vars: kubeinit_deployment_node_name: "{{ hostvars[service_node].container_host }}" - register: podman_vol_info + register: _result_volinfo delegate_to: "{{ kubeinit_deployment_node_name }}" when: hostvars[kubeinit_deployment_node_name].podman_is_installed is defined and hostvars[kubeinit_deployment_node_name].podman_is_installed - name: Set facts about those podman volumes ansible.builtin.set_fact: orphaned_pod_vols: "{{ (orphaned_pod_vols | default([])) + ([service_node] | product(vols)) }}" - loop: "{{ podman_vol_info.results }}" + loop: "{{ _result_volinfo.results }}" loop_control: loop_var: vol_info_result vars: @@ -271,7 +265,6 @@ vars: service_node: "{{ orphaned_cluster_pod_vol[0] }}" volume: "{{ orphaned_cluster_pod_vol[1] }}" - register: remove_podman_vols delegate_to: "{{ hostvars[service_node].target }}" when: orphaned_cluster_pod_vols is defined @@ -287,8 +280,8 @@ loop_var: cluster_role_item vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" 
- register: remove_buildah_containers - changed_when: "remove_buildah_containers.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Prune all podman resources @@ -303,8 +296,8 @@ loop_var: cluster_role_item vars: kubeinit_deployment_node_name: "{{ cluster_role_item }}" - register: prune_podman - changed_when: "prune_podman.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Remove any previous veth dev @@ -320,8 +313,8 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item[0] }}" ovs_veth_devname: "veth0-{{ hostvars[cluster_role_item[1]].ansible_host | ansible.netcommon.ip4_hex }}" - register: remove_veth_dev - changed_when: "remove_veth_dev.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Remove any previous cluster network endpoint from the openvswitch bridge @@ -337,8 +330,8 @@ vars: kubeinit_deployment_node_name: "{{ cluster_role_item[0] }}" ovs_veth_devname: "veth0-{{ hostvars[cluster_role_item[1]].ansible_host | ansible.netcommon.ip4_hex }}" - register: remove_veth_from_ovs - changed_when: "remove_veth_from_ovs.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Clean the OVN network diff --git a/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml b/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml index cd20e374f..2210e3b81 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/gather_host_facts.yml @@ -20,42 +20,42 @@ - name: Gather network facts ansible.builtin.gather_facts: gather_subset: "!all,network" - register: gather_results + register: _result_facts - name: Set distro_family for CentOS ansible.builtin.set_fact: distro_family: "CentOS" qemu_user: "qemu" host_os: "centos" - when: 
gather_results.ansible_facts.ansible_distribution == 'CentOS' + when: _result_facts.ansible_facts.ansible_distribution == 'CentOS' - name: Set distro_family for RedHat ansible.builtin.set_fact: distro_family: "CentOS" qemu_user: "qemu" host_os: "redhat" - when: gather_results.ansible_facts.ansible_distribution == 'RedHat' + when: _result_facts.ansible_facts.ansible_distribution == 'RedHat' - name: Set distro_family for Fedora ansible.builtin.set_fact: distro_family: "Fedora" qemu_user: "qemu" host_os: "fedora" - when: gather_results.ansible_facts.ansible_distribution == 'Fedora' + when: _result_facts.ansible_facts.ansible_distribution == 'Fedora' - name: Set distro_family for Debian ansible.builtin.set_fact: distro_family: "Debian" qemu_user: "libvirt-qemu" host_os: "debian" - when: gather_results.ansible_facts.ansible_distribution == 'Debian' + when: _result_facts.ansible_facts.ansible_distribution == 'Debian' - name: Set distro_family for Ubuntu ansible.builtin.set_fact: distro_family: "Debian" qemu_user: "libvirt-qemu" host_os: "ubuntu" - when: gather_results.ansible_facts.ansible_distribution == 'Ubuntu' + when: _result_facts.ansible_facts.ansible_distribution == 'Ubuntu' - name: Fails if OS is not supported ansible.builtin.fail: @@ -64,7 +64,7 @@ - name: Gather the services facts ansible.builtin.service_facts: - register: services_results + register: _result_services - name: Set firewalld_state to unknown ansible.builtin.set_fact: @@ -72,13 +72,13 @@ - name: Set firewalld_state when firewalld is defined ansible.builtin.set_fact: - firewalld_state: "{{ services_results.ansible_facts.services['firewalld'].state }}" - when: services_results.ansible_facts.services['firewalld'] is defined + firewalld_state: "{{ _result_services.ansible_facts.services['firewalld'].state }}" + when: _result_services.ansible_facts.services['firewalld'] is defined - name: Set firewalld_state when firewalld.service is defined ansible.builtin.set_fact: - firewalld_state: "{{ 
services_results.ansible_facts.services['firewalld.service'].state }}" - when: services_results.ansible_facts.services['firewalld.service'] is defined + firewalld_state: "{{ _result_services.ansible_facts.services['firewalld.service'].state }}" + when: _result_services.ansible_facts.services['firewalld.service'] is defined - name: Set firewalld_active ansible.builtin.set_fact: @@ -90,13 +90,13 @@ - name: Set podman_state when podman is defined ansible.builtin.set_fact: - podman_state: "{{ services_results.ansible_facts.services['podman'].state }}" - when: services_results.ansible_facts.services['podman'] is defined + podman_state: "{{ _result_services.ansible_facts.services['podman'].state }}" + when: _result_services.ansible_facts.services['podman'] is defined - name: Set podman_state when podman.service is defined ansible.builtin.set_fact: - podman_state: "{{ services_results.ansible_facts.services['podman.service'].state }}" - when: services_results.ansible_facts.services['podman.service'] is defined + podman_state: "{{ _result_services.ansible_facts.services['podman.service'].state }}" + when: _result_services.ansible_facts.services['podman.service'] is defined - name: Set podman_active ansible.builtin.set_fact: @@ -109,13 +109,13 @@ - name: Set libvirtd_state when libvirtd is defined ansible.builtin.set_fact: - libvirtd_state: "{{ services_results.ansible_facts.services['libvirtd'].state }}" - when: services_results.ansible_facts.services['libvirtd'] is defined + libvirtd_state: "{{ _result_services.ansible_facts.services['libvirtd'].state }}" + when: _result_services.ansible_facts.services['libvirtd'] is defined - name: Set libvirtd_state when libvirtd.service is defined ansible.builtin.set_fact: - libvirtd_state: "{{ services_results.ansible_facts.services['libvirtd.service'].state }}" - when: services_results.ansible_facts.services['libvirtd.service'] is defined + libvirtd_state: "{{ _result_services.ansible_facts.services['libvirtd.service'].state }}" + 
when: _result_services.ansible_facts.services['libvirtd.service'] is defined - name: Set libvirtd_active ansible.builtin.set_fact: @@ -124,36 +124,36 @@ - name: Get all the libvirt networks community.libvirt.virt_net: command: list_nets - register: running_nets + register: _result_nets when: libvirtd_active and kubeinit_deployment_node_name in groups['hypervisor_hosts'] - name: Get all the libvirt VMs community.libvirt.virt: command: list_vms - register: running_vms + register: _result_vms when: libvirtd_active and kubeinit_deployment_node_name in groups['hypervisor_hosts'] - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) on hypervisor hosts community.crypto.openssh_keypair: path: "~/.ssh/{{ kubeinit_inventory_cluster_name }}_id_rsa" regenerate: 'never' - register: openssh_keypair + register: _result_keypair when: kubeinit_deployment_node_name in (['localhost'] + groups['hypervisor_hosts']) - name: Set ssh_host_key_info ansible.builtin.set_fact: - ssh_host_key_info: "{{ gather_results.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype }} {{ gather_results.ansible_facts.ansible_ssh_host_key_ecdsa_public }}" + ssh_host_key_info: "{{ _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype }} {{ _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public }}" when: > - gather_results.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype is defined and - gather_results.ansible_facts.ansible_ssh_host_key_ecdsa_public is defined + _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public_keytype is defined and + _result_facts.ansible_facts.ansible_ssh_host_key_ecdsa_public is defined - name: Add ansible facts to hostvars ansible.builtin.add_host: name: "{{ kubeinit_deployment_node_name }}" - ansible_default_ipv4_address: "{{ gather_results.ansible_facts.ansible_default_ipv4.address }}" - ansible_hostname: "{{ gather_results.ansible_facts.ansible_hostname }}" - ansible_distribution: "{{ 
gather_results.ansible_facts.ansible_distribution }}" - ansible_distribution_major_version: "{{ gather_results.ansible_facts.ansible_distribution_major_version }}" + ansible_default_ipv4_address: "{{ _result_facts.ansible_facts.ansible_default_ipv4.address }}" + ansible_hostname: "{{ _result_facts.ansible_facts.ansible_hostname }}" + ansible_distribution: "{{ _result_facts.ansible_facts.ansible_distribution }}" + ansible_distribution_major_version: "{{ _result_facts.ansible_facts.ansible_distribution_major_version }}" distribution_family: "{{ distro_family }}" ssh_host_key_ecdsa: "{{ ssh_host_key_info | default(omit) }}" libvirt_qemu_user: "{{ qemu_user }}" @@ -161,11 +161,11 @@ firewalld_is_active: "{{ firewalld_active }}" podman_is_installed: "{{ podman_installed }}" podman_is_active: "{{ podman_active }}" - libvirt_nets: "{{ running_nets.list_nets | default([]) }}" - libvirt_vms: "{{ running_vms.list_vms | default([]) }}" - public_key: "{{ openssh_keypair.public_key | default(omit) }}" - ssh_connection_address: "{{ 'localhost' if (kubeinit_deployment_node_name == 'localhost') else gather_results.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" - runtime_path: "{{ gather_results.ansible_facts.ansible_env['XDG_RUNTIME_DIR'] | default('') | string }}" + libvirt_nets: "{{ _result_nets.list_nets | default([]) }}" + libvirt_vms: "{{ _result_vms.list_vms | default([]) }}" + public_key: "{{ _result_keypair.public_key | default(omit) }}" + ssh_connection_address: "{{ 'localhost' if (kubeinit_deployment_node_name == 'localhost') else _result_facts.ansible_facts.ansible_env['SSH_CONNECTION'].split(' ')[2] }}" + runtime_path: "{{ _result_facts.ansible_facts.ansible_env['XDG_RUNTIME_DIR'] | default('') | string }}" - name: Update kubeinit_cluster_hostvars ansible.builtin.set_fact: @@ -174,10 +174,10 @@ - name: Clear results ansible.builtin.set_fact: - gather_results: null - service_results: null - running_nets: null - running_vms: null - openssh_keypair: null + 
_result_facts: null + _result_services: null + _result_nets: null + _result_vms: null + _result_keypair: null delegate_to: "{{ kubeinit_deployment_node_name }}" diff --git a/kubeinit/roles/kubeinit_prepare/tasks/main.yml b/kubeinit/roles/kubeinit_prepare/tasks/main.yml index d36dd9342..d72845e9c 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/main.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/main.yml @@ -56,6 +56,8 @@ kubeinit_hypervisor_count: "{{ groups['hypervisor_hosts'] | length }}" kubeinit_controller_count: "{{ groups['all_controller_nodes'] | length }}" kubeinit_compute_count: "{{ groups['all_compute_nodes'] | length }}" + loop: "{{ ['localhost'] | union(groups['hypervisor_hosts']) }}" + delegate_to: "{{ item }}" - name: Install podman if required ansible.builtin.package: @@ -73,6 +75,8 @@ kubeinit_nfs_service_node: "{{ kubeinit_cluster_hostvars.services['nfs'] | default(None) }}" kubeinit_provision_service_node: "{{ kubeinit_cluster_hostvars.services['provision'] }}" kubeinit_registry_service_node: "{{ kubeinit_cluster_hostvars.services['registry'] | default(None) }}" + loop: "{{ ['localhost'] | union(groups['hypervisor_hosts']) }}" + delegate_to: "{{ item }}" - name: Gather facts from bastion host if it is not a hypervisor host block: @@ -129,6 +133,8 @@ kubeinit_service_user: "root" kubeinit_service_user_dir: "/root" kubeinit_ingress_hostname: "ingress" + loop: "{{ ['localhost'] | union(groups['hypervisor_hosts']) }}" + delegate_to: "{{ item }}" - name: Add ssh ProxyCommand option for all nodes ansible.builtin.add_host: diff --git a/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml b/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml index 4e8fada6c..3ea71bbfb 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/prepare_groups.yml @@ -72,7 +72,7 @@ services: "{{ hostvars[item].services.split(',') }}" target: "{{ hostvars[item].target }}" type: container - container_host: "{{ 
item if (false and hostvars[item].type == 'virtual') else hostvars[item].target }}" + container_host: "{{ hostvars[item].target }}" loop: "{{ groups['service_nodes'] | default([]) | list }}" - name: Create tuples for each service and each service node providing that service @@ -196,7 +196,7 @@ ansible.builtin.add_host: name: "{{ item }}" group: all_guest_vms - loop: "{{ groups['all_service_nodes'] + groups['all_cluster_nodes'] + (groups['extra_nodes'] | default([])) }}" + loop: "{{ groups['all_cluster_nodes'] + (groups['extra_nodes'] | default([])) }}" when: hostvars[item].type == 'virtual' - name: Collect all of the node aliases diff --git a/kubeinit/roles/kubeinit_prepare/tasks/prepare_podman.yml b/kubeinit/roles/kubeinit_prepare/tasks/prepare_podman.yml index 966857018..d0ee1b948 100644 --- a/kubeinit/roles/kubeinit_prepare/tasks/prepare_podman.yml +++ b/kubeinit/roles/kubeinit_prepare/tasks/prepare_podman.yml @@ -60,7 +60,7 @@ - name: Check if kubeinit_common_docker_password path exists ansible.builtin.stat: path: "{{ kubeinit_common_docker_password }}" - register: docker_password_in_file + register: _result_passwordfile delegate_to: localhost no_log: true when: | @@ -72,7 +72,7 @@ - name: Read docker password from file when the variable has the path ansible.builtin.slurp: src: "{{ kubeinit_common_docker_password }}" - register: docker_password + register: _result_password delegate_to: localhost no_log: true when: | @@ -80,12 +80,12 @@ kubeinit_common_docker_password is defined and kubeinit_common_docker_username and kubeinit_common_docker_password and - docker_password_in_file.stat.exists + _result_passwordfile.stat.exists - name: Podman login to docker.io containers.podman.podman_login: username: "{{ kubeinit_common_docker_username }}" - password: "{{ (docker_password.content | b64decode | trim) if (docker_password_in_file.stat.exists) else (kubeinit_common_docker_password) }}" + password: "{{ (_result_password.content | b64decode | trim) if
(_result_passwordfile.stat.exists) else (kubeinit_common_docker_password) }}" registry: "docker.io" no_log: true when: | @@ -96,7 +96,7 @@ - name: Clear any reference to docker password ansible.builtin.set_fact: - docker_password: null + _result_password: null no_log: true when: | kubeinit_common_docker_username is defined and diff --git a/kubeinit/roles/kubeinit_rke/tasks/main.yml b/kubeinit/roles/kubeinit_rke/tasks/main.yml index 5854befc8..edb001ea7 100644 --- a/kubeinit/roles/kubeinit_rke/tasks/main.yml +++ b/kubeinit/roles/kubeinit_rke/tasks/main.yml @@ -41,7 +41,6 @@ vars: kubeinit_deployment_node_name: "{{ cluster_node }}" kubeinit_deployment_delegate: "{{ hostvars[cluster_node].target }}" - kubeinit_deployment_role: cluster when: kubeinit_cluster_nodes_deployed is not defined or not kubeinit_cluster_nodes_deployed - name: Add cluster authorized keys in cluster nodes diff --git a/kubeinit/roles/kubeinit_rke/tasks/post_deployment_tasks.yml b/kubeinit/roles/kubeinit_rke/tasks/post_deployment_tasks.yml index 846aedb70..2708738b9 100644 --- a/kubeinit/roles/kubeinit_rke/tasks/post_deployment_tasks.yml +++ b/kubeinit/roles/kubeinit_rke/tasks/post_deployment_tasks.yml @@ -28,10 +28,8 @@ delegate_to: "{{ kubeinit_deployment_node_name }}" - name: Touch a file - ansible.builtin.shell: | + ansible.builtin.command: | touch ~/.kube/config - args: - executable: /bin/bash - register: touch_kubeconfig - changed_when: "touch_kubeconfig.rc == 0" + register: _result + changed_when: "_result.rc == 0" delegate_to: "{{ kubeinit_provision_service_node }}" diff --git a/kubeinit/roles/kubeinit_rke/tasks/prepare_cluster.yml b/kubeinit/roles/kubeinit_rke/tasks/prepare_cluster.yml index 93173371b..81bbcd379 100644 --- a/kubeinit/roles/kubeinit_rke/tasks/prepare_cluster.yml +++ b/kubeinit/roles/kubeinit_rke/tasks/prepare_cluster.yml @@ -38,8 +38,6 @@ kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ 
hostvars[kubeinit_provision_service_node].target }}" kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Configure the provision service node block: diff --git a/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml b/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml index 2f672db7e..888b3959e 100644 --- a/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml +++ b/kubeinit/roles/kubeinit_services/tasks/create_managed_service.yml @@ -18,8 +18,8 @@ - name: Ensure user specific systemd instance are persistent ansible.builtin.command: | /usr/bin/loginctl enable-linger {{ kubeinit_service_user }} - register: systemd_instance_persist - changed_when: "systemd_instance_persist.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Create systemd user directory ansible.builtin.file: diff --git a/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml b/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml index 3fd313450..3b076ec08 100644 --- a/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml +++ b/kubeinit/roles/kubeinit_services/tasks/create_provision_container.yml @@ -31,89 +31,89 @@ fi args: executable: /bin/bash - register: provision_buildah_rm_img - changed_when: "provision_buildah_rm_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Setup CentOS container image block: - name: Create a new working container image (CentOS) ansible.builtin.command: buildah from --name buildah-provision quay.io/centos/centos:stream8 - register: provision_buildah_create_img - changed_when: "provision_buildah_create_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Update the container ansible.builtin.command: buildah run buildah-provision -- dnf update -q -y - register: provision_buildah_update_packages - changed_when: 
"provision_buildah_update_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install commands and services we will need ansible.builtin.command: buildah run buildah-provision -- dnf install -q -y systemd openssh openssh-server openssh-clients procps iproute iputils net-tools python3 python3-pip jq - register: provision_buildah_install_packages - changed_when: "provision_buildah_install_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'centos' - name: Setup Debian container image block: - name: Create a new working container image ansible.builtin.command: buildah from --name buildah-provision docker.io/debian:{{ kubeinit_libvirt_debian_release }} - register: provision_buildah_create_img - changed_when: "provision_buildah_create_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Update the container ansible.builtin.command: buildah run buildah-provision -- apt-get update -q -y - register: provision_buildah_update_packages - changed_when: "provision_buildah_update_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install commands and services we will need ansible.builtin.command: buildah run buildah-provision -- env DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -q -y systemd openssh-server openssh-client procps iproute2 iputils-ping net-tools python3 python3-pip jq curl - register: provision_buildah_install_packages - changed_when: "provision_buildah_install_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Missing privilege separation directory ansible.builtin.command: buildah run buildah-provision -- mkdir -p /run/sshd - register: provision_buildah_fix_sshd - changed_when: "provision_buildah_fix_sshd.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'debian' - name: Setup Ubuntu container image block: - name: Create a new 
working container image ansible.builtin.command: buildah from --name buildah-provision docker.io/ubuntu:{{ kubeinit_libvirt_ubuntu_release }} - register: provision_buildah_create_img - changed_when: "provision_buildah_create_img.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Update the container ansible.builtin.command: buildah run buildah-provision -- apt-get update -q -y - register: provision_buildah_update_packages - changed_when: "provision_buildah_update_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Install commands and services we will need ansible.builtin.command: buildah run buildah-provision -- env DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -q -y systemd openssh-server openssh-client procps iproute2 iputils-ping net-tools python3 python3-pip jq curl - register: provision_buildah_install_packages - changed_when: "provision_buildah_install_packages.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Create folder normally done by service ssh start ansible.builtin.command: buildah run buildah-provision -- mkdir /run/sshd - register: provision_buildah_create_folder - changed_when: "provision_buildah_create_folder.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'ubuntu' - name: Set working directory inside container ansible.builtin.command: buildah config --workingdir {{ kubeinit_service_user_dir }} buildah-provision - register: provision_buildah_set_workingdir - changed_when: "provision_buildah_set_workingdir.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Generate system ssh keys ansible.builtin.command: buildah run buildah-provision -- bash -c "(cd /etc/ssh; ssh-keygen -A)" - register: provision_buildah_keygen - changed_when: "provision_buildah_keygen.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Clear cmd ansible.builtin.command: buildah config --cmd 
'' buildah-provision - register: provision_buildah_clear_cmd - changed_when: "provision_buildah_clear_cmd.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Set entrypoint ansible.builtin.command: buildah config --entrypoint '["/sbin/init"]' buildah-provision - register: provision_buildah_set_entrypoint - changed_when: "provision_buildah_set_entrypoint.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Commit the image ansible.builtin.command: buildah commit buildah-provision kubeinit/kubeinit-provision:latest - register: provision_buildah_commit_image - changed_when: "provision_buildah_commit_image.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Remove any previous provision container containers.podman.podman_container: @@ -189,8 +189,8 @@ python3 -m pip install -q cryptography==3.3.2 passlib nexus3-cli args: executable: /bin/bash - register: provision_pip_install - changed_when: "provision_pip_install.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Remove nologin marker ansible.builtin.file: @@ -218,12 +218,12 @@ '.auths += {{ disconnected_auth }}' args: executable: /bin/bash - register: new_pullsecret - changed_when: "new_pullsecret.rc == 0" + register: _result_pullsecret + changed_when: "_result_pullsecret.rc == 0" - name: Override final kubeinit_registry_pullsecret with both auths ansible.builtin.set_fact: - kubeinit_registry_pullsecret: ' {{ new_pullsecret.stdout }}' + kubeinit_registry_pullsecret: ' {{ _result_pullsecret.stdout }}' - name: Debug the creds dictionary ansible.builtin.debug: @@ -288,20 +288,20 @@ cp {{ kubeinit_registry_directory_cert }}/* /usr/local/share/ca-certificates/kubeinit/ args: executable: /bin/bash - register: install_all_certs - changed_when: "install_all_certs.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'ubuntu' or kubeinit_deployment_os == 'debian' - name: Update the CA trust files 
ansible.builtin.command: update-ca-trust extract - register: update_ca_trust_files - changed_when: "update_ca_trust_files.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'centos' - name: Update the CA trust files ansible.builtin.command: update-ca-certificates - register: update_ca_certs - changed_when: "update_ca_certs.rc == 0" + register: _result + changed_when: "_result.rc == 0" when: kubeinit_deployment_os == 'ubuntu' or kubeinit_deployment_os == 'debian' # @@ -321,7 +321,7 @@ community.crypto.openssh_keypair: path: "~/.ssh/id_rsa" regenerate: 'never' - register: provision_service_keypair + register: _result_provision_service_keypair - name: Install cluster authorized keys ansible.posix.authorized_key: @@ -330,7 +330,7 @@ state: present with_items: - "{{ kubeinit_cluster_hostvars.authorized_keys }}" - - "{{ provision_service_keypair.public_key }}" + - "{{ _result_provision_service_keypair.public_key }}" vars: ansible_ssh_pipelining: False @@ -338,7 +338,7 @@ - name: Add provision service public key to cluster authorized_keys ansible.builtin.set_fact: - authorized_keys_with_provision: "{{ kubeinit_cluster_hostvars.authorized_keys | union([provision_service_keypair.public_key]) }}" + authorized_keys_with_provision: "{{ kubeinit_cluster_hostvars.authorized_keys | union([_result_provision_service_keypair.public_key]) }}" - name: Update the cluster authorized_keys ansible.builtin.add_host: @@ -370,11 +370,11 @@ root@{{ hostvars[kubeinit_deployment_node_name].ansible_host }} 'echo connected' || true args: executable: /bin/bash - register: cmd_boot_ok_res - changed_when: "cmd_boot_ok_res.rc == 0" + register: _result + changed_when: "_result.rc == 0" retries: 30 delay: 10 - until: "'connected' in cmd_boot_ok_res.stdout" + until: "'connected' in _result.stdout" delegate_to: "{{ kubeinit_bastion_host }}" - name: Gather network and host facts for guest diff --git a/kubeinit/roles/kubeinit_services/tasks/main.yml 
b/kubeinit/roles/kubeinit_services/tasks/main.yml index 808efd6c6..020c1f55d 100644 --- a/kubeinit/roles/kubeinit_services/tasks/main.yml +++ b/kubeinit/roles/kubeinit_services/tasks/main.yml @@ -32,10 +32,7 @@ when: kubeinit_stop_before_task is defined and kubeinit_stop_before_task == 'task-create-services' - name: Setup the services pod - ansible.builtin.include_role: - name: kubeinit.kubeinit.kubeinit_services - tasks_from: 00_create_service_pod.yml - public: true + ansible.builtin.include_tasks: 00_create_service_pod.yml with_items: - "{{ groups['all_service_nodes'] | list }}" loop_control: @@ -45,7 +42,6 @@ kubeinit_deployment_bridge_name: "{{ hostvars[cluster_role_item].guest_name }}-bridge" kubeinit_deployment_pod_name: "{{ hostvars[cluster_role_item].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - kubeinit_deployment_role: service - name: Prepare the credentials we are going to use in the cluster ansible.builtin.include_tasks: prepare_credentials.yml @@ -53,15 +49,9 @@ kubeinit_deployment_node_name: "{{ kubeinit_provision_service_node }}" kubeinit_deployment_pod_name: "{{ hostvars[kubeinit_provision_service_node].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[kubeinit_provision_service_node].target }}" - kubeinit_deployment_os: "{{ hostvars[kubeinit_provision_service_node].os }}" - kubeinit_deployment_type: container - kubeinit_deployment_role: provision - name: Start services containers - ansible.builtin.include_role: - name: kubeinit.kubeinit.kubeinit_services - tasks_from: start_services_containers.yml - public: true + ansible.builtin.include_tasks: start_services_containers.yml with_items: - "{{ groups['all_service_nodes'] | list }}" loop_control: @@ -70,7 +60,6 @@ kubeinit_deployment_node_name: "{{ cluster_role_item }}" kubeinit_deployment_pod_name: "{{ hostvars[cluster_role_item].guest_name }}-pod" kubeinit_deployment_delegate: "{{ hostvars[cluster_role_item].target }}" - 
kubeinit_deployment_role: service - name: Add task-create-services to tasks_completed ansible.builtin.add_host: diff --git a/kubeinit/roles/kubeinit_services/tasks/prepare_credentials.yml b/kubeinit/roles/kubeinit_services/tasks/prepare_credentials.yml index 4e327a2ec..8e9fd3445 100644 --- a/kubeinit/roles/kubeinit_services/tasks/prepare_credentials.yml +++ b/kubeinit/roles/kubeinit_services/tasks/prepare_credentials.yml @@ -80,8 +80,8 @@ python3 -m pip install cryptography==3.3.2 passlib args: executable: /bin/bash - register: install_passlib - changed_when: "install_passlib.rc == 0" + register: _result + changed_when: "_result.rc == 0" - name: Create directory to hold the registry files ansible.builtin.file: diff --git a/kubeinit/roles/kubeinit_validations/tasks/10_libvirt_free_space.yml b/kubeinit/roles/kubeinit_validations/tasks/10_libvirt_free_space.yml index 53a2ebb9f..fd796bea6 100644 --- a/kubeinit/roles/kubeinit_validations/tasks/10_libvirt_free_space.yml +++ b/kubeinit/roles/kubeinit_validations/tasks/10_libvirt_free_space.yml @@ -14,11 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
- # # Make sure the mount path of libvirt has enough space in the hypervisor # - - name: Get libvirt hypervisors directory free space ansible.builtin.shell: | set -o pipefail @@ -40,20 +38,20 @@ ' | jq .spaceavail | tr -d '"' args: executable: /bin/bash - register: kubeinit_validations_libvirt_free_space + register: _result_free_storage with_items: - "{{ groups['all_hosts'] | list }}" delegate_to: "{{ item }}" - changed_when: "kubeinit_validations_libvirt_free_space.rc == 0" + changed_when: "_result_free_storage.rc == 0" - name: Debug ansible.builtin.debug: - var: kubeinit_validations_libvirt_free_space + var: _result_free_storage - name: Define the hypervisors disk free space dictionary ansible.builtin.set_fact: kubeinit_validations_hypervisors_free_disk_space: "{{ kubeinit_validations_hypervisors_free_disk_space|default([]) | combine( {item.item: item.stdout[:-1]|int} ) }}" - with_items: "{{ kubeinit_validations_libvirt_free_space.results }}" + with_items: "{{ _result_free_storage.results }}" - name: Debug ansible.builtin.debug: diff --git a/kubeinit/roles/kubeinit_validations/tasks/20_libvirt_available_ram.yml b/kubeinit/roles/kubeinit_validations/tasks/20_libvirt_available_ram.yml index 7a8f75cfa..a993638db 100644 --- a/kubeinit/roles/kubeinit_validations/tasks/20_libvirt_available_ram.yml +++ b/kubeinit/roles/kubeinit_validations/tasks/20_libvirt_available_ram.yml @@ -14,36 +14,33 @@ # License for the specific language governing permissions and limitations # under the License. 
- # # Make sure the mount of RAM if enough for all machines # - - name: Get hypervisor total RAM ansible.builtin.shell: | set -o pipefail free --kilo | grep ^Mem | tr -s ' ' | cut -d ' ' -f 2 args: executable: /bin/bash - register: kubeinit_validations_libvirt_total_ram + register: _result_total_ram with_items: - "{{ groups['all_hosts'] | list }}" delegate_to: "{{ item }}" - changed_when: "kubeinit_validations_libvirt_total_ram.rc == 0" + changed_when: "_result_total_ram.rc == 0" - name: Debug ansible.builtin.debug: - var: kubeinit_validations_libvirt_total_ram + var: _result_total_ram - name: Define the hypervisors ram available dictionary ansible.builtin.set_fact: kubeinit_validations_hypervisors_free_ram: "{{ kubeinit_validations_hypervisors_free_ram|default([]) | combine( {item.item: item.stdout|int} ) }}" - with_items: "{{ kubeinit_validations_libvirt_total_ram.results }}" + with_items: "{{ _result_total_ram.results }}" - name: Debug ansible.builtin.debug: - var: kubeinit_validations_libvirt_total_ram - + var: kubeinit_validations_hypervisors_free_ram - name: Get the total inventory ram usage ansible.builtin.set_fact: diff --git a/kubeinit/roles/kubeinit_validations/tasks/30_libvirt_check_cpu_cores.yml b/kubeinit/roles/kubeinit_validations/tasks/30_libvirt_check_cpu_cores.yml index 8c1a64235..e9ed0b644 100644 --- a/kubeinit/roles/kubeinit_validations/tasks/30_libvirt_check_cpu_cores.yml +++ b/kubeinit/roles/kubeinit_validations/tasks/30_libvirt_check_cpu_cores.yml @@ -25,24 +25,24 @@ cat /proc/cpuinfo | grep processor | grep : | wc -l args: executable: /bin/bash - register: kubeinit_validations_libvirt_total_cores + register: _result_total_cores with_items: - "{{ groups['all_hosts'] | list }}" delegate_to: "{{ item }}" - changed_when: "kubeinit_validations_libvirt_total_cores.rc == 0" + changed_when: "_result_total_cores.rc == 0" - name: Debug ansible.builtin.debug: - var: kubeinit_validations_libvirt_total_cores + var: _result_total_cores - name: Define the 
hypervisors available cores dictionary ansible.builtin.set_fact: kubeinit_validations_hypervisors_available_cores: "{{ kubeinit_validations_hypervisors_available_cores|default([]) | combine( {item.item: item.stdout|int} ) }}" - with_items: "{{ kubeinit_validations_libvirt_total_cores.results }}" + with_items: "{{ _result_total_cores.results }}" - name: Debug ansible.builtin.debug: - var: kubeinit_validations_libvirt_total_cores + var: kubeinit_validations_hypervisors_available_cores - name: Get the total inventory cores usage ansible.builtin.set_fact: