From fd269ae9b87bd961115476b0a51a8b80413a479f Mon Sep 17 00:00:00 2001 From: zenntrix Date: Mon, 18 Oct 2021 14:48:00 +0100 Subject: [PATCH 01/31] Add support for debian bullseye whilst removing support for EOL stretch --- README.md | 7 +- defaults/main.yml | 2 +- .../00_remove_checked_command_bullseye.patch | 13 ++++ files/00_remove_checked_command_stretch.patch | 70 ------------------- files/proxmox-ve-release-5.x.asc | 30 -------- files/proxmox-ve-release-7.x.asc | 29 ++++++++ meta/main.yml | 2 +- tasks/ceph.yml | 29 +++----- tasks/load_variables.yml | 10 --- tasks/main.yml | 4 ++ tasks/pve_add_node.yml | 7 +- tasks/pve_cluster_config.yml | 7 +- tests/install.yml | 4 +- tests/inventory | 6 +- ...debian-stretch.yml => debian-bullseye.yml} | 4 +- 15 files changed, 71 insertions(+), 153 deletions(-) create mode 100644 files/00_remove_checked_command_bullseye.patch delete mode 100644 files/00_remove_checked_command_stretch.patch delete mode 100644 files/proxmox-ve-release-5.x.asc create mode 100644 files/proxmox-ve-release-7.x.asc rename vars/{debian-stretch.yml => debian-bullseye.yml} (61%) diff --git a/README.md b/README.md index 82216861..72379061 100644 --- a/README.md +++ b/README.md @@ -376,7 +376,7 @@ serially during a maintenance period.) It will also enable the IPMI watchdog. ``` [variable]: [default] #[description/purpose] pve_group: proxmox # host group that contains the Proxmox hosts to be clustered together -pve_repository_line: "deb http://download.proxmox.com/debian/pve stretch pve-no-subscription" # apt-repository configuration - change to enterprise if needed (although TODO further configuration may be needed) +pve_repository_line: "deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription" # apt-repository configuration - change to enterprise if needed (although TODO further configuration may be needed) pve_remove_subscription_warning: true # patches the subscription warning messages in proxmox if you are using the community edition pve_extra_packages: [] # Any extra packages you may want to install, e.g. ngrep pve_run_system_upgrades: false # Let role perform system upgrades @@ -391,7 +391,7 @@ pve_zfs_enabled: no # Specifies whether or not to install and configure ZFS pack # pve_zfs_options: "" # modprobe parameters to pass to zfs module on boot/modprobe # pve_zfs_zed_email: "" # Should be set to an email to receive ZFS notifications pve_ceph_enabled: false # Specifies wheter or not to install and configure Ceph packages. See below for an example configuration. -pve_ceph_repository_line: "deb http://download.proxmox.com/debian/ceph-nautilus buster main" # apt-repository configuration. Will be automatically set for 5.x and 6.x (Further information: https://pve.proxmox.com/wiki/Package_Repositories) +pve_ceph_repository_line: "deb http://download.proxmox.com/debian/ceph-pacific bullseye main" # apt-repository configuration. 
Will be automatically set for 6.x and 7.x (Further information: https://pve.proxmox.com/wiki/Package_Repositories) pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" # Ceph public network # pve_ceph_cluster_network: "" # Optional, if the ceph cluster network is different from the public network (see https://pve.proxmox.com/pve-docs/chapter-pveceph.html#pve_ceph_install_wizard) pve_ceph_mon_group: "{{ pve_group }}" # Host group containing all Ceph monitor hosts @@ -678,7 +678,8 @@ pve_ceph_fs: Musee Ullah ([@lae](https://github.com/lae), ) - Main developer Fabien Brachere ([@Fbrachere](https://github.com/Fbrachere)) - Storage config support -Gaudenz Steinlin ([@gaundez](https://github.com/gaudenz)) - Ceph support, etc +Gaudenz Steinlin ([@gaundez](https://github.com/gaudenz)) - Ceph support, etc +Richard Scott ([@zenntrix](https://github.com/zenntrix)) - Ceph support, PVE 7.x support, etc Thoralf Rickert-Wendt ([@trickert76](https://github.com/trickert76)) - PVE 6.x support, etc Engin Dumlu ([@roadrunner](https://github.com/roadrunner)) Jonas Meurer ([@mejo-](https://github.com/mejo-)) diff --git a/defaults/main.yml b/defaults/main.yml index a806dd54..4cfd5898 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -16,7 +16,7 @@ pve_zfs_enabled: no # pve_zfs_options: "parameters to pass to zfs module" # pve_zfs_zed_email: "email address for zfs events" pve_ceph_enabled: false -pve_ceph_repository_line: "deb http://download.proxmox.com/debian/{% if ansible_distribution_release == 'stretch' %}ceph-luminous stretch{% else %}ceph-nautilus buster{% endif %} main" +pve_ceph_repository_line: "deb http://download.proxmox.com/debian/{% if ansible_distribution_release == 'buster' %}ceph-nautilus buster{% else %}ceph-pacific bullseye{% endif %} main" pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" pve_ceph_mon_group: "{{ pve_group }}" pve_ceph_mgr_group: "{{ pve_ceph_mon_group }}" diff --git a/files/00_remove_checked_command_bullseye.patch b/files/00_remove_checked_command_bullseye.patch new file mode 100644 index 00000000..c244faa0 --- /dev/null +++ b/files/00_remove_checked_command_bullseye.patch @@ -0,0 +1,13 @@ +diff -u /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js +--- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ++++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js +@@ -493,7 +493,8 @@ + }, + + checked_command: function(orig_cmd) { +- Proxmox.Utils.API2Request( ++ orig_cmd(); ++ false && Proxmox.Utils.API2Request( + { + url: '/nodes/localhost/subscription', + method: 'GET', diff --git a/files/00_remove_checked_command_stretch.patch b/files/00_remove_checked_command_stretch.patch deleted file mode 100644 index af39a58d..00000000 --- a/files/00_remove_checked_command_stretch.patch +++ /dev/null @@ -1,70 +0,0 @@ -diff -ur /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ---- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2018-02-16 12:06:39.000000000 +0000 -+++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2018-03-03 05:43:35.083364592 +0000 -@@ -342,37 +342,6 @@ - Ext.Ajax.requestopts); - }, - -- checked_command: function(orig_cmd) { -- Proxmox.Utils.API2Request({ -- url: '/nodes/localhost/subscription', -- method: 'GET', -- //waitMsgTarget: me, -- failure: function(response, opts) { -- 
Ext.Msg.alert(gettext('Error'), response.htmlStatus); -- }, -- success: function(response, opts) { -- var data = response.result.data; -- -- if (data.status !== 'Active') { -- Ext.Msg.show({ -- title: gettext('No valid subscription'), -- icon: Ext.Msg.WARNING, -- msg: Proxmox.Utils.getNoSubKeyHtml(data.url), -- buttons: Ext.Msg.OK, -- callback: function(btn) { -- if (btn !== 'ok') { -- return; -- } -- orig_cmd(); -- } -- }); -- } else { -- orig_cmd(); -- } -- } -- }); -- }, -- - assemble_field_data: function(values, data) { - if (Ext.isObject(data)) { - Ext.Object.each(data, function(name, val) { -diff -ur /usr/share/pve-manager/js/pvemanagerlib.js /usr/share/pve-manager/js/pvemanagerlib.js ---- /usr/share/pve-manager/js/pvemanagerlib.js 2018-02-16 14:07:52.000000000 +0000 -+++ /usr/share/pve-manager/js/pvemanagerlib.js 2018-03-03 05:48:35.567396692 +0000 -@@ -13441,7 +13441,7 @@ - var version_btn = new Ext.Button({ - text: gettext('Package versions'), - handler: function(){ -- Proxmox.Utils.checked_command(function() { me.showVersions(); }); -+ me.showVersions(); - } - }); - -@@ -13691,7 +13691,7 @@ - { - text: gettext('System Report'), - handler: function() { -- Proxmox.Utils.checked_command(function (){ me.showReport(); }); -+ me.showReport(); - } - } - ], -@@ -30605,7 +30605,6 @@ - handler: function(data) { - me.login = null; - me.updateLoginData(data); -- Proxmox.Utils.checked_command(function() {}); // display subscription status - } - }); - } diff --git a/files/proxmox-ve-release-5.x.asc b/files/proxmox-ve-release-5.x.asc deleted file mode 100644 index 1c3dffd5..00000000 --- a/files/proxmox-ve-release-5.x.asc +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v2.2.1 (GNU/Linux) - -mQINBFfDyocBEADBqGXU2sVZeyJhjvcYHbkzcjfP9OKBgkPmpKNG8kP+fT+OsX8U -FCAmIKXMOd/3fWhdSv7V2/3JaiEmsYn1a1vWhIlgFj2VonE/YS9JqqW7suodSon0 -b52XNwxRisOapU40EOUEjSGhoVUuvNNFkXImKEdtIgzVyyFCf2pj+TXBGWhOtCtK -du/zctioq85HR3Zk1YokJCho4/uRU7bElmLNFHSmI7jAU33jmU6ZI3MpxTFq0bd5 -+75IQYOQi4SLktE/xFZPaX54DlIzYCaVvjr57/DKOlwa4nnL0PGbfdS9rwBVxN1E -VvRsLG3z0crtFtunpJxKN1TI4HM/vZzfvTt9FH38Xx1yhwlUZKqx42YCImYJSBY/ -mxx/XjVZqaGSqBoSLgI+zKmOPEoo6i2nhZhCrm/GuuEV+hP5MHch3YhqO2/xYcCP -eeM9CU8ham84m9uCJ6ol8H0iiImztHXHCGWJ1AFq567NOXE407vQNpM2z49bNlR4 -QYvlXuvM0wJLKo+LFTftj6SjyweMdd3FRzxGUDQaG9YjpBe20etBS3ETTySiDnxN -eLVRe2nKG+e36VugaELJ+T8GZlhT+2s34EPrS4WUdqpwsrIouMXPeMPp0z3VO/7A -qyTlTK5TaDgLj+LQIZF9dI3aXDhH1Z9OKXsS2m7tSBJeBCY15jDFH9Og2wARAQAB -tElQcm94bW94IFZpcnR1YWwgRW52aXJvbm1lbnQgNS54IFJlbGVhc2UgS2V5IDxw -cm94bW94LXJlbGVhc2VAcHJveG1veC5jb20+iQI/BBMBCAApBQJXw8qHAhsDBQkS -zAMABwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQDZoZUOLvBgM6RA/+KKtA -TciGES4TEgsLuHPoM5E0X4JWhMq2jN6znmzo5kIVmHXEk4LxeeppMoICsc6DMoDL -9n4M5m5YqlIYAs78SrxjSdDspPeV2/gPegDD/U8rx+OhGNBORpewSi9jq6iq/bWN -kT2Pwvk/lmDmnHebtCWvxB2y0mkcaAw87w8c5xYgOnnL/slwcegUN7/m6pcien5b -Ijixt75Kq3ol45y4QRnkYDnoejMnlinEB4U2qfdkiVxEpwLZ97ipKo+wIQ9tMqmk -q8xVoT39+JJESBAaJO3P19NSJiLtNjkPpoNFNOYJRubY9wD9/2Q1jx7V04U/4zuh -AppsFcGt/cn5K0Vy6KqPgUAyyMjRB/+MKpL/4zdFcpwy6gu7c0eqMdXw1lW9YYF0 -XQhhxVuet1xbVazIH4NgkwCJvOPVcJwILkmGorTtJPvHgS/V+NFYh3n6Q0GWY5gC -+dturtMnLxsi6GrE0mamqHuJ7mW3zli2zClURCJaePwdd8i+anZzQwT2wg3oXBtA -gOZgeZFuC4OrGnfK5hj9n/LV1PjMwEmliiFDNRPOGDrmKbn7LBocem3ams0dKxfg -Eh/97QpKJh15NM677TiQmzbFmBBPA5BPLRzPlVi4eemDyv5ggYdSckz+sCiUMzy3 -x7aL/VB66uF1IYQFp5/WeGRMOv3n3Bkq5S2aEO4= -=hfNW ------END PGP PUBLIC KEY BLOCK----- diff --git a/files/proxmox-ve-release-7.x.asc b/files/proxmox-ve-release-7.x.asc new file mode 100644 index 00000000..fefd83d1 --- 
/dev/null +++ b/files/proxmox-ve-release-7.x.asc @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF+o6NQBEADdEeL7hTwBlX9kbRwf3ESa+H/LOjEUGEvpZkk5kMysIi+4DTXL +79LoZwQZXGKM3W6it7kF/YkE8hcGnHrFYKCcfmu3rGpQSF8v867xR6km0bzIRvLQ +XYYO3SL3SBDOlp4OuGwbcb+E9/oacVhfZY6d94AhGx2rueDW+YcUDC/nQrDnIJfd ++yurm1sHoZMG4cx43Y9Q5BlckyZN1Gt7KFSETo9seayxJ47+IOMCw3s1nOyXWtUD +7YrihSjQmhLd4jOJgLy7sSwOHnkrVfvvhIz6JfFn/ccGvPqK72dddgX2aF/VT5Lk +SF9d6yi5Ea4/AENMLqljnw74b+uvOa6wT4zjqQHTu7Wj3xLr711o9VsSbGSuRBP3 +Stwj2z6Xy8fTKChN8DkUal6HEtIVBvCs1jtioqdigoUY0cnHwGor1/yKMWsKjt5t +qWjGMnBDdLWngTM61yh4WtvxDh1zLK5Q0xGaIYDPrgcRhnO456+8JIGVoQVg6bu5 +g5m9ua1KRTsr+TaqctDwDMqhhzqDAZpGuNgpHF7ycDYrof7sYFgQ1n3S/+yCpYJx +TJOIvAdmkUTuHwDRkXqGvR4eyGy8/RZ0KMQ7oVJbMyZextOZBbUE95FbE7EB8iOt +9NZHH2pgBZojYhD9P4+xwSyTymR2t/SdpvmOROjOtIDxbQqdBvmDUy33DQARAQAB +tDpQcm94bW94IEJ1bGxzZXllIFJlbGVhc2UgS2V5IDxwcm94bW94LXJlbGVhc2VA +cHJveG1veC5jb20+iQJUBBMBCgA+FiEEKBOaL4ML1oR4oaAf3UujkX4jv1kFAl+o +6NQCGwMFCRLMAwAFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQ3UujkX4jv1k3 +qxAAq5ggmp54L/LCdfqNlMlloqiWerxhEYDZ+bBq6IFJtOcctm4kyrRUizRo2SUi +O1wmPLjWcfQ1gUYP9ZgECNTzOGi7o9z+lk76tQiH6GeckLOxTqvilOaxwfpJrvKD +BOp8e0yl7BSyNtnbMpaX5+vH9gNl+pWpzFuNmBMz85jkuI1qaoMDAbzY7Tg4YmkR +O6Z/6Mj1F0vyQldTIB45hUtdzOkNaE/Pa4jBhb8jZ2DPGbz7QqEUvsdbR06FaiFL +tZmLBQ6/yTXtUy/SbyIr+LlNmThkifohqzP9VGFy3DYuLskL/GF9w1Jb4TE5vobc +U6DdY1nF5j4BbfwdaiOOm5n3dIy7QtqCZ0apDXTpn211GszjCL4AfdhsfvovBUYW +LAE6bEZImJUqiyTW/a96zDbc1zulAtDvuZNWH05nlrdNomTk70HDEth/GQ02O4jK +bZxwWe/CWB0e9CvAssEFJZ5jId7guA0WsIz689tBJGYVMPc0WFL9Kuw3gweMd3TT +/r/lqy0eDgsxT2ym1/Jg9Kj6Ko2rAfX/Sr9OdXwE2X8e745Z9HTABtxgSnFwCnfv +/9QHrlfnn1C4e7QEcTuoen8JSOKlTYzoeFGDRuVi5uI+lFfIF1DZiWPnnvSmyYp3 +DPj7a1gXa3vX3EiIHWNYZzGEhyblqT9Oj7HFiFRGK2gWh5M= +=BaNd +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/meta/main.yml b/meta/main.yml index cf72e9af..5ed00104 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -10,8 +10,8 @@ galaxy_info: platforms: - name: Debian versions: - - stretch - buster + - bullseye galaxy_tags: - proxmox diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 4afc3180..01e682d5 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -1,22 +1,5 @@ # This is an Ansible version of what "pveceph install" actually does --- -- block: - - name: Install custom Ceph systemd service - copy: - src: /usr/share/doc/pve-manager/examples/ceph.service - dest: /etc/systemd/system/ceph.service - remote_src: true - owner: root - group: root - mode: preserve - notify: 'restart ceph' - - - name: Enable Ceph - systemd: - name: ceph.service - enabled: true - when: - - "ansible_distribution_release == 'stretch'" - block: - name: Create initial Ceph config @@ -43,7 +26,9 @@ command: 'pveceph mon create' args: creates: '/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/' - when: "inventory_hostname != groups[pve_ceph_mon_group][0]" + when: + - "inventory_hostname != groups[pve_ceph_mon_group][0]" + - "inventory_hostname in groups[pve_ceph_mon_group]" - name: Create additional Ceph managers command: 'pveceph mgr create' @@ -81,17 +66,19 @@ - name: Change osd list (remove existing osds from the list) set_fact: - pve_ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" + _ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" tags: ceph_volume - name: Create Ceph OSDs command: >- pveceph osd create {{ item.device }} {% if "encrypted" in item and item["encrypted"] | bool %}--encrypted 1{% endif %} - {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %} + {% if 
"block.db" in item %}--db_dev {{ item["block.db"] }}{% endif %} + {% if "block.wal" in item %}--wal_dev {{ item["block.wal"] }}{% endif %} args: creates: '{{ item.device }}1' - with_items: '{{ pve_ceph_osds_diff }}' + with_items: '{{ _ceph_osds_diff }}' + tags: create_osd - block: diff --git a/tasks/load_variables.yml b/tasks/load_variables.yml index cbcd522e..a5ec77f7 100644 --- a/tasks/load_variables.yml +++ b/tasks/load_variables.yml @@ -8,11 +8,6 @@ # address. Thus, we're deprecating them. See below references. # https://pve.proxmox.com/wiki/Separate_Cluster_Network#Setup_at_Cluster_Creation # https://git.proxmox.com/?p=pve-cluster.git;a=blob;f=data/PVE/Corosync.pm;h=8b5c91e0da084da4e9ba7423176872a0c16ef5af;hb=refs/heads/stable-5#l209 - - name: LEGACY - Define pve_cluster_addr0 from bindnet0_addr/ring0_addr - set_fact: - pve_cluster_addr0: "{{ pve_cluster_bindnet0_addr | default(pve_cluster_ring0_addr) }}" - when: "pve_cluster_ring0_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr0 from link0_addr set_fact: pve_cluster_addr0: "{{ pve_cluster_link0_addr }}" @@ -20,11 +15,6 @@ when: "pve_cluster_addr0 is not defined" - block: - - name: LEGACY - Define pve_cluster_addr1 from bindnet1_addr/ring1_addr - set_fact: - pve_cluster_addr1: "{{ pve_cluster_bindnet1_addr | default(pve_cluster_ring1_addr) }}" - when: "pve_cluster_ring1_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr1 from link1_addr set_fact: pve_cluster_addr1: "{{ pve_cluster_link1_addr }}" diff --git a/tasks/main.yml b/tasks/main.yml index 63d57b93..4309cc0b 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -2,6 +2,10 @@ --- - import_tasks: load_variables.yml +- name: Gather package facts + package_facts: + manager: auto + - name: Ensure that facts are present for all cluster hosts assert: that: diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index af6656a7..57c5279c 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -6,15 +6,12 @@ - name: Add node to Proxmox cluster command: >- pvecm add {{ hostvars[groups[pve_group][0]].pve_cluster_addr0 }} -use_ssh - {{ addr0_flag }} {{ pve_cluster_addr0 }} + -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} - {{ addr1_flag }} {{ pve_cluster_addr1 }} + -link1 {{ pve_cluster_addr1 }} {% endif %} args: creates: "{{ pve_cluster_conf }}" - vars: - addr0_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link0', '-ring0_addr') }}" - addr1_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link1', '-ring1_addr') }}" when: - "inventory_hostname == _pve_current_node" diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 48fa86c7..605b9080 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -37,15 +37,12 @@ - name: Initialize a Proxmox cluster command: >- pvecm create {{ pve_cluster_clustername }} - {{ addr0_flag }} {{ pve_cluster_addr0 }} + -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} - {{ addr1_flag }} {{ pve_cluster_addr1 }} + -link1 {{ pve_cluster_addr1 }} {% endif %} args: creates: "{{ pve_cluster_conf }}" - vars: - addr0_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link0', '-ring0_addr') }}" - addr1_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link1', '-ring1_addr') }}" when: - "_pve_found_clusters is not defined" - "inventory_hostname == groups[pve_group][0]" diff --git a/tests/install.yml 
b/tests/install.yml index bdfde017..143e459b 100644 --- a/tests/install.yml +++ b/tests/install.yml @@ -14,10 +14,10 @@ shell: "openssl req -x509 -new -nodes -key {{ ssl_ca_key_path }} -sha256 -days 1 -subj '{{ ssl_subj }}' -out {{ ssl_ca_cert_path }}" vars: test_profiles: + - profile: debian-bullseye + prefix: proxmox-7- - profile: debian-buster prefix: proxmox-6- - - profile: debian-stretch - prefix: proxmox-5- test_hosts_per_profile: 3 container_config: - "lxc.apparmor.profile = unconfined" diff --git a/tests/inventory b/tests/inventory index ae1e8c59..f819daae 100644 --- a/tests/inventory +++ b/tests/inventory @@ -1,5 +1,5 @@ -[stretchcluster] -proxmox-5-[01:03].lxc - [bustercluster] proxmox-6-[01:03].lxc + +[bullseyecluster] +proxmox-7-[01:03].lxc \ No newline at end of file diff --git a/vars/debian-stretch.yml b/vars/debian-bullseye.yml similarity index 61% rename from vars/debian-stretch.yml rename to vars/debian-bullseye.yml index d506832e..f66a3a21 100644 --- a/vars/debian-stretch.yml +++ b/vars/debian-bullseye.yml @@ -1,4 +1,4 @@ --- -pve_release_key: proxmox-ve-release-5.x.asc -pve_release_key_id: 0D9A1950E2EF0603 +pve_release_key: proxmox-ve-release-7.x.asc +pve_release_key_id: DD4BA3917E23BF59 pve_ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com" From a0d3a83b46fa97d6a97084d8de1ef9403af50501 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 00:15:52 -0500 Subject: [PATCH 02/31] Update Vagrantfile for Debian Bullseye/PVE 7 Use smaller block device for Ceph testing. --- Vagrantfile | 4 ++-- tests/vagrant/files/interfaces | 8 ++++++++ tests/vagrant/provision.yml | 4 ++++ 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 tests/vagrant/files/interfaces diff --git a/Vagrantfile b/Vagrantfile index b3a5d02e..ef6bc1d1 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,10 +1,10 @@ Vagrant.configure("2") do |config| - config.vm.box = "debian/buster64" + config.vm.box = "debian/bullseye64" config.vm.provider :libvirt do |libvirt| libvirt.memory = 2048 libvirt.cpus = 2 - libvirt.storage :file, :size => '2G' + libvirt.storage :file, :size => '512M' end N = 3 diff --git a/tests/vagrant/files/interfaces b/tests/vagrant/files/interfaces new file mode 100644 index 00000000..935eadde --- /dev/null +++ b/tests/vagrant/files/interfaces @@ -0,0 +1,8 @@ +# interfaces(5) file used by ifup(8) and ifdown(8) +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +allow-hotplug eth0 +iface eth0 inet dhcp diff --git a/tests/vagrant/provision.yml b/tests/vagrant/provision.yml index 36ac9f3b..147b9dcb 100644 --- a/tests/vagrant/provision.yml +++ b/tests/vagrant/provision.yml @@ -5,6 +5,10 @@ - name: Install gnupg2 apt: name: gnupg2 + - name: Replace /etc/network/interfaces + ansible.builtin.copy: + src: files/interfaces + dest: /etc/network/interfaces roles: - geerlingguy.ntp - lae.proxmox From 0497373bc93bc961ea83e4d6d6274e4bc7dfe7fd Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 00:36:30 -0500 Subject: [PATCH 03/31] Update README.md references to PVE version --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index de6403b1..6bc34cae 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ lae.proxmox =========== -Installs and configures a Proxmox 5.x/6.x cluster with the following features: +Installs and configures a Proxmox 6.x/7.x cluster with the following features: - Ensures all 
hosts can connect to one another as root - Ability to create/manage groups, users, access control lists and storage @@ -68,7 +68,7 @@ Once complete, you should be able to access your Proxmox VE instance at For support or if you'd like to contribute to this role but want guidance, feel free to join this Discord server: https://discord.gg/cjqr6Fg -## Deploying a fully-featured PVE 5.x cluster +## Deploying a fully-featured PVE 7.x cluster Create a new playbook directory. We call ours `lab-cluster`. Our playbook will eventually look like this, but yours does not have to follow all of the steps: From af0e52ef7a30b187dfc30e22c2a7ee46984fc8d9 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 01:08:19 -0500 Subject: [PATCH 04/31] Move support section in README, add note about roles --- README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 6bc34cae..c27f1277 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,13 @@ Installs and configures a Proxmox 6.x/7.x cluster with the following features: - BYO HTTPS certificate support - Ability to use either `pve-no-subscription` or `pve-enterprise` repositories +## Support/Contributing + +For support or if you'd like to contribute to this role but want guidance, feel +free to join this Discord server: https://discord.gg/cjqr6Fg. Please note, this +is an temporary invite, so you'll need to wait for @lae to assign you a role, +otherwise Discord will remove you from the server when you logout. + ## Quickstart The primary goal for this role is to configure and manage a @@ -63,11 +70,6 @@ file containing a list of hosts). Once complete, you should be able to access your Proxmox VE instance at `https://$SSH_HOST_FQDN:8006`. -## Support/Contributing - -For support or if you'd like to contribute to this role but want guidance, feel -free to join this Discord server: https://discord.gg/cjqr6Fg - ## Deploying a fully-featured PVE 7.x cluster Create a new playbook directory. We call ours `lab-cluster`. Our playbook will From 943610e59ac1b3da1c42f2b97b7d0be7be8bc361 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 01:20:56 -0500 Subject: [PATCH 05/31] Update playbook syntax in README, closes #142 --- README.md | 48 +++++++++++++++++++----------------------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index c27f1277..7d8ab031 100644 --- a/README.md +++ b/README.md @@ -37,20 +37,15 @@ Copy the following playbook to a file like `install_proxmox.yml`: - hosts: all become: True roles: - - { - role: geerlingguy.ntp, - ntp_manage_config: true, - ntp_servers: [ - clock.sjc.he.net, - clock.fmt.he.net, - clock.nyc.he.net - ] - } - - { - role: lae.proxmox, - pve_group: all, - pve_reboot_on_kernel_update: true - } + - role: geerlingguy.ntp + ntp_manage_config: true + ntp_servers: + - clock.sjc.he.net, + - clock.fmt.he.net, + - clock.nyc.he.net + - role: lae.proxmox + - pve_group: all + - pve_reboot_on_kernel_update: true Install this role and a role for configuring NTP: @@ -356,22 +351,17 @@ serially during a maintenance period.) It will also enable the IPMI watchdog. 
- hosts: pve01 become: True roles: - - { - role: geerlingguy.ntp, - ntp_manage_config: true, - ntp_servers: [ - clock.sjc.he.net, - clock.fmt.he.net, - clock.nyc.he.net - ] - } - - { - role: lae.proxmox, - pve_group: pve01, - pve_cluster_enabled: yes, - pve_reboot_on_kernel_update: true, + - role: geerlingguy.ntp + ntp_manage_config: true + ntp_servers: + - clock.sjc.he.net, + - clock.fmt.he.net, + - clock.nyc.he.net + - role: lae.proxmox + pve_group: pve01 + pve_cluster_enabled: yes + pve_reboot_on_kernel_update: true pve_watchdog: ipmi - } ## Role Variables From 5bb06fa96b61a280bec0d9c73ef54b3869c5a3d3 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 01:56:42 -0500 Subject: [PATCH 06/31] Update README with a better introduction --- README.md | 48 ++++++++++++++++++++++++++++++++---------------- meta/main.yml | 3 +-- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 7d8ab031..620ffe2b 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,29 @@ -[![Build Status](https://travis-ci.org/lae/ansible-role-proxmox.svg?branch=master)](https://travis-ci.org/lae/ansible-role-proxmox) [![Galaxy Role](https://img.shields.io/badge/ansible--galaxy-proxmox-blue.svg)](https://galaxy.ansible.com/lae/proxmox/) lae.proxmox =========== -Installs and configures a Proxmox 6.x/7.x cluster with the following features: +Installs and configures Proxmox Virtual Environment 6.x/7.x on Debian servers. -- Ensures all hosts can connect to one another as root -- Ability to create/manage groups, users, access control lists and storage -- Ability to create or add nodes to a PVE cluster -- Ability to setup Ceph on the nodes -- IPMI watchdog support -- BYO HTTPS certificate support -- Ability to use either `pve-no-subscription` or `pve-enterprise` repositories +This role allows you to deploy and manage single-node PVE installations and PVE +clusters (3+ nodes) on Debian Buster (10) and Bullseye (11). You are able to +configure the following with the assistance of this role: + + - PVE RBAC definitions (roles, groups, users, and access control lists) + - PVE Storage definitions + - [`datacenter.cfg`][datacenter-cfg] + - HTTPS certificates for the Proxmox Web GUI (BYO) + - PVE repository selection (e.g. `pve-no-subscription` or `pve-enterprise`) + - Watchdog modules (IPMI and NMI) with applicable pve-ha-manager config + - ZFS module setup and ZED notification email + +With clustering enabled, this role does (or allows you to do) the following: + + - Ensure all hosts can connect to one another as root over SSH + - Initialize a new PVE cluster (or possibly adopt an existing one) + - Create or add new nodes to a PVE cluster + - Setup Ceph on a PVE cluster + - Create and manage high availability groups ## Support/Contributing @@ -206,8 +217,8 @@ must already exist) to access PVE and gives them the Administrator role as part of the `ops` group. Read the **User and ACL Management** section for more info. `pve_storages` allows to create different types of storage and configure them. -The backend needs to be supported by [Proxmox](https://pve.proxmox.com/pve-docs/chapter-pvesm.html). -Read the **Storage Management** section for more info. +The backend needs to be supported by [Proxmox][pvesm]. Read the **Storage +Management** section for more info. `pve_ssh_port` allows you to change the SSH port. If your SSH is listening on a port other than the default 22, please set this variable. 
If a new node is @@ -446,8 +457,8 @@ pve_cluster_ha_groups: restricted: 0 ``` -All configuration options supported in the datacenter.cfg file are documented in the -[Proxmox manual datacenter.cfg section][datacenter-cfg]. +All configuration options supported in the datacenter.cfg file are documented +in the [Proxmox manual datacenter.cfg section][datacenter-cfg]. In order for live reloading of network interfaces to work via the PVE web UI, you need to install the `ifupdown2` package. Note that this will remove @@ -619,7 +630,8 @@ pve_ceph_osds: block.db: /dev/sdb1 encrypted: true # Crush rules for different storage classes -# By default 'type' is set to host, you can find valid types at (https://docs.ceph.com/en/latest/rados/operations/crush-map/) +# By default 'type' is set to host, you can find valid types at +# (https://docs.ceph.com/en/latest/rados/operations/crush-map/) # listed under 'TYPES AND BUCKETS' pve_ceph_crush_rules: - name: replicated_rule @@ -667,9 +679,12 @@ pve_ceph_fs: `pve_ceph_network` by default uses the `ipaddr` filter, which requires the `netaddr` library to be installed and usable by your Ansible controller. -`pve_ceph_nodes` by default uses `pve_group`, this parameter allows to specify on which nodes install Ceph (e.g. if you don't want to install Ceph on all your nodes). +`pve_ceph_nodes` by default uses `pve_group`, this parameter allows to specify +on which nodes install Ceph (e.g. if you don't want to install Ceph on all your +nodes). -`pve_ceph_osds` by default creates unencrypted ceph volumes. To use encrypted volumes the parameter `encrypted` has to be set per drive to `true`. +`pve_ceph_osds` by default creates unencrypted ceph volumes. To use encrypted +volumes the parameter `encrypted` has to be set per drive to `true`. ## Contributors @@ -688,6 +703,7 @@ Michael Holasek ([@mholasek](https://github.com/mholasek)) [pve-cluster]: https://pve.proxmox.com/wiki/Cluster_Manager [install-ansible]: http://docs.ansible.com/ansible/intro_installation.html [pvecm-network]: https://pve.proxmox.com/pve-docs/chapter-pvecm.html#_separate_cluster_network +[pvesm]: https://pve.proxmox.com/pve-docs/chapter-pvesm.html [user-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_user.py [group-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_group.py [acl-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_group.py diff --git a/meta/main.yml b/meta/main.yml index 5ed00104..3f71e745 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,8 +1,7 @@ --- galaxy_info: author: Musee Ullah - description: Installs and configures Proxmox 5.x (for clustering) - company: FireEye, Inc. + description: Installs and configures Proxmox Virtual Environment 6.x/7.x on Debian servers. license: MIT min_ansible_version: 2.4 From ba95f4fd77f1eda1985f6a4ed4ae377727003648 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 02:01:44 -0500 Subject: [PATCH 07/31] Remove unsupported Lets Encrypt feature --- README.md | 5 ----- defaults/main.yml | 1 - tasks/main.yml | 3 --- tasks/ssl_letsencrypt.yml | 18 ------------------ templates/pve-letsencrypt-post-hook.sh.j2 | 5 ----- 5 files changed, 32 deletions(-) delete mode 100644 tasks/ssl_letsencrypt.yml delete mode 100644 templates/pve-letsencrypt-post-hook.sh.j2 diff --git a/README.md b/README.md index 620ffe2b..b9197760 100644 --- a/README.md +++ b/README.md @@ -203,10 +203,6 @@ pvecluster. 
Here, a file lookup is used to read the contents of a file in the playbook, e.g. `files/pve01/lab-node01.key`. You could possibly just use host variables instead of files, if you prefer. -`pve_ssl_letsencrypt` allows to obtain a Let's Encrypt SSL certificate for -pvecluster. The Ansible role [systemli.letsencrypt](https://galaxy.ansible.com/systemli/letsencrypt/) -needs to be installed first in order to use this function. - `pve_cluster_enabled` enables the role to perform all cluster management tasks. This includes creating a cluster if it doesn't exist, or adding nodes to the existing cluster. There are checks to make sure you're not mixing nodes that @@ -408,7 +404,6 @@ pve_ceph_fs: [] # List of CephFS filesystems to create pve_ceph_crush_rules: [] # List of CRUSH rules to create # pve_ssl_private_key: "" # Should be set to the contents of the private key to use for HTTPS # pve_ssl_certificate: "" # Should be set to the contents of the certificate to use for HTTPS -pve_ssl_letsencrypt: false # Specifies whether or not to obtain a SSL certificate using Let's Encrypt pve_roles: [] # Added more roles with specific privileges. See section on User Management. pve_groups: [] # List of group definitions to manage in PVE. See section on User Management. pve_users: [] # List of user definitions to manage in PVE. See section on User Management. diff --git a/defaults/main.yml b/defaults/main.yml index 753dffef..2c7ef0d6 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -36,7 +36,6 @@ pve_manage_hosts_enabled: yes # pve_cluster_addr1: "{{ ansible_eth1.ipv4.address }} pve_datacenter_cfg: {} pve_cluster_ha_groups: [] -pve_ssl_letsencrypt: false # additional roles for your cluster (f.e. for monitoring) pve_roles: [] pve_groups: [] diff --git a/tasks/main.yml b/tasks/main.yml index f14d5e7e..58a1167e 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -306,6 +306,3 @@ when: - "pve_ssl_private_key is defined" - "pve_ssl_certificate is defined" - -- import_tasks: ssl_letsencrypt.yml - when: "pve_ssl_letsencrypt | bool" diff --git a/tasks/ssl_letsencrypt.yml b/tasks/ssl_letsencrypt.yml deleted file mode 100644 index 03d5cf08..00000000 --- a/tasks/ssl_letsencrypt.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Install Proxmox Let's Encrypt post-hook script - template: - src: pve-letsencrypt-post-hook.sh.j2 - dest: /usr/local/bin/pve-letsencrypt-post-hook.sh - mode: 0755 - -- name: Request Let's Encrypt certificate for {{ ansible_fqdn }} - include_role: - name: systemli.letsencrypt - vars: - - letsencrypt_cert: - name: "{{ ansible_fqdn }}" - domains: - - "{{ ansible_fqdn }}" - challenge: http - http_auth: standalone - post_hook: /usr/local/bin/pve-letsencrypt-post-hook.sh diff --git a/templates/pve-letsencrypt-post-hook.sh.j2 b/templates/pve-letsencrypt-post-hook.sh.j2 deleted file mode 100644 index 6054d3bc..00000000 --- a/templates/pve-letsencrypt-post-hook.sh.j2 +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -cp /etc/letsencrypt/live/{{ ansible_fqdn }}/privkey.pem /etc/pve/local/pveproxy-ssl.key -cp /etc/letsencrypt/live/{{ ansible_fqdn }}/fullchain.pem /etc/pve/local/pveproxy-ssl.pem -service pveproxy restart From 3a78270ee5a576533d65eea21d391eb069c4da42 Mon Sep 17 00:00:00 2001 From: foerkede Date: Thu, 24 Dec 2020 03:02:16 +0100 Subject: [PATCH 08/31] Add support for zfspool storage --- library/proxmox_storage.py | 8 +++++++- tasks/main.yml | 10 ++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/library/proxmox_storage.py b/library/proxmox_storage.py index 19d842b0..84cd88b9 
100755 --- a/library/proxmox_storage.py +++ b/library/proxmox_storage.py @@ -180,6 +180,7 @@ def __init__(self, module): self.options = module.params['options'] self.vgname = module.params['vgname'] self.thinpool = module.params['thinpool'] + self.sparse = module.params['sparse'] try: self.existing_storages = pvesh.get("storage") @@ -235,6 +236,8 @@ def prepare_storage_args(self): args['vgname'] = self.vgname if self.thinpool is not None: args['thinpool'] = self.thinpool + if self.sparse is not None: + args['sparse'] = self.sparse if self.maxfiles is not None and 'backup' not in self.content: self.module.fail_json(msg="maxfiles is not allowed when there is no 'backup' in content") @@ -307,7 +310,8 @@ def main(): content=dict(type='list', required=True, aliases=['storagetype']), nodes=dict(type='list', required=False, default=None), type=dict(default=None, type='str', required=True, - choices=["dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs"]), + choices=["dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs", + "zfspool"]), disable=dict(required=False, type='bool', default=False), state=dict(default='present', choices=['present', 'absent'], type='str'), path=dict(default=None, required=False, type='str'), @@ -321,6 +325,7 @@ def main(): options=dict(default=None, type='str', required=False), vgname=dict(default=None, type='str', required=False), thinpool=dict(default=None, type='str', required=False), + sparse=dict(default=None, type='bool', required=False), ) module = AnsibleModule( @@ -333,6 +338,7 @@ def main(): ["type", "nfs", ["server", "content", "export"]], ["type", "lvm", ["vgname", "content"]], ["type", "lvmthin", ["vgname", "thinpool", "content"]], + ["type", "zfspool", ["pool", "content"]], ] ) storage = ProxmoxStorage(module) diff --git a/tasks/main.yml b/tasks/main.yml index f14d5e7e..6d2d8c6e 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -249,6 +249,14 @@ with_items: "{{ pve_acls }}" when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" +- name: Create ZFS Pools + zfs: + name: "{{ item.pool }}" + state: present + with_items: "{{ pve_storages }}" + when: item.type == "zfspool" + tags: storage + - name: Configure Proxmox Storage proxmox_storage: name: "{{ item.name }}" @@ -268,8 +276,10 @@ options: "{{ item.options | default(omit) }}" vgname: "{{ item.vgname | default(omit) }}" thinpool: "{{ item.thinpool | default(omit) }}" + sparse: "{{ item.sparse | default(omit) }}" with_items: "{{ pve_storages }}" when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + tags: storage - name: Check datacenter.cfg exists stat: From 3dd95cbfe10e655ad063a77d985f507c0b273441 Mon Sep 17 00:00:00 2001 From: foerkede Date: Sun, 27 Dec 2020 18:04:35 +0100 Subject: [PATCH 09/31] ZFS storage documentation --- README.md | 26 ++++++++++++++++++++++++-- defaults/main.yml | 1 + library/proxmox_storage.py | 15 +++++++++++++-- tasks/main.yml | 7 +++++++ 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 620ffe2b..e70fe6cb 100644 --- a/README.md +++ b/README.md @@ -394,6 +394,7 @@ pve_watchdog_ipmi_timeout: 10 # Number of seconds the watchdog should wait pve_zfs_enabled: no # Specifies whether or not to install and configure ZFS packages # pve_zfs_options: "" # modprobe parameters to pass to zfs module on boot/modprobe # pve_zfs_zed_email: "" # Should be set to an email to receive ZFS notifications +pve_zfs_create_volumes: [] # List of ZFS 
Volumes to create (to use as PVE Storages). See section on Storage Management. pve_ceph_enabled: false # Specifies wheter or not to install and configure Ceph packages. See below for an example configuration. pve_ceph_repository_line: "deb http://download.proxmox.com/debian/ceph-pacific bullseye main" # apt-repository configuration. Will be automatically set for 6.x and 7.x (Further information: https://pve.proxmox.com/wiki/Package_Repositories) pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" # Ceph public network @@ -540,14 +541,14 @@ pve_acls: - test_users ``` -Refer to `library/proxmox_role.py` [link][user-module] and +Refer to `library/proxmox_role.py` [link][user-module] and `library/proxmox_acl.py` [link][acl-module] for module documentation. ## Storage Management You can use this role to manage storage within Proxmox VE (both in single server deployments and cluster deployments). For now, the only supported -types are `dir`, `rbd`, `nfs`, `cephfs` ,`lvm` and `lvmthin`. +types are `dir`, `rbd`, `nfs`, `cephfs`, `lvm`,`lvmthin`, and `zfspool`. Here are some examples. ``` @@ -591,6 +592,27 @@ pve_storages: - 10.0.0.1 - 10.0.0.2 - 10.0.0.3 + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/data + sparse: true +``` + +Some Notes on ZFS: +Currently the `zfspool` type can be used only for `images` and `rootdir` contents. +If you want to store the other content types on a ZFS volume, you need to specify +them with type `dir`, path `//` and add an entry in +`pve_zfs_create_volumes`. This example adds a `iso` storage on a ZFS pool: + +``` +pve_zfs_create_volumes: + - rpool/iso +pve_storages: + - name: iso + type: dir + path: /rpool/iso + content: [ "iso" ] ``` Refer to `library/proxmox_storage.py` [link][storage-module] for module diff --git a/defaults/main.yml b/defaults/main.yml index 753dffef..f9172e27 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -16,6 +16,7 @@ pve_watchdog_ipmi_timeout: 10 pve_zfs_enabled: no # pve_zfs_options: "parameters to pass to zfs module" # pve_zfs_zed_email: "email address for zfs events" +pve_zfs_create_volumes: [] pve_ceph_enabled: false pve_ceph_repository_line: "deb http://download.proxmox.com/debian/{% if ansible_distribution_release == 'buster' %}ceph-nautilus buster{% else %}ceph-pacific bullseye{% endif %} main" pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" diff --git a/library/proxmox_storage.py b/library/proxmox_storage.py index 84cd88b9..29e3a57d 100755 --- a/library/proxmox_storage.py +++ b/library/proxmox_storage.py @@ -22,7 +22,7 @@ type: required: true aliases: [ "storagetype" ] - choices: [ "dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs" ] + choices: [ "dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs", "zfspool" ] description: - Type of storage, must be supported by Proxmox. disable: @@ -54,7 +54,7 @@ pool: required: false description: - - Ceph pool name. + - Ceph/ZFS pool name. monhost: required: false type: list @@ -94,6 +94,10 @@ required: false description: - The name of the LVM thin pool. + sparse: + required: false + description: + - Use ZFS thin-provisioning. 
author: - Fabien Brachere (@fbrachere) @@ -150,6 +154,13 @@ - 10.0.0.1 - 10.0.0.2 - 10.0.0.3 +- name: Create a ZFS storage type + proxmox_storage: + name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/data + sparse: true ''' RETURN = ''' diff --git a/tasks/main.yml b/tasks/main.yml index 6d2d8c6e..d5054763 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -257,6 +257,13 @@ when: item.type == "zfspool" tags: storage +- name: Create ZFS Volumes specified by user + zfs: + name: "{{ item }}" + state: present + with_items: "{{ pve_zfs_create_volumes }}" + tags: storage + - name: Configure Proxmox Storage proxmox_storage: name: "{{ item.name }}" From 325ca32309d2795d1737bcb361e9d71f677c12e9 Mon Sep 17 00:00:00 2001 From: foerkede Date: Sun, 27 Dec 2020 18:10:59 +0100 Subject: [PATCH 10/31] Remove Readme line --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index e70fe6cb..45f7c6cb 100644 --- a/README.md +++ b/README.md @@ -599,7 +599,6 @@ pve_storages: sparse: true ``` -Some Notes on ZFS: Currently the `zfspool` type can be used only for `images` and `rootdir` contents. If you want to store the other content types on a ZFS volume, you need to specify them with type `dir`, path `//` and add an entry in From 671d024d77b6fd49c6188123e3f10112975bd62f Mon Sep 17 00:00:00 2001 From: foerkede Date: Tue, 5 Jan 2021 17:55:14 +0100 Subject: [PATCH 11/31] wrap conditional with quotes --- tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/main.yml b/tasks/main.yml index d5054763..7b241639 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -254,7 +254,7 @@ name: "{{ item.pool }}" state: present with_items: "{{ pve_storages }}" - when: item.type == "zfspool" + when: "item.type == 'zfspool'" tags: storage - name: Create ZFS Volumes specified by user From 0137cecc74901cdef6470f7a6e75fd4a8d11ef12 Mon Sep 17 00:00:00 2001 From: foerkede Date: Wed, 13 Jan 2021 19:30:10 +0100 Subject: [PATCH 12/31] test zfs storage --- tests/group_vars/all | 11 +++++++++++ tests/test.yml | 5 +++++ 2 files changed, 16 insertions(+) diff --git a/tests/group_vars/all b/tests/group_vars/all index 6c644943..36b5fbb8 100644 --- a/tests/group_vars/all +++ b/tests/group_vars/all @@ -100,6 +100,17 @@ pve_storages: # This should create 2 different storages. - 10.0.0.1 - 10.0.0.2 - 10.0.0.3 + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/zfs1 + sparse: true + - name: zfs2 + type: dir + content: [ "iso", "vztmpl", "backup" ] + path: /rpool/zfs2 +pve_zfs_create_volumes: + - rpool/zfs2 ssl_directory: /home/travis/ssl/ ssl_ca_key_path: "{{ ssl_directory }}/test-ca.key" diff --git a/tests/test.yml b/tests/test.yml index bf5bb55d..ab6c0143 100644 --- a/tests/test.yml +++ b/tests/test.yml @@ -71,6 +71,11 @@ query: "[*].storage" run_once: True + - name: Check that User specified ZFS Volumes exist + zfs_facts: + dataset: "{{ item }}" + with_items: "{{ pve_zfs_create_volumes }}" + - name: Read datacenter.cfg file slurp: src: "/etc/pve/datacenter.cfg" From e8757b19ed41b2c525511c80a661eb2f8b04db8b Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 10:30:19 -0500 Subject: [PATCH 13/31] Add ZFS pool and storage creation to Vagrant test setup This also introduces a `pve_hooks` role variable to use for dynamically inserting custom tasks. It'll be further fleshed out separately. 
--- Vagrantfile | 1 + defaults/main.yml | 1 + tasks/zfs.yml | 4 ++++ tests/vagrant/group_vars/all | 13 +++++++++++++ tests/vagrant/tasks/zpool_setup.yml | 5 +++++ 5 files changed, 24 insertions(+) create mode 100644 tests/vagrant/tasks/zpool_setup.yml diff --git a/Vagrantfile b/Vagrantfile index ef6bc1d1..00f05765 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -5,6 +5,7 @@ Vagrant.configure("2") do |config| libvirt.memory = 2048 libvirt.cpus = 2 libvirt.storage :file, :size => '512M' + libvirt.storage :file, :size => '256M' end N = 3 diff --git a/defaults/main.yml b/defaults/main.yml index f9172e27..1ca75f48 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -46,3 +46,4 @@ pve_acls: [] pve_storages: [] pve_ssh_port: 22 pve_manage_ssh: true +pve_hooks: {} diff --git a/tasks/zfs.yml b/tasks/zfs.yml index e192af99..0d52a620 100644 --- a/tasks/zfs.yml +++ b/tasks/zfs.yml @@ -25,3 +25,7 @@ notify: - restart zfs-zed when: "pve_zfs_zed_email is defined" + +- name: HOOK - Run ZFS post-install hook tasks + ansible.builtin.include_tasks: "{{ pve_hooks.zfs_post_install }}" + when: "'zfs_post_install' in pve_hooks" diff --git a/tests/vagrant/group_vars/all b/tests/vagrant/group_vars/all index 75d92777..776dd74e 100644 --- a/tests/vagrant/group_vars/all +++ b/tests/vagrant/group_vars/all @@ -46,8 +46,21 @@ pve_storages: username: admin monhost: - "{{ ansible_fqdn }}:6789" + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: testpool/zfs1 + sparse: true + - name: zfs2 + type: dir + content: [ "iso", "vztmpl", "backup" ] + path: /testpool/zfs2 +pve_zfs_create_volumes: + - testpool/zfs2 pve_ceph_osds: - device: "/dev/vdb" +pve_hooks: + zfs_post_install: zpool_setup.yml ntp_manage_config: true ntp_servers: - clock.sjc.he.net diff --git a/tests/vagrant/tasks/zpool_setup.yml b/tests/vagrant/tasks/zpool_setup.yml new file mode 100644 index 00000000..6af5e32c --- /dev/null +++ b/tests/vagrant/tasks/zpool_setup.yml @@ -0,0 +1,5 @@ +- name: Identify imported ZFS pools + community.general.zpool_facts: +- name: Create testpool ZFS pool + ansible.builtin.command: zpool create testpool vdc + when: "'testpool' not in (ansible_zfs_pools | json_query('[].name'))" From b186662532f86aceb1b7e1d7ea3b28915f0113d6 Mon Sep 17 00:00:00 2001 From: Slobberbone Date: Thu, 11 Mar 2021 11:59:09 +0100 Subject: [PATCH 14/31] Add update cache on install proxmoxve add update_cache: yes and cache_valid_time: 3600 on "Install Proxmox VE and related packages" --- tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tasks/main.yml b/tasks/main.yml index 9d55dc86..69e21fbd 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -154,6 +154,8 @@ - name: Install Proxmox VE and related packages apt: + update_cache: yes + cache_valid_time: 3600 name: "{{ _pve_install_packages }}" state: "{{ 'latest' if pve_run_proxmox_upgrades else 'present' }}" retries: 2 From 4e9bfe69e226d706ba218ba86c10194f3924adeb Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 12:04:37 -0500 Subject: [PATCH 15/31] Update ceph_volume module to ceph/ceph-ansible@fde6354dcd604c4a36847ec9957be06488e44b5a --- LICENSE_IMPORTS | 26 ++++ library/ceph_volume.py | 260 ++++++++++++++++++++++++-------------- module_utils/ca_common.py | 112 ++++++++++++++++ 3 files changed, 305 insertions(+), 93 deletions(-) create mode 100644 LICENSE_IMPORTS create mode 100644 module_utils/ca_common.py diff --git a/LICENSE_IMPORTS b/LICENSE_IMPORTS new file mode 100644 index 00000000..633ffb77 --- /dev/null +++ b/LICENSE_IMPORTS @@ -0,0 +1,26 @@ 
+============================================================================== + +The following files are licensed under APL2: + + module_utils/ca_common.py + library/ceph_volume.py + +The license text from ceph/ceph-ansible is as follows: + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +============================================================================== + +# Licenses for libraries imported in the future should go here diff --git a/library/ceph_volume.py b/library/ceph_volume.py index c9aa50ba..76757f57 100755 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -1,8 +1,19 @@ #!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal +except ImportError: + from module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal import datetime import copy import json import os +import re ANSIBLE_METADATA = { 'metadata_version': '1.0', @@ -52,6 +63,10 @@ description: - The OSD FSID required: false + osd_id: + description: + - The OSD ID + required: false journal: description: - The logical volume name or partition to use as a filestore journal. @@ -69,7 +84,7 @@ required: false db_vg: description: - - If db is a lv, this must be the name of the volume group it belongs to. # noqa E501 + - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501 - Only applicable if objectstore is 'bluestore'. required: false wal: @@ -79,7 +94,7 @@ required: false wal_vg: description: - - If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501 + - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501 - Only applicable if objectstore is 'bluestore'. required: false crush_device_class: @@ -114,6 +129,24 @@ - Only applicable if action is 'batch'. required: false default: -1 + journal_devices: + description: + - A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + - Only applicable if objectstore is 'filestore'. + required: false + block_db_devices: + description: + - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + - Only applicable if objectstore is 'bluestore'. + required: false + wal_devices: + description: + - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + - Only applicable if objectstore is 'bluestore'. + required: false report: description: - If provided the --report flag will be passed to 'ceph-volume lvm batch'. @@ -121,12 +154,6 @@ - Results will be returned in json format. - Only applicable if action is 'batch'. required: false - containerized: - description: - - Wether or not this is a containerized cluster. The value is - assigned or not depending on how the playbook runs. 
- required: false - default: None list: description: - List potential Ceph LVM metadata on a device @@ -157,7 +184,7 @@ action: create -- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501 +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501 ceph_volume: objectstore: bluestore data: data-lv @@ -168,20 +195,6 @@ ''' -from ansible.module_utils.basic import AnsibleModule # noqa 4502 - - -def fatal(message, module): - ''' - Report a fatal error and exit - ''' - - if module: - module.fail_json(msg=message, changed=False, rc=1) - else: - raise(Exception(message)) - - def container_exec(binary, container_image): ''' Build the docker CLI to run a command inside a container @@ -189,7 +202,6 @@ def container_exec(binary, container_image): container_binary = os.getenv('CEPH_CONTAINER_BINARY') command_exec = [container_binary, 'run', '--rm', '--privileged', '--net=host', '--ipc=host', - '--ulimit', 'nofile=1024:4096', '-v', '/run/lock/lvm:/run/lock/lvm:z', '-v', '/var/run/udev/:/var/run/udev/:z', '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', @@ -200,20 +212,21 @@ def container_exec(binary, container_image): return command_exec -def build_ceph_volume_cmd(action, container_image, cluster=None): +def build_cmd(action, container_image, cluster='ceph', binary='ceph-volume'): ''' Build the ceph-volume command ''' + _binary = binary + if container_image: - binary = 'ceph-volume' cmd = container_exec( binary, container_image) else: - binary = ['ceph-volume'] + binary = [binary] cmd = binary - if cluster: + if _binary == 'ceph-volume': cmd.extend(['--cluster', cluster]) cmd.extend(action) @@ -221,28 +234,6 @@ def build_ceph_volume_cmd(action, container_image, cluster=None): return cmd -def exec_command(module, cmd): - ''' - Execute command - ''' - - rc, out, err = module.run_command(cmd) - return rc, cmd, out, err - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - def get_data(data, data_vg): if data_vg: data = '{0}/{1}'.format(data_vg, data) @@ -267,7 +258,7 @@ def get_wal(wal, wal_vg): return wal -def batch(module, container_image): +def batch(module, container_image, report=None): ''' Batch prepare OSD devices ''' @@ -277,6 +268,7 @@ def batch(module, container_image): objectstore = module.params['objectstore'] batch_devices = module.params.get('batch_devices', None) crush_device_class = module.params.get('crush_device_class', None) + journal_devices = module.params.get('journal_devices', None) journal_size = module.params.get('journal_size', None) block_db_size = module.params.get('block_db_size', None) block_db_devices = module.params.get('block_db_devices', None) @@ -288,16 +280,17 @@ def batch(module, container_image): fatal('osds_per_device must be provided if action is "batch"', module) if osds_per_device < 1: - fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa E501 + fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa: E501 if not batch_devices: fatal('batch_devices must be provided if action is "batch"', module) # Build the CLI action = ['lvm', 'batch'] - cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd = build_cmd(action, container_image, cluster) cmd.extend(['--%s' % objectstore]) - cmd.append('--yes') + 
if not report: + cmd.append('--yes') if container_image: cmd.append('--prepare') @@ -319,11 +312,17 @@ def batch(module, container_image): cmd.extend(batch_devices) - if block_db_devices: - cmd.extend(['--db-devices', ' '.join(block_db_devices)]) + if journal_devices and objectstore == 'filestore': + cmd.append('--journal-devices') + cmd.extend(journal_devices) + + if block_db_devices and objectstore == 'bluestore': + cmd.append('--db-devices') + cmd.extend(block_db_devices) - if wal_devices: - cmd.extend(['--wal-devices', ' '.join(wal_devices)]) + if wal_devices and objectstore == 'bluestore': + cmd.append('--wal-devices') + cmd.extend(wal_devices) return cmd @@ -372,20 +371,20 @@ def prepare_or_create_osd(module, action, container_image): # Build the CLI action = ['lvm', action] - cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd = build_cmd(action, container_image, cluster) cmd.extend(['--%s' % objectstore]) cmd.append('--data') cmd.append(data) - if journal: + if journal and objectstore == 'filestore': journal = get_journal(journal, journal_vg) cmd.extend(['--journal', journal]) - if db: + if db and objectstore == 'bluestore': db = get_db(db, db_vg) cmd.extend(['--block.db', db]) - if wal: + if wal and objectstore == 'bluestore': wal = get_wal(wal, wal_vg) cmd.extend(['--block.wal', wal]) @@ -411,24 +410,26 @@ def list_osd(module, container_image): # Build the CLI action = ['lvm', 'list'] - cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd = build_cmd(action, container_image, cluster) if data: cmd.append(data) cmd.append('--format=json') return cmd + def list_storage_inventory(module, container_image): ''' List storage inventory. ''' action = ['inventory'] - cmd = build_ceph_volume_cmd(action, container_image) + cmd = build_cmd(action, container_image) cmd.append('--format=json') return cmd + def activate_osd(): ''' Activate all the OSDs on a machine @@ -437,12 +438,31 @@ def activate_osd(): # build the CLI action = ['lvm', 'activate'] container_image = None - cmd = build_ceph_volume_cmd(action, container_image) + cmd = build_cmd(action, container_image) cmd.append('--all') return cmd +def is_lv(module, vg, lv, container_image): + ''' + Check if an LV exists + ''' + + args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa: E501 + + cmd = build_cmd(args, container_image, binary='lvs') + + rc, cmd, out, err = exec_command(module, cmd) + + if rc == 0: + result = json.loads(out)['report'][0]['lv'] + if len(result) > 0: + return True + + return False + + def zap_devices(module, container_image): ''' Will run 'ceph-volume lvm zap' on all devices, lvs and partitions @@ -461,15 +481,21 @@ def zap_devices(module, container_image): wal = module.params.get('wal', None) wal_vg = module.params.get('wal_vg', None) osd_fsid = module.params.get('osd_fsid', None) + osd_id = module.params.get('osd_id', None) + destroy = module.params.get('destroy', True) # build the CLI action = ['lvm', 'zap'] - cmd = build_ceph_volume_cmd(action, container_image) - cmd.append('--destroy') + cmd = build_cmd(action, container_image) + if destroy: + cmd.append('--destroy') if osd_fsid: cmd.extend(['--osd-fsid', osd_fsid]) + if osd_id: + cmd.extend(['--osd-id', osd_id]) + if data: data = get_data(data, data_vg) cmd.append(data) @@ -496,7 +522,7 @@ def run_module(): 'bluestore', 'filestore'], default='bluestore'), action=dict(type='str', required=False, choices=[ 'create', 'zap', 'batch', 'prepare', 'activate', 'list', - 'inventory'], 
default='create'), # noqa 4502 + 'inventory'], default='create'), # noqa: 4502 data=dict(type='str', required=False), data_vg=dict(type='str', required=False), journal=dict(type='str', required=False), @@ -510,31 +536,39 @@ def run_module(): batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), journal_size=dict(type='str', required=False, default='5120'), + journal_devices=dict(type='list', required=False, default=[]), block_db_size=dict(type='str', required=False, default='-1'), block_db_devices=dict(type='list', required=False, default=[]), wal_devices=dict(type='list', required=False, default=[]), report=dict(type='bool', required=False, default=False), - containerized=dict(type='str', required=False, default=False), osd_fsid=dict(type='str', required=False), + osd_id=dict(type='str', required=False), + destroy=dict(type='bool', required=False, default=True), ) module = AnsibleModule( argument_spec=module_args, - supports_check_mode=True + supports_check_mode=True, + mutually_exclusive=[ + ('data', 'osd_fsid', 'osd_id'), + ], + required_if=[ + ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True) + ] ) result = dict( changed=False, stdout='', stderr='', - rc='', + rc=0, start='', end='', delta='', ) if module.check_mode: - return result + module.exit_json(**result) # start execution startd = datetime.datetime.now() @@ -563,23 +597,23 @@ def run_module(): try: out_dict = json.loads(out) except ValueError: - fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501 + fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501 if out_dict: data = module.params['data'] - result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 - data) + result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501 result['rc'] = 0 module.exit_json(**result) # Prepare or create the OSD rc, cmd, out, err = exec_command( module, prepare_or_create_osd(module, action, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) elif action == 'activate': if container_image: fatal( - "This is not how container's activation happens, nothing to activate", module) # noqa E501 + "This is not how container's activation happens, nothing to activate", module) # noqa: E501 # Activate the OSD rc, cmd, out, err = exec_command( @@ -587,8 +621,34 @@ def run_module(): elif action == 'zap': # Zap the OSD - rc, cmd, out, err = exec_command( - module, zap_devices(module, container_image)) + skip = [] + for device_type in ['journal', 'data', 'db', 'wal']: + # 1/ if we passed vg/lv + if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + # 2/ check this is an actual lv/vg + ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501 + skip.append(ret) + # 3/ This isn't a lv/vg device + if not ret: + module.params['{}_vg'.format(device_type)] = False + module.params[device_type] = False + # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa: E501 + elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + skip.append(True) + + cmd = zap_devices(module, container_image) + + if any(skip) or module.params.get('osd_fsid', None) \ + or module.params.get('osd_id', None): + rc, cmd, out, err = exec_command( + module, 
cmd) + for scan_cmd in ['vgscan', 'lvscan']: + module.run_command([scan_cmd, '--cache']) + else: + out = 'Skipped, nothing to zap' + err = '' + changed = False + rc = 0 elif action == 'list': # List Ceph LVM Metadata on a device @@ -610,7 +670,7 @@ def run_module(): '--format=json', ] - cmd = batch(module, container_image) + cmd = batch(module, container_image, report=True) batch_report_cmd = copy.copy(cmd) batch_report_cmd.extend(report_flags) @@ -619,12 +679,21 @@ def run_module(): rc, cmd, out, err = exec_command( module, batch_report_cmd) try: + if not out: + out = '{}' report_result = json.loads(out) except ValueError: - strategy_change = "strategy changed" in out - if strategy_change: - out = json.dumps( - {"changed": False, "stdout": out.rstrip("\r\n")}) + strategy_changed_in_out = "strategy changed" in out + strategy_changed_in_err = "strategy changed" in err + strategy_changed = strategy_changed_in_out or \ + strategy_changed_in_err + if strategy_changed: + if strategy_changed_in_out: + out = json.dumps({"changed": False, + "stdout": out.rstrip("\r\n")}) + elif strategy_changed_in_err: + out = json.dumps({"changed": False, + "stderr": err.rstrip("\r\n")}) rc = 0 changed = False else: @@ -636,24 +705,29 @@ def run_module(): rc=rc, changed=changed, ) - if strategy_change: + if strategy_changed: module.exit_json(**result) module.fail_json(msg='non-zero return code', **result) if not report: - # if not asking for a report, let's just run the batch command - changed = report_result['changed'] - if changed: - # Batch prepare the OSD + if 'changed' in report_result: + # we have the old batch implementation + # if not asking for a report, let's just run the batch command + changed = report_result['changed'] + if changed: + # Batch prepare the OSD + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + else: + # we have the refactored batch, its idempotent so lets just + # run it rc, cmd, out, err = exec_command( module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) else: cmd = batch_report_cmd - else: - module.fail_json( - msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch" or "inventory".', changed=False, rc=1) # noqa E501 - endd = datetime.datetime.now() delta = endd - startd @@ -679,4 +753,4 @@ def main(): if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py new file mode 100644 index 00000000..9ea1f887 --- /dev/null +++ b/module_utils/ca_common.py @@ -0,0 +1,112 @@ +import os +import datetime + + +def generate_ceph_cmd(sub_cmd, args, user_key=None, cluster='ceph', user='client.admin', container_image=None, interactive=False): + ''' + Generate 'ceph' command line to execute + ''' + + if not user_key: + user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user) + + cmd = pre_generate_ceph_cmd(container_image=container_image, interactive=interactive) + + base_cmd = [ + '-n', + user, + '-k', + user_key, + '--cluster', + cluster + ] + base_cmd.extend(sub_cmd) + cmd.extend(base_cmd + args) + + return cmd + + +def container_exec(binary, container_image, interactive=False): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, 'run'] + + if interactive: + command_exec.extend(['--interactive']) + + command_exec.extend(['--rm', + '--net=host', + 
'-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image]) + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_ceph_cmd(container_image=None, interactive=False): + ''' + Generate ceph prefix comaand + ''' + if container_image: + cmd = container_exec('ceph', container_image, interactive=interactive) + else: + cmd = ['ceph'] + + return cmd + + +def exec_command(module, cmd, stdin=None): + ''' + Execute command(s) + ''' + + binary_data = False + if stdin: + binary_data = True + rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) + + return rc, cmd, out, err + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def fatal(message, module): + ''' + Report a fatal error and exit + ''' + + if module: + module.fail_json(msg=message, rc=1) + else: + raise(Exception(message)) From 50cefffac206a4615307ee3cfbd40647026a035c Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Tue, 19 Oct 2021 17:44:40 -0500 Subject: [PATCH 16/31] Simplify ceph_volume module and fix OSD detection --- library/ceph_volume.py | 642 +------------------------------------- module_utils/ca_common.py | 101 ------ tasks/ceph.yml | 43 +-- 3 files changed, 25 insertions(+), 761 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 76757f57..7ba416cf 100755 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -2,18 +2,10 @@ from ansible.module_utils.basic import AnsibleModule try: - from ansible.module_utils.ca_common import exec_command, \ - is_containerized, \ - fatal + from ansible.module_utils.ca_common import exec_command except ImportError: - from module_utils.ca_common import exec_command, \ - is_containerized, \ - fatal + from module_utils.ca_common import exec_command import datetime -import copy -import json -import os -import re ANSIBLE_METADATA = { 'metadata_version': '1.0', @@ -25,11 +17,11 @@ --- module: ceph_volume -short_description: Create ceph OSDs with ceph-volume +short_description: Query ceph OSDs with ceph-volume description: - Using the ceph-volume utility available in Ceph this module - can be used to create ceph OSDs that are backed by logical volumes. + can be used to query ceph OSDs that are backed by logical volumes. - Only available in ceph versions luminous or greater. options: @@ -38,19 +30,6 @@ - The ceph cluster name. required: false default: ceph - objectstore: - description: - - The objectstore of the OSD, either filestore or bluestore - - Required if action is 'create' - required: false - choices: ['bluestore', 'filestore'] - default: bluestore - action: - description: - - The action to take. Creating OSDs and zapping or querying devices. - required: true - choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory'] - default: create data: description: - The logical volume name or device to use for the OSD data. 
@@ -59,109 +38,6 @@ description: - If data is a lv, this must be the name of the volume group it belongs to. required: false - osd_fsid: - description: - - The OSD FSID - required: false - osd_id: - description: - - The OSD ID - required: false - journal: - description: - - The logical volume name or partition to use as a filestore journal. - - Only applicable if objectstore is 'filestore'. - required: false - journal_vg: - description: - - If journal is a lv, this must be the name of the volume group it belongs to. - - Only applicable if objectstore is 'filestore'. - required: false - db: - description: - - A partition or logical volume name to use for block.db. - - Only applicable if objectstore is 'bluestore'. - required: false - db_vg: - description: - - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501 - - Only applicable if objectstore is 'bluestore'. - required: false - wal: - description: - - A partition or logical volume name to use for block.wal. - - Only applicable if objectstore is 'bluestore'. - required: false - wal_vg: - description: - - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501 - - Only applicable if objectstore is 'bluestore'. - required: false - crush_device_class: - description: - - Will set the crush device class for the OSD. - required: false - dmcrypt: - description: - - If set to True the OSD will be encrypted with dmcrypt. - required: false - batch_devices: - description: - - A list of devices to pass to the 'ceph-volume lvm batch' subcommand. - - Only applicable if action is 'batch'. - required: false - osds_per_device: - description: - - The number of OSDs to create per device. - - Only applicable if action is 'batch'. - required: false - default: 1 - journal_size: - description: - - The size in MB of filestore journals. - - Only applicable if action is 'batch'. - required: false - default: 5120 - block_db_size: - description: - - The size in bytes of bluestore block db lvs. - - The default of -1 means to create them as big as possible. - - Only applicable if action is 'batch'. - required: false - default: -1 - journal_devices: - description: - - A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand. - - Only applicable if action is 'batch'. - - Only applicable if objectstore is 'filestore'. - required: false - block_db_devices: - description: - - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand. - - Only applicable if action is 'batch'. - - Only applicable if objectstore is 'bluestore'. - required: false - wal_devices: - description: - - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand. - - Only applicable if action is 'batch'. - - Only applicable if objectstore is 'bluestore'. - required: false - report: - description: - - If provided the --report flag will be passed to 'ceph-volume lvm batch'. - - No OSDs will be created. - - Results will be returned in json format. - - Only applicable if action is 'batch'. - required: false - list: - description: - - List potential Ceph LVM metadata on a device - required: false - inventory: - description: - - List storage device inventory. 
- required: false author: - Andrew Schoen (@andrewschoen) @@ -169,237 +45,25 @@ ''' EXAMPLES = ''' -- name: set up a filestore osd with an lv data and a journal partition +- name: query all osds ceph_volume: - objectstore: filestore - data: data-lv - data_vg: data-vg - journal: /dev/sdc1 - action: create -- name: set up a bluestore osd with a raw device for data +- name: query single osd on test cluster ceph_volume: - objectstore: bluestore + cluster: test data: /dev/sdc - action: create - - -- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501 - ceph_volume: - objectstore: bluestore - data: data-lv - data_vg: data-vg - db: /dev/sdc1 - wal: /dev/sdc2 - action: create ''' -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, 'run', - '--rm', '--privileged', '--net=host', '--ipc=host', - '-v', '/run/lock/lvm:/run/lock/lvm:z', - '-v', '/var/run/udev/:/var/run/udev/:z', - '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/run/lvm/:/run/lvm/', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def build_cmd(action, container_image, cluster='ceph', binary='ceph-volume'): - ''' - Build the ceph-volume command - ''' - - _binary = binary - - if container_image: - cmd = container_exec( - binary, container_image) - else: - binary = [binary] - cmd = binary - - if _binary == 'ceph-volume': - cmd.extend(['--cluster', cluster]) - - cmd.extend(action) - - return cmd - - def get_data(data, data_vg): if data_vg: data = '{0}/{1}'.format(data_vg, data) return data -def get_journal(journal, journal_vg): - if journal_vg: - journal = '{0}/{1}'.format(journal_vg, journal) - return journal - - -def get_db(db, db_vg): - if db_vg: - db = '{0}/{1}'.format(db_vg, db) - return db - - -def get_wal(wal, wal_vg): - if wal_vg: - wal = '{0}/{1}'.format(wal_vg, wal) - return wal - - -def batch(module, container_image, report=None): +def list_osd(module): ''' - Batch prepare OSD devices - ''' - - # get module variables - cluster = module.params['cluster'] - objectstore = module.params['objectstore'] - batch_devices = module.params.get('batch_devices', None) - crush_device_class = module.params.get('crush_device_class', None) - journal_devices = module.params.get('journal_devices', None) - journal_size = module.params.get('journal_size', None) - block_db_size = module.params.get('block_db_size', None) - block_db_devices = module.params.get('block_db_devices', None) - wal_devices = module.params.get('wal_devices', None) - dmcrypt = module.params.get('dmcrypt', None) - osds_per_device = module.params.get('osds_per_device', 1) - - if not osds_per_device: - fatal('osds_per_device must be provided if action is "batch"', module) - - if osds_per_device < 1: - fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa: E501 - - if not batch_devices: - fatal('batch_devices must be provided if action is "batch"', module) - - # Build the CLI - action = ['lvm', 'batch'] - cmd = build_cmd(action, container_image, cluster) - cmd.extend(['--%s' % objectstore]) - if not report: - cmd.append('--yes') - - if container_image: - cmd.append('--prepare') - - if crush_device_class: - cmd.extend(['--crush-device-class', crush_device_class]) - - if dmcrypt: - cmd.append('--dmcrypt') - - if osds_per_device > 
1: - cmd.extend(['--osds-per-device', str(osds_per_device)]) - - if objectstore == 'filestore': - cmd.extend(['--journal-size', journal_size]) - - if objectstore == 'bluestore' and block_db_size != '-1': - cmd.extend(['--block-db-size', block_db_size]) - - cmd.extend(batch_devices) - - if journal_devices and objectstore == 'filestore': - cmd.append('--journal-devices') - cmd.extend(journal_devices) - - if block_db_devices and objectstore == 'bluestore': - cmd.append('--db-devices') - cmd.extend(block_db_devices) - - if wal_devices and objectstore == 'bluestore': - cmd.append('--wal-devices') - cmd.extend(wal_devices) - - return cmd - - -def ceph_volume_cmd(subcommand, container_image, cluster=None): - ''' - Build ceph-volume initial command - ''' - - if container_image: - binary = 'ceph-volume' - cmd = container_exec( - binary, container_image) - else: - binary = ['ceph-volume'] - cmd = binary - - if cluster: - cmd.extend(['--cluster', cluster]) - - cmd.append('lvm') - cmd.append(subcommand) - - return cmd - - -def prepare_or_create_osd(module, action, container_image): - ''' - Prepare or create OSD devices - ''' - - # get module variables - cluster = module.params['cluster'] - objectstore = module.params['objectstore'] - data = module.params['data'] - data_vg = module.params.get('data_vg', None) - data = get_data(data, data_vg) - journal = module.params.get('journal', None) - journal_vg = module.params.get('journal_vg', None) - db = module.params.get('db', None) - db_vg = module.params.get('db_vg', None) - wal = module.params.get('wal', None) - wal_vg = module.params.get('wal_vg', None) - crush_device_class = module.params.get('crush_device_class', None) - dmcrypt = module.params.get('dmcrypt', None) - - # Build the CLI - action = ['lvm', action] - cmd = build_cmd(action, container_image, cluster) - cmd.extend(['--%s' % objectstore]) - cmd.append('--data') - cmd.append(data) - - if journal and objectstore == 'filestore': - journal = get_journal(journal, journal_vg) - cmd.extend(['--journal', journal]) - - if db and objectstore == 'bluestore': - db = get_db(db, db_vg) - cmd.extend(['--block.db', db]) - - if wal and objectstore == 'bluestore': - wal = get_wal(wal, wal_vg) - cmd.extend(['--block.wal', wal]) - - if crush_device_class: - cmd.extend(['--crush-device-class', crush_device_class]) - - if dmcrypt: - cmd.append('--dmcrypt') - - return cmd - - -def list_osd(module, container_image): - ''' - List will detect wether or not a device has Ceph LVM Metadata + List will detect whether or not a device has Ceph LVM Metadata ''' # get module variables @@ -410,7 +74,8 @@ def list_osd(module, container_image): # Build the CLI action = ['lvm', 'list'] - cmd = build_cmd(action, container_image, cluster) + cmd = ['ceph-volume', '--cluster', cluster] + cmd.extend(action) if data: cmd.append(data) cmd.append('--format=json') @@ -418,143 +83,16 @@ def list_osd(module, container_image): return cmd -def list_storage_inventory(module, container_image): - ''' - List storage inventory. 
- ''' - - action = ['inventory'] - cmd = build_cmd(action, container_image) - cmd.append('--format=json') - - return cmd - - -def activate_osd(): - ''' - Activate all the OSDs on a machine - ''' - - # build the CLI - action = ['lvm', 'activate'] - container_image = None - cmd = build_cmd(action, container_image) - cmd.append('--all') - - return cmd - - -def is_lv(module, vg, lv, container_image): - ''' - Check if an LV exists - ''' - - args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa: E501 - - cmd = build_cmd(args, container_image, binary='lvs') - - rc, cmd, out, err = exec_command(module, cmd) - - if rc == 0: - result = json.loads(out)['report'][0]['lv'] - if len(result) > 0: - return True - - return False - - -def zap_devices(module, container_image): - ''' - Will run 'ceph-volume lvm zap' on all devices, lvs and partitions - used to create the OSD. The --destroy flag is always passed so that - if an OSD was originally created with a raw device or partition for - 'data' then any lvs that were created by ceph-volume are removed. - ''' - - # get module variables - data = module.params.get('data', None) - data_vg = module.params.get('data_vg', None) - journal = module.params.get('journal', None) - journal_vg = module.params.get('journal_vg', None) - db = module.params.get('db', None) - db_vg = module.params.get('db_vg', None) - wal = module.params.get('wal', None) - wal_vg = module.params.get('wal_vg', None) - osd_fsid = module.params.get('osd_fsid', None) - osd_id = module.params.get('osd_id', None) - destroy = module.params.get('destroy', True) - - # build the CLI - action = ['lvm', 'zap'] - cmd = build_cmd(action, container_image) - if destroy: - cmd.append('--destroy') - - if osd_fsid: - cmd.extend(['--osd-fsid', osd_fsid]) - - if osd_id: - cmd.extend(['--osd-id', osd_id]) - - if data: - data = get_data(data, data_vg) - cmd.append(data) - - if journal: - journal = get_journal(journal, journal_vg) - cmd.extend([journal]) - - if db: - db = get_db(db, db_vg) - cmd.extend([db]) - - if wal: - wal = get_wal(wal, wal_vg) - cmd.extend([wal]) - - return cmd - - def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), - objectstore=dict(type='str', required=False, choices=[ - 'bluestore', 'filestore'], default='bluestore'), - action=dict(type='str', required=False, choices=[ - 'create', 'zap', 'batch', 'prepare', 'activate', 'list', - 'inventory'], default='create'), # noqa: 4502 data=dict(type='str', required=False), data_vg=dict(type='str', required=False), - journal=dict(type='str', required=False), - journal_vg=dict(type='str', required=False), - db=dict(type='str', required=False), - db_vg=dict(type='str', required=False), - wal=dict(type='str', required=False), - wal_vg=dict(type='str', required=False), - crush_device_class=dict(type='str', required=False), - dmcrypt=dict(type='bool', required=False, default=False), - batch_devices=dict(type='list', required=False, default=[]), - osds_per_device=dict(type='int', required=False, default=1), - journal_size=dict(type='str', required=False, default='5120'), - journal_devices=dict(type='list', required=False, default=[]), - block_db_size=dict(type='str', required=False, default='-1'), - block_db_devices=dict(type='list', required=False, default=[]), - wal_devices=dict(type='list', required=False, default=[]), - report=dict(type='bool', required=False, default=False), - osd_fsid=dict(type='str', required=False), - osd_id=dict(type='str', 
required=False), - destroy=dict(type='bool', required=False, default=True), ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - mutually_exclusive=[ - ('data', 'osd_fsid', 'osd_id'), - ], - required_if=[ - ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True) - ] ) result = dict( @@ -573,160 +111,8 @@ def run_module(): # start execution startd = datetime.datetime.now() - # get the desired action - action = module.params['action'] - - # will return either the image name or None - container_image = is_containerized() - - # Assume the task's status will be 'changed' - changed = True - - if action == 'create' or action == 'prepare': - # First test if the device has Ceph LVM Metadata - rc, cmd, out, err = exec_command( - module, list_osd(module, container_image)) - - # list_osd returns a dict, if the dict is empty this means - # we can not check the return code since it's not consistent - # with the plain output - # see: http://tracker.ceph.com/issues/36329 - # FIXME: it's probably less confusing to check for rc - - # convert out to json, ansible returns a string... - try: - out_dict = json.loads(out) - except ValueError: - fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501 - - if out_dict: - data = module.params['data'] - result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501 - result['rc'] = 0 - module.exit_json(**result) - - # Prepare or create the OSD - rc, cmd, out, err = exec_command( - module, prepare_or_create_osd(module, action, container_image)) - err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) - - elif action == 'activate': - if container_image: - fatal( - "This is not how container's activation happens, nothing to activate", module) # noqa: E501 - - # Activate the OSD - rc, cmd, out, err = exec_command( - module, activate_osd()) - - elif action == 'zap': - # Zap the OSD - skip = [] - for device_type in ['journal', 'data', 'db', 'wal']: - # 1/ if we passed vg/lv - if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 - # 2/ check this is an actual lv/vg - ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501 - skip.append(ret) - # 3/ This isn't a lv/vg device - if not ret: - module.params['{}_vg'.format(device_type)] = False - module.params[device_type] = False - # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa: E501 - elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 - skip.append(True) - - cmd = zap_devices(module, container_image) - - if any(skip) or module.params.get('osd_fsid', None) \ - or module.params.get('osd_id', None): - rc, cmd, out, err = exec_command( - module, cmd) - for scan_cmd in ['vgscan', 'lvscan']: - module.run_command([scan_cmd, '--cache']) - else: - out = 'Skipped, nothing to zap' - err = '' - changed = False - rc = 0 - - elif action == 'list': - # List Ceph LVM Metadata on a device - rc, cmd, out, err = exec_command( - module, list_osd(module, container_image)) - - elif action == 'inventory': - # List storage device inventory. 
- rc, cmd, out, err = exec_command( - module, list_storage_inventory(module, container_image)) - - elif action == 'batch': - # Batch prepare AND activate OSDs - report = module.params.get('report', None) - - # Add --report flag for the idempotency test - report_flags = [ - '--report', - '--format=json', - ] - - cmd = batch(module, container_image, report=True) - batch_report_cmd = copy.copy(cmd) - batch_report_cmd.extend(report_flags) - - # Run batch --report to see what's going to happen - # Do not run the batch command if there is nothing to do - rc, cmd, out, err = exec_command( - module, batch_report_cmd) - try: - if not out: - out = '{}' - report_result = json.loads(out) - except ValueError: - strategy_changed_in_out = "strategy changed" in out - strategy_changed_in_err = "strategy changed" in err - strategy_changed = strategy_changed_in_out or \ - strategy_changed_in_err - if strategy_changed: - if strategy_changed_in_out: - out = json.dumps({"changed": False, - "stdout": out.rstrip("\r\n")}) - elif strategy_changed_in_err: - out = json.dumps({"changed": False, - "stderr": err.rstrip("\r\n")}) - rc = 0 - changed = False - else: - out = out.rstrip("\r\n") - result = dict( - cmd=cmd, - stdout=out.rstrip('\r\n'), - stderr=err.rstrip('\r\n'), - rc=rc, - changed=changed, - ) - if strategy_changed: - module.exit_json(**result) - module.fail_json(msg='non-zero return code', **result) - - if not report: - if 'changed' in report_result: - # we have the old batch implementation - # if not asking for a report, let's just run the batch command - changed = report_result['changed'] - if changed: - # Batch prepare the OSD - rc, cmd, out, err = exec_command( - module, batch(module, container_image)) - err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) - else: - # we have the refactored batch, its idempotent so lets just - # run it - rc, cmd, out, err = exec_command( - module, batch(module, container_image)) - err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) - else: - cmd = batch_report_cmd + # List Ceph LVM Metadata on a device + rc, cmd, out, err = exec_command(module, list_osd(module)) endd = datetime.datetime.now() delta = endd - startd @@ -739,7 +125,7 @@ def run_module(): rc=rc, stdout=out.rstrip('\r\n'), stderr=err.rstrip('\r\n'), - changed=changed, + changed=False, ) if rc != 0: diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py index 9ea1f887..64b2bddc 100644 --- a/module_utils/ca_common.py +++ b/module_utils/ca_common.py @@ -1,76 +1,3 @@ -import os -import datetime - - -def generate_ceph_cmd(sub_cmd, args, user_key=None, cluster='ceph', user='client.admin', container_image=None, interactive=False): - ''' - Generate 'ceph' command line to execute - ''' - - if not user_key: - user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user) - - cmd = pre_generate_ceph_cmd(container_image=container_image, interactive=interactive) - - base_cmd = [ - '-n', - user, - '-k', - user_key, - '--cluster', - cluster - ] - base_cmd.extend(sub_cmd) - cmd.extend(base_cmd + args) - - return cmd - - -def container_exec(binary, container_image, interactive=False): - ''' - Build the docker CLI to run a command inside a container - ''' - - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, 'run'] - - if interactive: - command_exec.extend(['--interactive']) - - command_exec.extend(['--rm', - '--net=host', - '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, 
container_image]) - return command_exec - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - -def pre_generate_ceph_cmd(container_image=None, interactive=False): - ''' - Generate ceph prefix comaand - ''' - if container_image: - cmd = container_exec('ceph', container_image, interactive=interactive) - else: - cmd = ['ceph'] - - return cmd - - def exec_command(module, cmd, stdin=None): ''' Execute command(s) @@ -82,31 +9,3 @@ def exec_command(module, cmd, stdin=None): rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) return rc, cmd, out, err - - -def exit_module(module, out, rc, cmd, err, startd, changed=False): - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - start=str(startd), - end=str(endd), - delta=str(delta), - rc=rc, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - changed=changed, - ) - module.exit_json(**result) - - -def fatal(message, module): - ''' - Report a fatal error and exit - ''' - - if module: - module.fail_json(msg=message, rc=1) - else: - raise(Exception(message)) diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 01e682d5..253d0e90 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -37,48 +37,27 @@ when: "inventory_hostname in groups[pve_ceph_mgr_group]" - block: - - name: Get existing ceph volumes + - name: Query for existing Ceph volumes ceph_volume: - action: list - data: "{{ item.device }}" register: _ceph_volume_data - loop: '{{ pve_ceph_osds }}' - tags: ceph_volume - changed_when: false #Merely gets the list of ceph volumes so never changes anything - - name: Initialize osd variables - set_fact: - _existing_ceph_volumes_tmp: [] - _existing_ceph_volumes: [] - tags: ceph_volume + - name: Generate a list of active OSDs + ansible.builtin.set_fact: + _existing_ceph_osds: "{{ _ceph_volume_data.stdout | from_json | json_query('*[].devices[]') }}" - - name: Determine ceph volumes Step1 - set_fact: - _existing_ceph_volumes_tmp: "{{ _existing_ceph_volumes_tmp + item.stdout | from_json | json_query('*[].devices[]') }}" - with_items: "{{ _ceph_volume_data.results }}" - tags: ceph_volume - - - name: Determine ceph volumes Step2 - set_fact: - _existing_ceph_volumes: "{{ _existing_ceph_volumes + [{'device': item}] }}" - with_items: "{{ _existing_ceph_volumes_tmp }}" - tags: ceph_volume - - - name: Change osd list (remove existing osds from the list) - set_fact: - _ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" - tags: ceph_volume + - name: Generate list of unprovisioned OSDs + ansible.builtin.set_fact: + _ceph_osds_diff: "{{ _ceph_osds_diff | default([]) + [item] }}" + loop: "{{ pve_ceph_osds }}" + when: item.device not in _existing_ceph_osds - name: Create Ceph OSDs - command: >- + ansible.builtin.command: >- pveceph osd create {{ item.device }} {% if "encrypted" in item and item["encrypted"] | bool %}--encrypted 1{% endif %} {% if "block.db" in item %}--db_dev {{ item["block.db"] }}{% endif %} {% if "block.wal" in item %}--wal_dev {{ item["block.wal"] }}{% endif %} - args: - creates: '{{ item.device }}1' - with_items: '{{ _ceph_osds_diff }}' - + loop: '{{ _ceph_osds_diff | default([]) }}' tags: create_osd - block: From b949dc82c3686e499cf729e6a247fb7522b88e0f Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 01:03:28 -0500 Subject: [PATCH 17/31] Add 
apt-cacher-ng support to vagrant playbook --- README.md | 21 ++++++++++++++++++++ tests/vagrant/provision.yml | 9 +++++++++ tests/vagrant/templates/detect-http-proxy.j2 | 14 +++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 tests/vagrant/templates/detect-http-proxy.j2 diff --git a/README.md b/README.md index 58e02630..fff73c7b 100644 --- a/README.md +++ b/README.md @@ -702,6 +702,27 @@ nodes). `pve_ceph_osds` by default creates unencrypted ceph volumes. To use encrypted volumes the parameter `encrypted` has to be set per drive to `true`. +## Developer Notes + +When developing new features or fixing something in this role, you can test out +your changes by using Vagrant (only libvirt is supported currently). The +playbook can be found in `tests/vagrant` (so be sure to modify group variables +as needed). Be sure to test any changes on both Debian 10 and 11 (update the +Vagrantfile locally to use `debian/buster64`) before submitting a PR. + +You can also specify an apt caching proxy (e.g. `apt-cacher-ng`, and it must +run on port 3142) with the `APT_CACHE_HOST` environment variable to speed up +package downloads if you have one running locally in your environment. The +vagrant playbook will detect whether or not the caching proxy is available and +only use it if it is accessible from your network, so you could just +permanently set this variable in your development environment if you prefer. + +For example, you could run the following to show verbose/easier to read output, +use a caching proxy, and keep the VMs running if you run into an error (so that +you can troubleshoot it and/or run `vagrant provision` after fixing): + + APT_CACHE_HOST=10.71.71.10 ANSIBLE_STDOUT_CALLBACK=debug vagrant up --no-destroy-on-error + ## Contributors Musee Ullah ([@lae](https://github.com/lae), ) - Main developer diff --git a/tests/vagrant/provision.yml b/tests/vagrant/provision.yml index 147b9dcb..10ad2e0d 100644 --- a/tests/vagrant/provision.yml +++ b/tests/vagrant/provision.yml @@ -2,6 +2,15 @@ - hosts: all become: True pre_tasks: + - name: Copy apt proxy detection script + ansible.builtin.template: + src: detect-http-proxy.j2 + dest: /etc/apt/detect-http-proxy + mode: 0755 + - name: Configure apt to use detection script + ansible.builtin.copy: + content: "Acquire::Retries 0;\nAcquire::http::ProxyAutoDetect \"/etc/apt/detect-http-proxy\";" + dest: /etc/apt/apt.conf.d/30detectproxy - name: Install gnupg2 apt: name: gnupg2 diff --git a/tests/vagrant/templates/detect-http-proxy.j2 b/tests/vagrant/templates/detect-http-proxy.j2 new file mode 100644 index 00000000..1d227366 --- /dev/null +++ b/tests/vagrant/templates/detect-http-proxy.j2 @@ -0,0 +1,14 @@ +#!/bin/bash + +APT_CACHE_HOST="{{ lookup('env', 'APT_CACHE_HOST') }}" + +# Check if we can connect to the given host and provide it if so +if [ ! 
-z "${APT_CACHE_HOST}" ]; then + if $(nc -zw1 "${APT_CACHE_HOST}" 3142); then + echo "http://${APT_CACHE_HOST}:3142" + exit + fi +fi + +# Otherwise, don't use a proxy +echo "DIRECT" From 9b6c07a8783ebd19f4292cb384676808f3b7c96b Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 01:25:34 -0500 Subject: [PATCH 18/31] Remove tasks for temporary fixes (fixed in PVE upstream) --- files/01_pass_correct_format_for_linkX.patch | 26 -------------------- tasks/main.yml | 9 ------- tasks/pve_add_node.yml | 8 ------ 3 files changed, 43 deletions(-) delete mode 100644 files/01_pass_correct_format_for_linkX.patch diff --git a/files/01_pass_correct_format_for_linkX.patch b/files/01_pass_correct_format_for_linkX.patch deleted file mode 100644 index 2cd55371..00000000 --- a/files/01_pass_correct_format_for_linkX.patch +++ /dev/null @@ -1,26 +0,0 @@ -to unbreak joining via SSH with an explicit link address. - -Signed-off-by: Fabian Grünbichler ---- - data/PVE/CLI/pvecm.pm | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/data/PVE/CLI/pvecm.pm b/data/PVE/CLI/pvecm.pm -index b381f4f..fe099d4 100755 ---- /usr/share/perl5/PVE/CLI/pvecm.pm -+++ /usr/share/perl5/PVE/CLI/pvecm.pm -@@ -405,9 +405,11 @@ __PACKAGE__->register_method ({ - push @$cmd, '--nodeid', $param->{nodeid} if $param->{nodeid}; - push @$cmd, '--votes', $param->{votes} if defined($param->{votes}); - -+ my $link_desc = get_standard_option('corosync-link'); -+ - foreach my $link (keys %$links) { - push @$cmd, "--link$link", PVE::JSONSchema::print_property_string( -- $links->{$link}, get_standard_option('corosync-link')); -+ $links->{$link}, $link_desc->{format}); - } - - # this will be used as fallback if no links are specified --- - diff --git a/tasks/main.yml b/tasks/main.yml index 69e21fbd..5d80b259 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -193,15 +193,6 @@ - import_tasks: kernel_module_cleanup.yml -- name: "[TEMPFIX] Fix cluster joins on PVE 6" - patch: - src: "01_pass_correct_format_for_linkX.patch" - basedir: / - strip: 1 - when: - - "ansible_distribution_release == 'buster'" - - "pve_cluster_enabled | bool" - - import_tasks: pve_cluster_config.yml when: "pve_cluster_enabled | bool" diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index 57c5279c..00a1087e 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -14,11 +14,3 @@ creates: "{{ pve_cluster_conf }}" when: - "inventory_hostname == _pve_current_node" - -- name: Remove stale corosync lock file due to lack of quorum during initialization - file: - dest: "{{ pve_base_dir }}/priv/lock/file-corosync_conf" - state: absent - when: - - "inventory_hostname == _pve_current_node" - - "inventory_hostname == groups[pve_group][1]" From 67a195c48143bb3f26991e38926a04778f23ef61 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 02:44:55 -0500 Subject: [PATCH 19/31] Remove extraneous patches and patch tasks Uses lineinfile to override checked_command --- files/00_remove_checked_command_bullseye.patch | 13 ------------- files/00_remove_checked_command_buster.patch | 13 ------------- tasks/identify_needed_packages.yml | 5 ----- tasks/main.yml | 9 +++++---- 4 files changed, 5 insertions(+), 35 deletions(-) delete mode 100644 files/00_remove_checked_command_bullseye.patch delete mode 100644 files/00_remove_checked_command_buster.patch diff --git a/files/00_remove_checked_command_bullseye.patch b/files/00_remove_checked_command_bullseye.patch deleted file mode 100644 index c244faa0..00000000 --- 
a/files/00_remove_checked_command_bullseye.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff -u /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ---- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -+++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -@@ -493,7 +493,8 @@ - }, - - checked_command: function(orig_cmd) { -- Proxmox.Utils.API2Request( -+ orig_cmd(); -+ false && Proxmox.Utils.API2Request( - { - url: '/nodes/localhost/subscription', - method: 'GET', diff --git a/files/00_remove_checked_command_buster.patch b/files/00_remove_checked_command_buster.patch deleted file mode 100644 index 1df8ace3..00000000 --- a/files/00_remove_checked_command_buster.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff -u /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ---- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -+++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -@@ -459,7 +459,8 @@ - }, - - checked_command: function(orig_cmd) { -- Proxmox.Utils.API2Request( -+ orig_cmd(); -+ false && Proxmox.Utils.API2Request( - { - url: '/nodes/localhost/subscription', - method: 'GET', diff --git a/tasks/identify_needed_packages.yml b/tasks/identify_needed_packages.yml index 2c115f3c..95125ade 100644 --- a/tasks/identify_needed_packages.yml +++ b/tasks/identify_needed_packages.yml @@ -7,11 +7,6 @@ - ksm-control-daemon - systemd-sysv -- name: Stage patch package if we need to patch the subscription message - set_fact: - _pve_install_packages: "{{ _pve_install_packages | union(['patch']) }}" - when: "'pve-no-subscription' in pve_repository_line" - - name: Stage ZFS packages if ZFS is enabled set_fact: _pve_install_packages: "{{ _pve_install_packages | union(['zfsutils-linux', 'zfs-initramfs', 'zfs-zed']) }}" diff --git a/tasks/main.yml b/tasks/main.yml index 5d80b259..c3ea1d61 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -173,10 +173,11 @@ - "deb https://enterprise.proxmox.com/debian/pve {{ ansible_distribution_release }} pve-enterprise" - name: Remove subscription check wrapper function in web UI - patch: - src: "00_remove_checked_command_{{ ansible_distribution_release }}.patch" - basedir: / - strip: 1 + ansible.builtin.lineinfile: + path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js + line: ' orig_cmd(); return;' + insertafter: '^\s+checked_command: function\(orig_cmd\) {$' + firstmatch: yes backup: yes when: - "pve_remove_subscription_warning | bool" From 0ee8380f77835636e842544e9121379115e0585c Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 04:22:06 -0500 Subject: [PATCH 20/31] Set ansible-galaxy namespace and minimum ansible version to 2.9 --- meta/main.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/meta/main.yml b/meta/main.yml index 3f71e745..fff0ad0c 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,10 +1,12 @@ --- galaxy_info: + namespace: lae + role_name: proxmox author: Musee Ullah description: Installs and configures Proxmox Virtual Environment 6.x/7.x on Debian servers. 
license: MIT - min_ansible_version: 2.4 + min_ansible_version: 2.9 platforms: - name: Debian From 2be7706d28aa065271327b4cc923aeef3240df10 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 04:23:34 -0500 Subject: [PATCH 21/31] ansible-lint changes, mostly setting file permissions --- .ansible-lint | 2 ++ tasks/ceph.yml | 2 +- tasks/ipmi_watchdog.yml | 2 ++ tasks/kernel_module_cleanup.yml | 1 + tasks/main.yml | 1 + tasks/ssh_cluster_config.yml | 3 +++ tasks/ssl_config.yml | 1 + tasks/zfs.yml | 2 ++ 8 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 .ansible-lint diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 00000000..0727309c --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,2 @@ +skip_list: + - no-handler diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 01e682d5..1cbf342b 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -107,7 +107,7 @@ regexp: >- rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} replace: >- - rule {{item.name}} { + rule {{ item.name }} { \gid \g \gtype \g \gmin_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].min_size | default("\g") | trim }} diff --git a/tasks/ipmi_watchdog.yml b/tasks/ipmi_watchdog.yml index 6a7ca7a5..9c203eb1 100644 --- a/tasks/ipmi_watchdog.yml +++ b/tasks/ipmi_watchdog.yml @@ -13,10 +13,12 @@ content: "options ipmi_watchdog action={{ pve_watchdog_ipmi_action }} \ timeout={{ pve_watchdog_ipmi_timeout }} panic_wdt_timeout=10" dest: /etc/modprobe.d/ipmi_watchdog.conf + mode: 0640 - name: Configure PVE HA Manager to use ipmi_watchdog copy: content: "WATCHDOG_MODULE=ipmi_watchdog" dest: /etc/default/pve-ha-manager + mode: 0640 notify: - restart watchdog-mux diff --git a/tasks/kernel_module_cleanup.yml b/tasks/kernel_module_cleanup.yml index 303ebf42..3081f762 100644 --- a/tasks/kernel_module_cleanup.yml +++ b/tasks/kernel_module_cleanup.yml @@ -42,6 +42,7 @@ copy: content: "WATCHDOG_MODULE=softdog" dest: /etc/default/pve-ha-manager + mode: 0640 notify: - restart watchdog-mux when: "pve_watchdog != 'ipmi'" diff --git a/tasks/main.yml b/tasks/main.yml index c3ea1d61..6b3c887a 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -294,6 +294,7 @@ file: path: "/etc/pve/datacenter.cfg" state: "touch" + mode: 0640 when: - "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - "pve_datacenter_cfg | length > 0" diff --git a/tasks/ssh_cluster_config.yml b/tasks/ssh_cluster_config.yml index b1c34f78..10922621 100644 --- a/tasks/ssh_cluster_config.yml +++ b/tasks/ssh_cluster_config.yml @@ -28,6 +28,7 @@ blockinfile: dest: /etc/ssh/ssh_config create: yes + mode: 0644 marker: "# {mark}: PVE host configuration options (managed by ansible)." content: | {% for host in groups[pve_group] %} @@ -67,6 +68,7 @@ blockinfile: dest: /etc/ssh/ssh_known_hosts create: yes + mode: 0644 marker: "# {mark}: PVE host keys (managed by ansible)." 
content: | {% for host in groups[pve_group] %} @@ -84,4 +86,5 @@ regexp: "^Ciphers .*" insertbefore: BOF create: yes + mode: 0644 dest: /root/.ssh/config diff --git a/tasks/ssl_config.yml b/tasks/ssl_config.yml index 114bf6f0..80d82793 100644 --- a/tasks/ssl_config.yml +++ b/tasks/ssl_config.yml @@ -3,6 +3,7 @@ copy: content: "{{ item.content }}" dest: "{{ item.dest }}" + mode: 0640 with_items: - dest: "/etc/ssl/pveproxy-ssl.key" content: "{{ pve_ssl_private_key }}" diff --git a/tasks/zfs.yml b/tasks/zfs.yml index 0d52a620..676c4d95 100644 --- a/tasks/zfs.yml +++ b/tasks/zfs.yml @@ -10,11 +10,13 @@ copy: content: zfs dest: /etc/modules-load.d/zfs.conf + mode: 0644 - name: Copy ZFS modprobe configuration copy: content: "options zfs {{ pve_zfs_options }}" dest: /etc/modprobe.d/zfs.conf + mode: 0644 when: "pve_zfs_options is defined and pve_zfs_options | bool" - name: Configure email address for ZFS event daemon notifications From 1930a1699c8f8fc54abc4c02591846f0e308999e Mon Sep 17 00:00:00 2001 From: Richard Scott Date: Sat, 23 Oct 2021 21:49:29 +0000 Subject: [PATCH 22/31] Fix crush map modification and TYPO in Readme --- README.md | 4 +-- tasks/ceph.yml | 85 +++++++++++++++++++++++++++----------------------- 2 files changed, 48 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index fff73c7b..55d99379 100644 --- a/README.md +++ b/README.md @@ -727,8 +727,8 @@ you can troubleshoot it and/or run `vagrant provision` after fixing): Musee Ullah ([@lae](https://github.com/lae), ) - Main developer Fabien Brachere ([@Fbrachere](https://github.com/Fbrachere)) - Storage config support -Gaudenz Steinlin ([@gaundez](https://github.com/gaudenz)) - Ceph support, etc -Richard Scott ([@zenntrix](https://github.com/zenntrix)) - Ceph support, PVE 7.x support, etc +Gaudenz Steinlin ([@gaundez](https://github.com/gaudenz)) - Ceph support, etc +Richard Scott ([@zenntrix](https://github.com/zenntrix)) - Ceph support, PVE 7.x support, etc Thoralf Rickert-Wendt ([@trickert76](https://github.com/trickert76)) - PVE 6.x support, etc Engin Dumlu ([@roadrunner](https://github.com/roadrunner)) Jonas Meurer ([@mejo-](https://github.com/mejo-)) diff --git a/tasks/ceph.yml b/tasks/ceph.yml index c1c8141b..60a6e614 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -73,48 +73,55 @@ when: item.name not in _ceph_crush.stdout_lines with_items: '{{ pve_ceph_crush_rules }}' - - name: Download and decompress crushmap - command: "{{ item }}" - with_items: - - ceph osd getcrushmap -o crush_map_compressed - - crushtool -d crush_map_compressed -o crush_map_decompressed - changed_when: false # This is just getting information for us to possibly edit, don't mislead user with 'changed' - - - name: Modify crushmap for rules that should be updated - replace: - path: crush_map_decompressed - regexp: >- - rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} - replace: >- - rule {{ item.name }} { - \gid \g - \gtype \g - \gmin_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].min_size | default("\g") | trim }} - \gmax_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].max_size | default("\g") | trim }} - {%- if ((pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class | default(False)) -%} - \gstep take default class {{ (pve_ceph_crush_rules | 
selectattr("name", "match", item.name) | list)[0].class }} - {%- else -%} - \gstep take default\g - {%- endif -%} - \gstep \g firstn 0 type {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].type | default("\g") | trim }} - \gstep emit\n} - loop: '{{ pve_ceph_crush_rules }}' - register: _crushmap - - - name: Compress and upload changed crushmap - command: "{{ item }}" - with_items: - - crushtool -c crush_map_decompressed -o new_crush_map_compressed - - ceph osd setcrushmap -i new_crush_map_compressed - when: _crushmap.changed - - - name: Cleanup temp files from generating new crushmap - file: - path: + - block: + - name: Download and decompress crushmap + command: "{{ item }}" + with_items: + - ceph osd getcrushmap -o crush_map_compressed + - crushtool -d crush_map_compressed -o crush_map_decompressed + changed_when: false # This is just getting information for us to possibly edit, don't mislead user with 'changed' + + - name: Modify local crushmap for rules that should be updated + replace: + path: crush_map_decompressed + regexp: >- + rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} + replace: + "rule {{ item.name }} {\ + \\gid \\g\ + \\gtype \\g\ + \\gmin_size {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].min_size | default(\"\\g\") | trim }}\ + \\gmax_size {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].max_size | default(\"\\g\") | trim }}\ + {%- if ((pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].class | default(False)) -%}\ + \\gstep take default class {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].class }}\ + {%- else -%}\ + \\gstep take default\\g\ + {%- endif -%}\ + \\gstep \\g firstn 0 type {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].type | default(\"\\g\") | trim }}\ + \\gstep emit\n}" + loop: '{{ pve_ceph_crush_rules }}' + register: _crushmap + + - name: Validate and compress new crushmap + command: crushtool -c crush_map_decompressed -o new_crush_map_compressed + register: _crushmap_valid + when: _crushmap.changed + + - name: Upload new crushmap + command: ceph osd setcrushmap -i new_crush_map_compressed + with_items: + - ceph osd setcrushmap -i new_crush_map_compressed + when: _crushmap.changed and _crushmap_valid.rc == 0 + + - name: Cleanup temp files from generating new crushmap + file: + path: "{{ item }}" + state: absent + with_items: - crush_map_compressed - crush_map_decompressed - new_crush_map_compressed - state: absent + changed_when: false # This will always trigger as the files are created to do the initial checks, lets not confuse the user with 'changed' - name: List Ceph Pools command: ceph osd pool ls From 981a865dc0aaa41b8470a4ebfbb5bf79f150540f Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Wed, 20 Oct 2021 23:24:03 -0500 Subject: [PATCH 23/31] Fix reboot task for single nodes The throttle argument is set to a boolean False when clustering is disabled instead of omit because it seems like omit doesn't work at this level (it complains that it's not a number) --- tasks/kernel_updates.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tasks/kernel_updates.yml b/tasks/kernel_updates.yml index a7369cc9..907f2055 100644 --- a/tasks/kernel_updates.yml +++ 
b/tasks/kernel_updates.yml @@ -10,11 +10,10 @@ msg: "PVE kernel update detected by Ansible" pre_reboot_delay: "{{ pve_reboot_on_kernel_update_delay }}" post_reboot_delay: "{{ pve_reboot_on_kernel_update_delay }}" - throttle: "{{ pve_cluster_enabled | bool | ternary(1, omit) }}" + throttle: "{{ pve_cluster_enabled | bool }}" when: - "pve_reboot_on_kernel_update | bool" - "_pve_kernel_update.new_kernel_exists" - - "pve_cluster_enabled | bool" - name: "Collect kernel package information" collect_kernel_info: From a9c49c44bc5df109fb49df77a090d59aa92a74ee Mon Sep 17 00:00:00 2001 From: wu3rstle Date: Wed, 24 Nov 2021 11:45:07 +0100 Subject: [PATCH 24/31] replaced module ceph_volume and module_util ca_common with combined module pve_ceph_volume --- library/{ceph_volume.py => pve_ceph_volume.py} | 13 +++++++++---- module_utils/ca_common.py | 11 ----------- tasks/ceph.yml | 2 +- 3 files changed, 10 insertions(+), 16 deletions(-) rename library/{ceph_volume.py => pve_ceph_volume.py} (91%) delete mode 100644 module_utils/ca_common.py diff --git a/library/ceph_volume.py b/library/pve_ceph_volume.py similarity index 91% rename from library/ceph_volume.py rename to library/pve_ceph_volume.py index 7ba416cf..7210f398 100755 --- a/library/ceph_volume.py +++ b/library/pve_ceph_volume.py @@ -1,10 +1,6 @@ #!/usr/bin/python from ansible.module_utils.basic import AnsibleModule -try: - from ansible.module_utils.ca_common import exec_command -except ImportError: - from module_utils.ca_common import exec_command import datetime ANSIBLE_METADATA = { @@ -54,6 +50,15 @@ data: /dev/sdc ''' +def exec_command(module, cmd, stdin=None): + ''' + Execute command(s) + ''' + binary_data = False + if stdin: + binary_data = True + rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) + return rc, cmd, out, err def get_data(data, data_vg): if data_vg: diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py deleted file mode 100644 index 64b2bddc..00000000 --- a/module_utils/ca_common.py +++ /dev/null @@ -1,11 +0,0 @@ -def exec_command(module, cmd, stdin=None): - ''' - Execute command(s) - ''' - - binary_data = False - if stdin: - binary_data = True - rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) - - return rc, cmd, out, err diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 60a6e614..264e1cdb 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -38,7 +38,7 @@ - block: - name: Query for existing Ceph volumes - ceph_volume: + pve_ceph_volume: register: _ceph_volume_data - name: Generate a list of active OSDs From 69e8c80de460ae7703fca1c28b41c951a05a3904 Mon Sep 17 00:00:00 2001 From: wu3rstle Date: Wed, 24 Nov 2021 12:10:28 +0100 Subject: [PATCH 25/31] adjusted LICENSE_IMPORTS to contain the new pve_ceph_volume with reference to the original files --- LICENSE_IMPORTS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/LICENSE_IMPORTS b/LICENSE_IMPORTS index 633ffb77..ed6de5ee 100644 --- a/LICENSE_IMPORTS +++ b/LICENSE_IMPORTS @@ -2,8 +2,7 @@ The following files are licensed under APL2: - module_utils/ca_common.py - library/ceph_volume.py + library/pve_ceph_volume.py (This is a combined version of the original files module_utils/ca_common.py and library/ceph_volume.py) The license text from ceph/ceph-ansible is as follows: From 193544a8d01202d183e63edc5208b8c16db502df Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Thu, 21 Oct 2021 19:10:32 -0500 Subject: [PATCH 26/31] Update vagrant VMs to 2.5G RAM and 2 128M extra disks --- Vagrantfile | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 00f05765..90a9dc10 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -2,10 +2,10 @@ Vagrant.configure("2") do |config| config.vm.box = "debian/bullseye64" config.vm.provider :libvirt do |libvirt| - libvirt.memory = 2048 + libvirt.memory = 2560 libvirt.cpus = 2 - libvirt.storage :file, :size => '512M' - libvirt.storage :file, :size => '256M' + libvirt.storage :file, :size => '128M' + libvirt.storage :file, :size => '128M' end N = 3 From 52a8dda0220a3a2d09b1e49f44fea87ad9e83d24 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Thu, 21 Oct 2021 19:12:18 -0500 Subject: [PATCH 27/31] Only update SSH known_hosts when joining cluster This role doesn't need to add all cluster hosts to /etc/ssh/ssh_known_hosts as PVE's cluster setup tool does this for you when joining a cluster. However, it is necessary for the node joining a cluster to already know the public host keys of the node it is using to join a cluster, so we add it temporarily to the root user's known_hosts. We don't add it to /etc/ssh/ssh_known_hosts as `pvecm` merges the contents of it into the cluster's copy of it, which is undesirable (it adds extraneous comments from ansible). --- tasks/pve_add_node.yml | 28 ++++++++++++++++++++++------ tasks/pve_cluster_config.yml | 1 - tasks/ssh_cluster_config.yml | 33 ++++----------------------------- 3 files changed, 26 insertions(+), 36 deletions(-) diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index 00a1087e..7c90c09c 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -1,16 +1,32 @@ --- -- name: Identify what host we're working with (inside outer loop) - set_fact: - _pve_current_node: "{{ item }}" +- name: Identify the SSH public key and SSH addresses of initial cluster host + ansible.builtin.set_fact: + _pve_cluster_host_key: "{{ ' '.join((hostvars[groups[pve_group][0]]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" + _pve_cluster_host_addresses: "{{ hostvars[groups[pve_group][0]].pve_cluster_ssh_addrs | join(',') }}" + +- name: Temporarily mark that cluster host as known in root user's known_hosts + ansible.builtin.blockinfile: + dest: /root/.ssh/known_hosts + create: yes + mode: 0600 + marker: "# {mark}: cluster host key for joining" + content: "{{ _pve_cluster_host_addresses }} {{ _pve_cluster_host_key }}" - name: Add node to Proxmox cluster - command: >- + ansible.builtin.command: >- pvecm add {{ hostvars[groups[pve_group][0]].pve_cluster_addr0 }} -use_ssh -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} -link1 {{ pve_cluster_addr1 }} {% endif %} + # Ensure that nodes join one-by-one because cluster joins create a lock + throttle: 1 args: creates: "{{ pve_cluster_conf }}" - when: - - "inventory_hostname == _pve_current_node" + +- name: Remove the cluster host's public key from root user's known_hosts + ansible.builtin.blockinfile: + dest: /root/.ssh/known_hosts + state: absent + mode: 0600 + marker: "# {mark}: cluster host key for joining" diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 605b9080..3a3609f6 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -60,7 +60,6 @@ query: "response[?type=='cluster'].quorate | [0]" - include_tasks: pve_add_node.yml - with_items: "{{ groups[pve_group][1:] }}" when: - "_pve_active_cluster is not defined" - "inventory_hostname != groups[pve_group][0]" diff --git a/tasks/ssh_cluster_config.yml b/tasks/ssh_cluster_config.yml index 10922621..5d814bfc 
100644 --- a/tasks/ssh_cluster_config.yml +++ b/tasks/ssh_cluster_config.yml @@ -50,35 +50,10 @@ notify: - reload sshd configuration -- name: Fetch SSH public host keys - slurp: - src: "/etc/ssh/{{ item }}" - register: proxmox_ssh_public_host_keys - with_items: - - ssh_host_rsa_key.pub - - ssh_host_ed25519_key.pub - - ssh_host_ecdsa_key.pub - -- name: Check status of known hosts file - stat: - path: /etc/ssh/ssh_known_hosts - register: _pve_known_hosts_file - -- name: Add every host's host keys to global known_hosts - blockinfile: - dest: /etc/ssh/ssh_known_hosts - create: yes - mode: 0644 - marker: "# {mark}: PVE host keys (managed by ansible)." - content: | - {% for host in groups[pve_group] %} - {% for _key_slurp in hostvars[host].proxmox_ssh_public_host_keys.results %} - {%- set _key = ' '.join((_key_slurp.content | b64decode).split()[:-1]) -%} - {{ hostvars[host].pve_cluster_ssh_addrs | join(",") }} {{ _key }} - {% endfor %} - {% endfor %} - when: - - "not (_pve_known_hosts_file.stat.islnk is defined and _pve_known_hosts_file.stat.islnk)" +- name: Fetch a SSH public key to use for cluster joins + ansible.builtin.slurp: + src: "/etc/ssh/ssh_host_ed25519_key.pub" + register: _pve_ssh_public_key - name: Add PVE-provided ciphers to SSH client config lineinfile: From c264285ccc311f13e015e8b8f7cd3bee96bb8154 Mon Sep 17 00:00:00 2001 From: Bruno Travouillon Date: Mon, 10 Jan 2022 10:51:27 -0500 Subject: [PATCH 28/31] Fix reinstall of initialization node By default, the initialization node is the first node in the group `pve_group`. When this node is reinstalled, it is not possible for the role to wait for quorum on this node since it is not a member of the cluster yet. Moreover, the tasks in pve_add_node.yml are not run for this node. Change the behavior of the role to pick any node already configured in `pve_cluster_clustername` and use this one as an initialization node. This does not change the behavior when installing a new cluster. --- tasks/pve_add_node.yml | 2 +- tasks/pve_cluster_config.yml | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index 7c90c09c..4525e931 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -14,7 +14,7 @@ - name: Add node to Proxmox cluster ansible.builtin.command: >- - pvecm add {{ hostvars[groups[pve_group][0]].pve_cluster_addr0 }} -use_ssh + pvecm add {{ hostvars[_init_node].pve_cluster_addr0 }} -use_ssh -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} -link1 {{ pve_cluster_addr1 }} diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 3a3609f6..25a764de 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -34,6 +34,10 @@ cluster's name cannot be modified." 
when: "(_pve_found_clusters | default([]) | length) == 1" +- name: Default initialization node is the first node of pve_group + set_fact: + _init_node: "{{ groups[pve_group][0] }}" + - name: Initialize a Proxmox cluster command: >- pvecm create {{ pve_cluster_clustername }} @@ -45,7 +49,15 @@ creates: "{{ pve_cluster_conf }}" when: - "_pve_found_clusters is not defined" - - "inventory_hostname == groups[pve_group][0]" + - "inventory_hostname == _init_node" + +- name: Find any active node in an already initialized Proxmox cluster + set_fact: + _init_node: "{{ item }}" + with_items: "{{ groups[pve_group] }}" + when: + - "'_pve_active_cluster' in hostvars[item]" + - "hostvars[item]['_pve_active_cluster'] == pve_cluster_clustername" - name: Wait for quorum on initialization node proxmox_query: @@ -55,14 +67,14 @@ retries: 5 delay: 5 when: - - "inventory_hostname == groups[pve_group][0]" + - "inventory_hostname == _init_node" vars: query: "response[?type=='cluster'].quorate | [0]" - include_tasks: pve_add_node.yml when: - "_pve_active_cluster is not defined" - - "inventory_hostname != groups[pve_group][0]" + - "inventory_hostname != _init_node" - name: Check for PVE cluster HA groups proxmox_query: From 64959496efa370dcaf148fc3beeb7ae380a53133 Mon Sep 17 00:00:00 2001 From: Bruno Travouillon Date: Mon, 10 Jan 2022 14:29:20 -0500 Subject: [PATCH 29/31] Update _init_node before initializing the cluster For readability --- tasks/pve_cluster_config.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 25a764de..676c9de3 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -38,6 +38,14 @@ set_fact: _init_node: "{{ groups[pve_group][0] }}" +- name: Find any active node in an already initialized Proxmox cluster + set_fact: + _init_node: "{{ item }}" + with_items: "{{ groups[pve_group] }}" + when: + - "'_pve_active_cluster' in hostvars[item]" + - "hostvars[item]['_pve_active_cluster'] == pve_cluster_clustername" + - name: Initialize a Proxmox cluster command: >- pvecm create {{ pve_cluster_clustername }} @@ -51,14 +59,6 @@ - "_pve_found_clusters is not defined" - "inventory_hostname == _init_node" -- name: Find any active node in an already initialized Proxmox cluster - set_fact: - _init_node: "{{ item }}" - with_items: "{{ groups[pve_group] }}" - when: - - "'_pve_active_cluster' in hostvars[item]" - - "hostvars[item]['_pve_active_cluster'] == pve_cluster_clustername" - - name: Wait for quorum on initialization node proxmox_query: query: cluster/status From 5ff80337706541734cf11cf7f3b5afe78c436aa7 Mon Sep 17 00:00:00 2001 From: Musee Ullah Date: Mon, 10 Jan 2022 19:22:49 -0600 Subject: [PATCH 30/31] Ensure identified host key/addresses are from our init host --- tasks/pve_add_node.yml | 4 ++-- tasks/pve_cluster_config.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index 4525e931..f74e21ad 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -1,8 +1,8 @@ --- - name: Identify the SSH public key and SSH addresses of initial cluster host ansible.builtin.set_fact: - _pve_cluster_host_key: "{{ ' '.join((hostvars[groups[pve_group][0]]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" - _pve_cluster_host_addresses: "{{ hostvars[groups[pve_group][0]].pve_cluster_ssh_addrs | join(',') }}" + _pve_cluster_host_key: "{{ ' '.join((hostvars[_init_node]._pve_ssh_public_key.content | 
b64decode).split()[:-1]) }}" + _pve_cluster_host_addresses: "{{ hostvars[_init_node].pve_cluster_ssh_addrs | join(',') }}" - name: Temporarily mark that cluster host as known in root user's known_hosts ansible.builtin.blockinfile: diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 676c9de3..c8003b26 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -35,11 +35,11 @@ when: "(_pve_found_clusters | default([]) | length) == 1" - name: Default initialization node is the first node of pve_group - set_fact: + ansible.builtin.set_fact: _init_node: "{{ groups[pve_group][0] }}" - name: Find any active node in an already initialized Proxmox cluster - set_fact: + ansible.builtin.set_fact: _init_node: "{{ item }}" with_items: "{{ groups[pve_group] }}" when: @@ -47,7 +47,7 @@ - "hostvars[item]['_pve_active_cluster'] == pve_cluster_clustername" - name: Initialize a Proxmox cluster - command: >- + ansible.builtin.command: >- pvecm create {{ pve_cluster_clustername }} -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} From 42d7c0e22adfa49e3995b233004e31f194cab1be Mon Sep 17 00:00:00 2001 From: Bruno Travouillon Date: Tue, 11 Jan 2022 15:54:04 -0500 Subject: [PATCH 31/31] Check for pve_manage_ssh in pve_add_node.yml --- README.md | 2 +- tasks/pve_add_node.yml | 25 ++++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 55d99379..d48c21dd 100644 --- a/README.md +++ b/README.md @@ -224,7 +224,7 @@ joining the cluster, the PVE cluster needs to communicate once via SSH. would make to your SSH server config. This is useful if you use another role to manage your SSH server. Note that setting this to false is not officially supported, you're on your own to replicate the changes normally made in -ssh_cluster_config.yml. +`ssh_cluster_config.yml` and `pve_add_node.yml`. `interfaces_template` is set to the path of a template we'll use for configuring the network on these Debian machines. 
This is only necessary if you want to diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index f74e21ad..f96d1f81 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -1,16 +1,18 @@ --- -- name: Identify the SSH public key and SSH addresses of initial cluster host - ansible.builtin.set_fact: - _pve_cluster_host_key: "{{ ' '.join((hostvars[_init_node]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" - _pve_cluster_host_addresses: "{{ hostvars[_init_node].pve_cluster_ssh_addrs | join(',') }}" +- block: + - name: Identify the SSH public key and SSH addresses of initial cluster host + ansible.builtin.set_fact: + _pve_cluster_host_key: "{{ ' '.join((hostvars[_init_node]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" + _pve_cluster_host_addresses: "{{ hostvars[_init_node].pve_cluster_ssh_addrs | join(',') }}" -- name: Temporarily mark that cluster host as known in root user's known_hosts - ansible.builtin.blockinfile: - dest: /root/.ssh/known_hosts - create: yes - mode: 0600 - marker: "# {mark}: cluster host key for joining" - content: "{{ _pve_cluster_host_addresses }} {{ _pve_cluster_host_key }}" + - name: Temporarily mark that cluster host as known in root user's known_hosts + ansible.builtin.blockinfile: + dest: /root/.ssh/known_hosts + create: yes + mode: 0600 + marker: "# {mark}: cluster host key for joining" + content: "{{ _pve_cluster_host_addresses }} {{ _pve_cluster_host_key }}" + when: "pve_manage_ssh | bool" - name: Add node to Proxmox cluster ansible.builtin.command: >- @@ -30,3 +32,4 @@ state: absent mode: 0600 marker: "# {mark}: cluster host key for joining" + when: "pve_manage_ssh | bool"
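
For reviewers who want to exercise the behaviour touched by this series end to end, here is a rough sketch of the group_vars it exercises. This is illustrative only: the authoritative variable names and defaults are the ones in defaults/main.yml and README.md, the values are placeholders, and the `pve_ceph_crush_rules` item keys shown (name, class, type, min_size, max_size) are simply the fields the "Modify local crushmap" task looks up above.

```yaml
# Hypothetical group_vars/proxmox.yml used only to illustrate this series;
# check defaults/main.yml for the real defaults before copying anything.
pve_group: proxmox                     # inventory group that gets clustered together
pve_cluster_enabled: yes               # single-node runs also work now that throttle falls back to False
pve_cluster_clustername: labcluster    # example name; joins go through whichever node is picked as _init_node
pve_manage_ssh: yes                    # if false, replicate ssh_cluster_config.yml / pve_add_node.yml yourself
pve_reboot_on_kernel_update: true      # reboot after a PVE kernel update (one node at a time when clustered)
pve_reboot_on_kernel_update_delay: 60  # example pre/post reboot delay

# Each rule is matched by name in the crushmap tasks; class/type/min_size/max_size
# are substituted into the decompressed crushmap before it is validated with crushtool.
pve_ceph_crush_rules:
  - name: hdd_replicated               # example rule name
    class: hdd                         # optional device class ("step take default class hdd")
    type: host                         # failure domain ("step chooseleaf firstn 0 type host")
    min_size: 2
    max_size: 5
```

Note that with `pve_manage_ssh: false`, the temporary known_hosts entry added in pve_add_node.yml is also skipped, so the joining node must already trust the initialization node's host key, as the README change in the last patch points out.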