diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 00000000..0727309c --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,2 @@ +skip_list: + - no-handler diff --git a/LICENSE_IMPORTS b/LICENSE_IMPORTS new file mode 100644 index 00000000..ed6de5ee --- /dev/null +++ b/LICENSE_IMPORTS @@ -0,0 +1,25 @@ +============================================================================== + +The following files are licensed under APL2: + + library/pve_ceph_volume.py (This is a combined version of the original files module_utils/ca_common.py and library/ceph_volume.py) + +The license text from ceph/ceph-ansible is as follows: + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +============================================================================== + +# Licenses for libraries imported in the future should go here diff --git a/README.md b/README.md index c001cced..d48c21dd 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,36 @@ -[![Build Status](https://travis-ci.org/lae/ansible-role-proxmox.svg?branch=master)](https://travis-ci.org/lae/ansible-role-proxmox) [![Galaxy Role](https://img.shields.io/badge/ansible--galaxy-proxmox-blue.svg)](https://galaxy.ansible.com/lae/proxmox/) lae.proxmox =========== -Installs and configures a Proxmox 5.x/6.x cluster with the following features: +Installs and configures Proxmox Virtual Environment 6.x/7.x on Debian servers. -- Ensures all hosts can connect to one another as root -- Ability to create/manage groups, users, access control lists and storage -- Ability to create or add nodes to a PVE cluster -- Ability to setup Ceph on the nodes -- IPMI watchdog support -- BYO HTTPS certificate support -- Ability to use either `pve-no-subscription` or `pve-enterprise` repositories +This role allows you to deploy and manage single-node PVE installations and PVE +clusters (3+ nodes) on Debian Buster (10) and Bullseye (11). You are able to +configure the following with the assistance of this role: + + - PVE RBAC definitions (roles, groups, users, and access control lists) + - PVE Storage definitions + - [`datacenter.cfg`][datacenter-cfg] + - HTTPS certificates for the Proxmox Web GUI (BYO) + - PVE repository selection (e.g. `pve-no-subscription` or `pve-enterprise`) + - Watchdog modules (IPMI and NMI) with applicable pve-ha-manager config + - ZFS module setup and ZED notification email + +With clustering enabled, this role does (or allows you to do) the following: + + - Ensure all hosts can connect to one another as root over SSH + - Initialize a new PVE cluster (or possibly adopt an existing one) + - Create or add new nodes to a PVE cluster + - Setup Ceph on a PVE cluster + - Create and manage high availability groups + +## Support/Contributing + +For support or if you'd like to contribute to this role but want guidance, feel +free to join this Discord server: https://discord.gg/cjqr6Fg. 
Please note, this +is a temporary invite, so you'll need to wait for @lae to assign you a role, +otherwise Discord will remove you from the server when you log out. ## Quickstart @@ -30,20 +48,15 @@ Copy the following playbook to a file like `install_proxmox.yml`: - hosts: all become: True roles: - - { - role: geerlingguy.ntp, - ntp_manage_config: true, - ntp_servers: [ - clock.sjc.he.net, - clock.fmt.he.net, - clock.nyc.he.net - ] - } - - { - role: lae.proxmox, - pve_group: all, - pve_reboot_on_kernel_update: true - } + - role: geerlingguy.ntp + ntp_manage_config: true + ntp_servers: + - clock.sjc.he.net + - clock.fmt.he.net + - clock.nyc.he.net + - role: lae.proxmox + pve_group: all + pve_reboot_on_kernel_update: true Install this role and a role for configuring NTP: @@ -63,12 +76,7 @@ file containing a list of hosts). Once complete, you should be able to access your Proxmox VE instance at `https://$SSH_HOST_FQDN:8006`. -## Support/Contributing - -For support or if you'd like to contribute to this role but want guidance, feel -free to join this Discord server: https://discord.gg/cjqr6Fg - -## Deploying a fully-featured PVE 5.x cluster +## Deploying a fully-featured PVE 7.x cluster Create a new playbook directory. We call ours `lab-cluster`. Our playbook will eventually look like this, but yours does not have to follow all of the steps: @@ -195,10 +203,6 @@ pvecluster. Here, a file lookup is used to read the contents of a file in the playbook, e.g. `files/pve01/lab-node01.key`. You could possibly just use host variables instead of files, if you prefer. -`pve_ssl_letsencrypt` allows to obtain a Let's Encrypt SSL certificate for -pvecluster. The Ansible role [systemli.letsencrypt](https://galaxy.ansible.com/systemli/letsencrypt/) -needs to be installed first in order to use this function. - `pve_cluster_enabled` enables the role to perform all cluster management tasks. This includes creating a cluster if it doesn't exist, or adding nodes to the existing cluster. There are checks to make sure you're not mixing nodes that @@ -209,8 +213,8 @@ must already exist) to access PVE and gives them the Administrator role as part of the `ops` group. Read the **User and ACL Management** section for more info. `pve_storages` allows to create different types of storage and configure them. -The backend needs to be supported by [Proxmox](https://pve.proxmox.com/pve-docs/chapter-pvesm.html). -Read the **Storage Management** section for more info. +The backend needs to be supported by [Proxmox][pvesm]. Read the **Storage +Management** section for more info. `pve_ssh_port` allows you to change the SSH port. If your SSH is listening on a port other than the default 22, please set this variable. If a new node is joining the cluster, the PVE cluster needs to communicate once via SSH. @@ -220,7 +224,7 @@ would make to your SSH server config. This is useful if you use another role to manage your SSH server. Note that setting this to false is not officially supported, you're on your own to replicate the changes normally made in -ssh_cluster_config.yml. +`ssh_cluster_config.yml` and `pve_add_node.yml`. `interfaces_template` is set to the path of a template we'll use for configuring the network on these Debian machines. This is only necessary if you want to @@ -354,29 +358,24 @@ serially during a maintenance period.) It will also enable the IPMI watchdog. 
- hosts: pve01 become: True roles: - - { - role: geerlingguy.ntp, - ntp_manage_config: true, - ntp_servers: [ - clock.sjc.he.net, - clock.fmt.he.net, - clock.nyc.he.net - ] - } - - { - role: lae.proxmox, - pve_group: pve01, - pve_cluster_enabled: yes, - pve_reboot_on_kernel_update: true, + - role: geerlingguy.ntp + ntp_manage_config: true + ntp_servers: + - clock.sjc.he.net + - clock.fmt.he.net + - clock.nyc.he.net + - role: lae.proxmox + pve_group: pve01 + pve_cluster_enabled: yes + pve_reboot_on_kernel_update: true pve_watchdog: ipmi - } ## Role Variables ``` [variable]: [default] #[description/purpose] pve_group: proxmox # host group that contains the Proxmox hosts to be clustered together -pve_repository_line: "deb http://download.proxmox.com/debian/pve stretch pve-no-subscription" # apt-repository configuration - change to enterprise if needed (although TODO further configuration may be needed) +pve_repository_line: "deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription" # apt-repository configuration - change to enterprise if needed (although TODO further configuration may be needed) pve_remove_subscription_warning: true # patches the subscription warning messages in proxmox if you are using the community edition pve_extra_packages: [] # Any extra packages you may want to install, e.g. ngrep pve_run_system_upgrades: false # Let role perform system upgrades @@ -391,8 +390,9 @@ pve_watchdog_ipmi_timeout: 10 # Number of seconds the watchdog should wait pve_zfs_enabled: no # Specifies whether or not to install and configure ZFS packages # pve_zfs_options: "" # modprobe parameters to pass to zfs module on boot/modprobe # pve_zfs_zed_email: "" # Should be set to an email to receive ZFS notifications +pve_zfs_create_volumes: [] # List of ZFS Volumes to create (to use as PVE Storages). See section on Storage Management. pve_ceph_enabled: false # Specifies whether or not to install and configure Ceph packages. See below for an example configuration. -pve_ceph_repository_line: "deb http://download.proxmox.com/debian/ceph-nautilus buster main" # apt-repository configuration. Will be automatically set for 5.x and 6.x (Further information: https://pve.proxmox.com/wiki/Package_Repositories) +pve_ceph_repository_line: "deb http://download.proxmox.com/debian/ceph-pacific bullseye main" # apt-repository configuration. Will be automatically set for 6.x and 7.x (Further information: https://pve.proxmox.com/wiki/Package_Repositories) pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" # Ceph public network # pve_ceph_cluster_network: "" # Optional, if the ceph cluster network is different from the public network (see https://pve.proxmox.com/pve-docs/chapter-pveceph.html#pve_ceph_install_wizard) pve_ceph_nodes: "{{ pve_group }}" # Host group containing all Ceph nodes @@ -405,7 +405,6 @@ pve_ceph_fs: [] # List of CephFS filesystems to create pve_ceph_crush_rules: [] # List of CRUSH rules to create # pve_ssl_private_key: "" # Should be set to the contents of the private key to use for HTTPS # pve_ssl_certificate: "" # Should be set to the contents of the certificate to use for HTTPS -pve_ssl_letsencrypt: false # Specifies whether or not to obtain a SSL certificate using Let's Encrypt pve_roles: [] # Added more roles with specific privileges. See section on User Management. pve_groups: [] # List of group definitions to manage in PVE. See section on User Management. pve_users: [] # List of user definitions to manage in PVE. 
See section on User Management. @@ -454,8 +453,8 @@ pve_cluster_ha_groups: restricted: 0 ``` -All configuration options supported in the datacenter.cfg file are documented in the -[Proxmox manual datacenter.cfg section][datacenter-cfg]. +All configuration options supported in the datacenter.cfg file are documented +in the [Proxmox manual datacenter.cfg section][datacenter-cfg]. In order for live reloading of network interfaces to work via the PVE web UI, you need to install the `ifupdown2` package. Note that this will remove @@ -537,14 +536,14 @@ pve_acls: - test_users ``` -Refer to `library/proxmox_role.py` [link][user-module] and +Refer to `library/proxmox_role.py` [link][user-module] and `library/proxmox_acl.py` [link][acl-module] for module documentation. ## Storage Management You can use this role to manage storage within Proxmox VE (both in single server deployments and cluster deployments). For now, the only supported -types are `dir`, `rbd`, `nfs`, `cephfs` ,`lvm` and `lvmthin`. +types are `dir`, `rbd`, `nfs`, `cephfs`, `lvm`, `lvmthin`, and `zfspool`. Here are some examples. ``` @@ -588,6 +587,26 @@ pve_storages: - 10.0.0.1 - 10.0.0.2 - 10.0.0.3 + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/data + sparse: true +``` + +Currently the `zfspool` type can be used only for `images` and `rootdir` contents. +If you want to store other content types on a ZFS volume, you need to specify +them with type `dir`, path `/<pool>/<volume>`, and add an entry in +`pve_zfs_create_volumes`. This example adds an `iso` storage on a ZFS pool: + +``` +pve_zfs_create_volumes: + - rpool/iso +pve_storages: + - name: iso + type: dir + path: /rpool/iso + content: [ "iso" ] ``` Refer to `library/proxmox_storage.py` [link][storage-module] for module @@ -627,7 +646,8 @@ pve_ceph_osds: block.db: /dev/sdb1 encrypted: true # Crush rules for different storage classes -# By default 'type' is set to host, you can find valid types at (https://docs.ceph.com/en/latest/rados/operations/crush-map/) +# By default 'type' is set to host, you can find valid types at +# (https://docs.ceph.com/en/latest/rados/operations/crush-map/) # listed under 'TYPES AND BUCKETS' pve_ceph_crush_rules: - name: replicated_rule @@ -675,15 +695,40 @@ pve_ceph_fs: `pve_ceph_network` by default uses the `ipaddr` filter, which requires the `netaddr` library to be installed and usable by your Ansible controller. -`pve_ceph_nodes` by default uses `pve_group`, this parameter allows to specify on which nodes install Ceph (e.g. if you don't want to install Ceph on all your nodes). +`pve_ceph_nodes` by default uses `pve_group`; this parameter allows you to specify +on which nodes to install Ceph (e.g. if you don't want to install Ceph on all your +nodes). + +`pve_ceph_osds` by default creates unencrypted ceph volumes. To use encrypted +volumes, the parameter `encrypted` has to be set to `true` per drive. + +## Developer Notes + +When developing new features or fixing something in this role, you can test out +your changes by using Vagrant (only libvirt is supported currently). The +playbook can be found in `tests/vagrant` (so be sure to modify group variables +as needed). Be sure to test any changes on both Debian 10 and 11 (update the +Vagrantfile locally to use `debian/buster64`) before submitting a PR. + +You can also specify an apt caching proxy (e.g. `apt-cacher-ng`; it must +run on port 3142) with the `APT_CACHE_HOST` environment variable to speed up +package downloads if you have one running locally in your environment. 
The +vagrant playbook will detect whether or not the caching proxy is available and +only use it if it is accessible from your network, so you could just +permanently set this variable in your development environment if you prefer. + +For example, you could run the following to show verbose/easier to read output, +use a caching proxy, and keep the VMs running if you run into an error (so that +you can troubleshoot it and/or run `vagrant provision` after fixing): -`pve_ceph_osds` by default creates unencrypted ceph volumes. To use encrypted volumes the parameter `encrypted` has to be set per drive to `true`. + APT_CACHE_HOST=10.71.71.10 ANSIBLE_STDOUT_CALLBACK=debug vagrant up --no-destroy-on-error ## Contributors Musee Ullah ([@lae](https://github.com/lae), ) - Main developer Fabien Brachere ([@Fbrachere](https://github.com/Fbrachere)) - Storage config support Gaudenz Steinlin ([@gaundez](https://github.com/gaudenz)) - Ceph support, etc +Richard Scott ([@zenntrix](https://github.com/zenntrix)) - Ceph support, PVE 7.x support, etc Thoralf Rickert-Wendt ([@trickert76](https://github.com/trickert76)) - PVE 6.x support, etc Engin Dumlu ([@roadrunner](https://github.com/roadrunner)) Jonas Meurer ([@mejo-](https://github.com/mejo-)) @@ -695,6 +740,7 @@ Michael Holasek ([@mholasek](https://github.com/mholasek)) [pve-cluster]: https://pve.proxmox.com/wiki/Cluster_Manager [install-ansible]: http://docs.ansible.com/ansible/intro_installation.html [pvecm-network]: https://pve.proxmox.com/pve-docs/chapter-pvecm.html#_separate_cluster_network +[pvesm]: https://pve.proxmox.com/pve-docs/chapter-pvesm.html [user-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_user.py [group-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_group.py [acl-module]: https://github.com/lae/ansible-role-proxmox/blob/master/library/proxmox_group.py diff --git a/Vagrantfile b/Vagrantfile index b3a5d02e..90a9dc10 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,10 +1,11 @@ Vagrant.configure("2") do |config| - config.vm.box = "debian/buster64" + config.vm.box = "debian/bullseye64" config.vm.provider :libvirt do |libvirt| - libvirt.memory = 2048 + libvirt.memory = 2560 libvirt.cpus = 2 - libvirt.storage :file, :size => '2G' + libvirt.storage :file, :size => '128M' + libvirt.storage :file, :size => '128M' end N = 3 diff --git a/defaults/main.yml b/defaults/main.yml index 53fd494e..b9d4f7a0 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -16,8 +16,9 @@ pve_watchdog_ipmi_timeout: 10 pve_zfs_enabled: no # pve_zfs_options: "parameters to pass to zfs module" # pve_zfs_zed_email: "email address for zfs events" +pve_zfs_create_volumes: [] pve_ceph_enabled: false -pve_ceph_repository_line: "deb http://download.proxmox.com/debian/{% if ansible_distribution_release == 'stretch' %}ceph-luminous stretch{% else %}ceph-nautilus buster{% endif %} main" +pve_ceph_repository_line: "deb http://download.proxmox.com/debian/{% if ansible_distribution_release == 'buster' %}ceph-nautilus buster{% else %}ceph-pacific bullseye{% endif %} main" pve_ceph_network: "{{ (ansible_default_ipv4.network +'/'+ ansible_default_ipv4.netmask) | ipaddr('net') }}" pve_ceph_nodes: "{{ pve_group }}" pve_ceph_mon_group: "{{ pve_group }}" @@ -36,7 +37,6 @@ pve_manage_hosts_enabled: yes # pve_cluster_addr1: "{{ ansible_eth1.ipv4.address }} pve_datacenter_cfg: {} pve_cluster_ha_groups: [] -pve_ssl_letsencrypt: false # additional roles for your cluster (f.e. 
for monitoring) pve_roles: [] pve_groups: [] @@ -45,3 +45,4 @@ pve_acls: [] pve_storages: [] pve_ssh_port: 22 pve_manage_ssh: true +pve_hooks: {} diff --git a/files/00_remove_checked_command_buster.patch b/files/00_remove_checked_command_buster.patch deleted file mode 100644 index 1df8ace3..00000000 --- a/files/00_remove_checked_command_buster.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff -u /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ---- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -+++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js -@@ -459,7 +459,8 @@ - }, - - checked_command: function(orig_cmd) { -- Proxmox.Utils.API2Request( -+ orig_cmd(); -+ false && Proxmox.Utils.API2Request( - { - url: '/nodes/localhost/subscription', - method: 'GET', diff --git a/files/00_remove_checked_command_stretch.patch b/files/00_remove_checked_command_stretch.patch deleted file mode 100644 index af39a58d..00000000 --- a/files/00_remove_checked_command_stretch.patch +++ /dev/null @@ -1,70 +0,0 @@ -diff -ur /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js ---- /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2018-02-16 12:06:39.000000000 +0000 -+++ /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 2018-03-03 05:43:35.083364592 +0000 -@@ -342,37 +342,6 @@ - Ext.Ajax.requestopts); - }, - -- checked_command: function(orig_cmd) { -- Proxmox.Utils.API2Request({ -- url: '/nodes/localhost/subscription', -- method: 'GET', -- //waitMsgTarget: me, -- failure: function(response, opts) { -- Ext.Msg.alert(gettext('Error'), response.htmlStatus); -- }, -- success: function(response, opts) { -- var data = response.result.data; -- -- if (data.status !== 'Active') { -- Ext.Msg.show({ -- title: gettext('No valid subscription'), -- icon: Ext.Msg.WARNING, -- msg: Proxmox.Utils.getNoSubKeyHtml(data.url), -- buttons: Ext.Msg.OK, -- callback: function(btn) { -- if (btn !== 'ok') { -- return; -- } -- orig_cmd(); -- } -- }); -- } else { -- orig_cmd(); -- } -- } -- }); -- }, -- - assemble_field_data: function(values, data) { - if (Ext.isObject(data)) { - Ext.Object.each(data, function(name, val) { -diff -ur /usr/share/pve-manager/js/pvemanagerlib.js /usr/share/pve-manager/js/pvemanagerlib.js ---- /usr/share/pve-manager/js/pvemanagerlib.js 2018-02-16 14:07:52.000000000 +0000 -+++ /usr/share/pve-manager/js/pvemanagerlib.js 2018-03-03 05:48:35.567396692 +0000 -@@ -13441,7 +13441,7 @@ - var version_btn = new Ext.Button({ - text: gettext('Package versions'), - handler: function(){ -- Proxmox.Utils.checked_command(function() { me.showVersions(); }); -+ me.showVersions(); - } - }); - -@@ -13691,7 +13691,7 @@ - { - text: gettext('System Report'), - handler: function() { -- Proxmox.Utils.checked_command(function (){ me.showReport(); }); -+ me.showReport(); - } - } - ], -@@ -30605,7 +30605,6 @@ - handler: function(data) { - me.login = null; - me.updateLoginData(data); -- Proxmox.Utils.checked_command(function() {}); // display subscription status - } - }); - } diff --git a/files/01_pass_correct_format_for_linkX.patch b/files/01_pass_correct_format_for_linkX.patch deleted file mode 100644 index 2cd55371..00000000 --- a/files/01_pass_correct_format_for_linkX.patch +++ /dev/null @@ -1,26 +0,0 @@ -to unbreak joining via SSH with an explicit link address. 
- -Signed-off-by: Fabian Grünbichler ---- - data/PVE/CLI/pvecm.pm | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/data/PVE/CLI/pvecm.pm b/data/PVE/CLI/pvecm.pm -index b381f4f..fe099d4 100755 ---- /usr/share/perl5/PVE/CLI/pvecm.pm -+++ /usr/share/perl5/PVE/CLI/pvecm.pm -@@ -405,9 +405,11 @@ __PACKAGE__->register_method ({ - push @$cmd, '--nodeid', $param->{nodeid} if $param->{nodeid}; - push @$cmd, '--votes', $param->{votes} if defined($param->{votes}); - -+ my $link_desc = get_standard_option('corosync-link'); -+ - foreach my $link (keys %$links) { - push @$cmd, "--link$link", PVE::JSONSchema::print_property_string( -- $links->{$link}, get_standard_option('corosync-link')); -+ $links->{$link}, $link_desc->{format}); - } - - # this will be used as fallback if no links are specified --- - diff --git a/files/proxmox-ve-release-5.x.asc b/files/proxmox-ve-release-5.x.asc deleted file mode 100644 index 1c3dffd5..00000000 --- a/files/proxmox-ve-release-5.x.asc +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v2.2.1 (GNU/Linux) - -mQINBFfDyocBEADBqGXU2sVZeyJhjvcYHbkzcjfP9OKBgkPmpKNG8kP+fT+OsX8U -FCAmIKXMOd/3fWhdSv7V2/3JaiEmsYn1a1vWhIlgFj2VonE/YS9JqqW7suodSon0 -b52XNwxRisOapU40EOUEjSGhoVUuvNNFkXImKEdtIgzVyyFCf2pj+TXBGWhOtCtK -du/zctioq85HR3Zk1YokJCho4/uRU7bElmLNFHSmI7jAU33jmU6ZI3MpxTFq0bd5 -+75IQYOQi4SLktE/xFZPaX54DlIzYCaVvjr57/DKOlwa4nnL0PGbfdS9rwBVxN1E -VvRsLG3z0crtFtunpJxKN1TI4HM/vZzfvTt9FH38Xx1yhwlUZKqx42YCImYJSBY/ -mxx/XjVZqaGSqBoSLgI+zKmOPEoo6i2nhZhCrm/GuuEV+hP5MHch3YhqO2/xYcCP -eeM9CU8ham84m9uCJ6ol8H0iiImztHXHCGWJ1AFq567NOXE407vQNpM2z49bNlR4 -QYvlXuvM0wJLKo+LFTftj6SjyweMdd3FRzxGUDQaG9YjpBe20etBS3ETTySiDnxN -eLVRe2nKG+e36VugaELJ+T8GZlhT+2s34EPrS4WUdqpwsrIouMXPeMPp0z3VO/7A -qyTlTK5TaDgLj+LQIZF9dI3aXDhH1Z9OKXsS2m7tSBJeBCY15jDFH9Og2wARAQAB -tElQcm94bW94IFZpcnR1YWwgRW52aXJvbm1lbnQgNS54IFJlbGVhc2UgS2V5IDxw -cm94bW94LXJlbGVhc2VAcHJveG1veC5jb20+iQI/BBMBCAApBQJXw8qHAhsDBQkS -zAMABwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQDZoZUOLvBgM6RA/+KKtA -TciGES4TEgsLuHPoM5E0X4JWhMq2jN6znmzo5kIVmHXEk4LxeeppMoICsc6DMoDL -9n4M5m5YqlIYAs78SrxjSdDspPeV2/gPegDD/U8rx+OhGNBORpewSi9jq6iq/bWN -kT2Pwvk/lmDmnHebtCWvxB2y0mkcaAw87w8c5xYgOnnL/slwcegUN7/m6pcien5b -Ijixt75Kq3ol45y4QRnkYDnoejMnlinEB4U2qfdkiVxEpwLZ97ipKo+wIQ9tMqmk -q8xVoT39+JJESBAaJO3P19NSJiLtNjkPpoNFNOYJRubY9wD9/2Q1jx7V04U/4zuh -AppsFcGt/cn5K0Vy6KqPgUAyyMjRB/+MKpL/4zdFcpwy6gu7c0eqMdXw1lW9YYF0 -XQhhxVuet1xbVazIH4NgkwCJvOPVcJwILkmGorTtJPvHgS/V+NFYh3n6Q0GWY5gC -+dturtMnLxsi6GrE0mamqHuJ7mW3zli2zClURCJaePwdd8i+anZzQwT2wg3oXBtA -gOZgeZFuC4OrGnfK5hj9n/LV1PjMwEmliiFDNRPOGDrmKbn7LBocem3ams0dKxfg -Eh/97QpKJh15NM677TiQmzbFmBBPA5BPLRzPlVi4eemDyv5ggYdSckz+sCiUMzy3 -x7aL/VB66uF1IYQFp5/WeGRMOv3n3Bkq5S2aEO4= -=hfNW ------END PGP PUBLIC KEY BLOCK----- diff --git a/files/proxmox-ve-release-7.x.asc b/files/proxmox-ve-release-7.x.asc new file mode 100644 index 00000000..fefd83d1 --- /dev/null +++ b/files/proxmox-ve-release-7.x.asc @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF+o6NQBEADdEeL7hTwBlX9kbRwf3ESa+H/LOjEUGEvpZkk5kMysIi+4DTXL +79LoZwQZXGKM3W6it7kF/YkE8hcGnHrFYKCcfmu3rGpQSF8v867xR6km0bzIRvLQ +XYYO3SL3SBDOlp4OuGwbcb+E9/oacVhfZY6d94AhGx2rueDW+YcUDC/nQrDnIJfd ++yurm1sHoZMG4cx43Y9Q5BlckyZN1Gt7KFSETo9seayxJ47+IOMCw3s1nOyXWtUD +7YrihSjQmhLd4jOJgLy7sSwOHnkrVfvvhIz6JfFn/ccGvPqK72dddgX2aF/VT5Lk +SF9d6yi5Ea4/AENMLqljnw74b+uvOa6wT4zjqQHTu7Wj3xLr711o9VsSbGSuRBP3 +Stwj2z6Xy8fTKChN8DkUal6HEtIVBvCs1jtioqdigoUY0cnHwGor1/yKMWsKjt5t +qWjGMnBDdLWngTM61yh4WtvxDh1zLK5Q0xGaIYDPrgcRhnO456+8JIGVoQVg6bu5 
+g5m9ua1KRTsr+TaqctDwDMqhhzqDAZpGuNgpHF7ycDYrof7sYFgQ1n3S/+yCpYJx +TJOIvAdmkUTuHwDRkXqGvR4eyGy8/RZ0KMQ7oVJbMyZextOZBbUE95FbE7EB8iOt +9NZHH2pgBZojYhD9P4+xwSyTymR2t/SdpvmOROjOtIDxbQqdBvmDUy33DQARAQAB +tDpQcm94bW94IEJ1bGxzZXllIFJlbGVhc2UgS2V5IDxwcm94bW94LXJlbGVhc2VA +cHJveG1veC5jb20+iQJUBBMBCgA+FiEEKBOaL4ML1oR4oaAf3UujkX4jv1kFAl+o +6NQCGwMFCRLMAwAFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQ3UujkX4jv1k3 +qxAAq5ggmp54L/LCdfqNlMlloqiWerxhEYDZ+bBq6IFJtOcctm4kyrRUizRo2SUi +O1wmPLjWcfQ1gUYP9ZgECNTzOGi7o9z+lk76tQiH6GeckLOxTqvilOaxwfpJrvKD +BOp8e0yl7BSyNtnbMpaX5+vH9gNl+pWpzFuNmBMz85jkuI1qaoMDAbzY7Tg4YmkR +O6Z/6Mj1F0vyQldTIB45hUtdzOkNaE/Pa4jBhb8jZ2DPGbz7QqEUvsdbR06FaiFL +tZmLBQ6/yTXtUy/SbyIr+LlNmThkifohqzP9VGFy3DYuLskL/GF9w1Jb4TE5vobc +U6DdY1nF5j4BbfwdaiOOm5n3dIy7QtqCZ0apDXTpn211GszjCL4AfdhsfvovBUYW +LAE6bEZImJUqiyTW/a96zDbc1zulAtDvuZNWH05nlrdNomTk70HDEth/GQ02O4jK +bZxwWe/CWB0e9CvAssEFJZ5jId7guA0WsIz689tBJGYVMPc0WFL9Kuw3gweMd3TT +/r/lqy0eDgsxT2ym1/Jg9Kj6Ko2rAfX/Sr9OdXwE2X8e745Z9HTABtxgSnFwCnfv +/9QHrlfnn1C4e7QEcTuoen8JSOKlTYzoeFGDRuVi5uI+lFfIF1DZiWPnnvSmyYp3 +DPj7a1gXa3vX3EiIHWNYZzGEhyblqT9Oj7HFiFRGK2gWh5M= +=BaNd +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/library/ceph_volume.py b/library/ceph_volume.py deleted file mode 100755 index c9aa50ba..00000000 --- a/library/ceph_volume.py +++ /dev/null @@ -1,682 +0,0 @@ -#!/usr/bin/python -import datetime -import copy -import json -import os - -ANSIBLE_METADATA = { - 'metadata_version': '1.0', - 'status': ['preview'], - 'supported_by': 'community' -} - -DOCUMENTATION = ''' ---- -module: ceph_volume - -short_description: Create ceph OSDs with ceph-volume - -description: - - Using the ceph-volume utility available in Ceph this module - can be used to create ceph OSDs that are backed by logical volumes. - - Only available in ceph versions luminous or greater. - -options: - cluster: - description: - - The ceph cluster name. - required: false - default: ceph - objectstore: - description: - - The objectstore of the OSD, either filestore or bluestore - - Required if action is 'create' - required: false - choices: ['bluestore', 'filestore'] - default: bluestore - action: - description: - - The action to take. Creating OSDs and zapping or querying devices. - required: true - choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory'] - default: create - data: - description: - - The logical volume name or device to use for the OSD data. - required: true - data_vg: - description: - - If data is a lv, this must be the name of the volume group it belongs to. - required: false - osd_fsid: - description: - - The OSD FSID - required: false - journal: - description: - - The logical volume name or partition to use as a filestore journal. - - Only applicable if objectstore is 'filestore'. - required: false - journal_vg: - description: - - If journal is a lv, this must be the name of the volume group it belongs to. - - Only applicable if objectstore is 'filestore'. - required: false - db: - description: - - A partition or logical volume name to use for block.db. - - Only applicable if objectstore is 'bluestore'. - required: false - db_vg: - description: - - If db is a lv, this must be the name of the volume group it belongs to. # noqa E501 - - Only applicable if objectstore is 'bluestore'. - required: false - wal: - description: - - A partition or logical volume name to use for block.wal. - - Only applicable if objectstore is 'bluestore'. 
- required: false - wal_vg: - description: - - If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501 - - Only applicable if objectstore is 'bluestore'. - required: false - crush_device_class: - description: - - Will set the crush device class for the OSD. - required: false - dmcrypt: - description: - - If set to True the OSD will be encrypted with dmcrypt. - required: false - batch_devices: - description: - - A list of devices to pass to the 'ceph-volume lvm batch' subcommand. - - Only applicable if action is 'batch'. - required: false - osds_per_device: - description: - - The number of OSDs to create per device. - - Only applicable if action is 'batch'. - required: false - default: 1 - journal_size: - description: - - The size in MB of filestore journals. - - Only applicable if action is 'batch'. - required: false - default: 5120 - block_db_size: - description: - - The size in bytes of bluestore block db lvs. - - The default of -1 means to create them as big as possible. - - Only applicable if action is 'batch'. - required: false - default: -1 - report: - description: - - If provided the --report flag will be passed to 'ceph-volume lvm batch'. - - No OSDs will be created. - - Results will be returned in json format. - - Only applicable if action is 'batch'. - required: false - containerized: - description: - - Wether or not this is a containerized cluster. The value is - assigned or not depending on how the playbook runs. - required: false - default: None - list: - description: - - List potential Ceph LVM metadata on a device - required: false - inventory: - description: - - List storage device inventory. - required: false - -author: - - Andrew Schoen (@andrewschoen) - - Sebastien Han -''' - -EXAMPLES = ''' -- name: set up a filestore osd with an lv data and a journal partition - ceph_volume: - objectstore: filestore - data: data-lv - data_vg: data-vg - journal: /dev/sdc1 - action: create - -- name: set up a bluestore osd with a raw device for data - ceph_volume: - objectstore: bluestore - data: /dev/sdc - action: create - - -- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501 - ceph_volume: - objectstore: bluestore - data: data-lv - data_vg: data-vg - db: /dev/sdc1 - wal: /dev/sdc2 - action: create -''' - - -from ansible.module_utils.basic import AnsibleModule # noqa 4502 - - -def fatal(message, module): - ''' - Report a fatal error and exit - ''' - - if module: - module.fail_json(msg=message, changed=False, rc=1) - else: - raise(Exception(message)) - - -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, 'run', - '--rm', '--privileged', '--net=host', '--ipc=host', - '--ulimit', 'nofile=1024:4096', - '-v', '/run/lock/lvm:/run/lock/lvm:z', - '-v', '/var/run/udev/:/var/run/udev/:z', - '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/run/lvm/:/run/lvm/', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def build_ceph_volume_cmd(action, container_image, cluster=None): - ''' - Build the ceph-volume command - ''' - - if container_image: - binary = 'ceph-volume' - cmd = container_exec( - binary, container_image) - else: - binary = ['ceph-volume'] - cmd = binary - - if cluster: - cmd.extend(['--cluster', cluster]) - - cmd.extend(action) - - 
return cmd - - -def exec_command(module, cmd): - ''' - Execute command - ''' - - rc, out, err = module.run_command(cmd) - return rc, cmd, out, err - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - -def get_data(data, data_vg): - if data_vg: - data = '{0}/{1}'.format(data_vg, data) - return data - - -def get_journal(journal, journal_vg): - if journal_vg: - journal = '{0}/{1}'.format(journal_vg, journal) - return journal - - -def get_db(db, db_vg): - if db_vg: - db = '{0}/{1}'.format(db_vg, db) - return db - - -def get_wal(wal, wal_vg): - if wal_vg: - wal = '{0}/{1}'.format(wal_vg, wal) - return wal - - -def batch(module, container_image): - ''' - Batch prepare OSD devices - ''' - - # get module variables - cluster = module.params['cluster'] - objectstore = module.params['objectstore'] - batch_devices = module.params.get('batch_devices', None) - crush_device_class = module.params.get('crush_device_class', None) - journal_size = module.params.get('journal_size', None) - block_db_size = module.params.get('block_db_size', None) - block_db_devices = module.params.get('block_db_devices', None) - wal_devices = module.params.get('wal_devices', None) - dmcrypt = module.params.get('dmcrypt', None) - osds_per_device = module.params.get('osds_per_device', 1) - - if not osds_per_device: - fatal('osds_per_device must be provided if action is "batch"', module) - - if osds_per_device < 1: - fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa E501 - - if not batch_devices: - fatal('batch_devices must be provided if action is "batch"', module) - - # Build the CLI - action = ['lvm', 'batch'] - cmd = build_ceph_volume_cmd(action, container_image, cluster) - cmd.extend(['--%s' % objectstore]) - cmd.append('--yes') - - if container_image: - cmd.append('--prepare') - - if crush_device_class: - cmd.extend(['--crush-device-class', crush_device_class]) - - if dmcrypt: - cmd.append('--dmcrypt') - - if osds_per_device > 1: - cmd.extend(['--osds-per-device', str(osds_per_device)]) - - if objectstore == 'filestore': - cmd.extend(['--journal-size', journal_size]) - - if objectstore == 'bluestore' and block_db_size != '-1': - cmd.extend(['--block-db-size', block_db_size]) - - cmd.extend(batch_devices) - - if block_db_devices: - cmd.extend(['--db-devices', ' '.join(block_db_devices)]) - - if wal_devices: - cmd.extend(['--wal-devices', ' '.join(wal_devices)]) - - return cmd - - -def ceph_volume_cmd(subcommand, container_image, cluster=None): - ''' - Build ceph-volume initial command - ''' - - if container_image: - binary = 'ceph-volume' - cmd = container_exec( - binary, container_image) - else: - binary = ['ceph-volume'] - cmd = binary - - if cluster: - cmd.extend(['--cluster', cluster]) - - cmd.append('lvm') - cmd.append(subcommand) - - return cmd - - -def prepare_or_create_osd(module, action, container_image): - ''' - Prepare or create OSD devices - ''' - - # get module variables - cluster = module.params['cluster'] - objectstore = module.params['objectstore'] - data = module.params['data'] - data_vg = module.params.get('data_vg', None) - data = get_data(data, data_vg) - journal = module.params.get('journal', None) - journal_vg = module.params.get('journal_vg', None) - db = module.params.get('db', None) - db_vg = module.params.get('db_vg', None) - wal = module.params.get('wal', None) - 
wal_vg = module.params.get('wal_vg', None) - crush_device_class = module.params.get('crush_device_class', None) - dmcrypt = module.params.get('dmcrypt', None) - - # Build the CLI - action = ['lvm', action] - cmd = build_ceph_volume_cmd(action, container_image, cluster) - cmd.extend(['--%s' % objectstore]) - cmd.append('--data') - cmd.append(data) - - if journal: - journal = get_journal(journal, journal_vg) - cmd.extend(['--journal', journal]) - - if db: - db = get_db(db, db_vg) - cmd.extend(['--block.db', db]) - - if wal: - wal = get_wal(wal, wal_vg) - cmd.extend(['--block.wal', wal]) - - if crush_device_class: - cmd.extend(['--crush-device-class', crush_device_class]) - - if dmcrypt: - cmd.append('--dmcrypt') - - return cmd - - -def list_osd(module, container_image): - ''' - List will detect wether or not a device has Ceph LVM Metadata - ''' - - # get module variables - cluster = module.params['cluster'] - data = module.params.get('data', None) - data_vg = module.params.get('data_vg', None) - data = get_data(data, data_vg) - - # Build the CLI - action = ['lvm', 'list'] - cmd = build_ceph_volume_cmd(action, container_image, cluster) - if data: - cmd.append(data) - cmd.append('--format=json') - - return cmd - -def list_storage_inventory(module, container_image): - ''' - List storage inventory. - ''' - - action = ['inventory'] - cmd = build_ceph_volume_cmd(action, container_image) - cmd.append('--format=json') - - return cmd - -def activate_osd(): - ''' - Activate all the OSDs on a machine - ''' - - # build the CLI - action = ['lvm', 'activate'] - container_image = None - cmd = build_ceph_volume_cmd(action, container_image) - cmd.append('--all') - - return cmd - - -def zap_devices(module, container_image): - ''' - Will run 'ceph-volume lvm zap' on all devices, lvs and partitions - used to create the OSD. The --destroy flag is always passed so that - if an OSD was originally created with a raw device or partition for - 'data' then any lvs that were created by ceph-volume are removed. 
- ''' - - # get module variables - data = module.params.get('data', None) - data_vg = module.params.get('data_vg', None) - journal = module.params.get('journal', None) - journal_vg = module.params.get('journal_vg', None) - db = module.params.get('db', None) - db_vg = module.params.get('db_vg', None) - wal = module.params.get('wal', None) - wal_vg = module.params.get('wal_vg', None) - osd_fsid = module.params.get('osd_fsid', None) - - # build the CLI - action = ['lvm', 'zap'] - cmd = build_ceph_volume_cmd(action, container_image) - cmd.append('--destroy') - - if osd_fsid: - cmd.extend(['--osd-fsid', osd_fsid]) - - if data: - data = get_data(data, data_vg) - cmd.append(data) - - if journal: - journal = get_journal(journal, journal_vg) - cmd.extend([journal]) - - if db: - db = get_db(db, db_vg) - cmd.extend([db]) - - if wal: - wal = get_wal(wal, wal_vg) - cmd.extend([wal]) - - return cmd - - -def run_module(): - module_args = dict( - cluster=dict(type='str', required=False, default='ceph'), - objectstore=dict(type='str', required=False, choices=[ - 'bluestore', 'filestore'], default='bluestore'), - action=dict(type='str', required=False, choices=[ - 'create', 'zap', 'batch', 'prepare', 'activate', 'list', - 'inventory'], default='create'), # noqa 4502 - data=dict(type='str', required=False), - data_vg=dict(type='str', required=False), - journal=dict(type='str', required=False), - journal_vg=dict(type='str', required=False), - db=dict(type='str', required=False), - db_vg=dict(type='str', required=False), - wal=dict(type='str', required=False), - wal_vg=dict(type='str', required=False), - crush_device_class=dict(type='str', required=False), - dmcrypt=dict(type='bool', required=False, default=False), - batch_devices=dict(type='list', required=False, default=[]), - osds_per_device=dict(type='int', required=False, default=1), - journal_size=dict(type='str', required=False, default='5120'), - block_db_size=dict(type='str', required=False, default='-1'), - block_db_devices=dict(type='list', required=False, default=[]), - wal_devices=dict(type='list', required=False, default=[]), - report=dict(type='bool', required=False, default=False), - containerized=dict(type='str', required=False, default=False), - osd_fsid=dict(type='str', required=False), - ) - - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) - - result = dict( - changed=False, - stdout='', - stderr='', - rc='', - start='', - end='', - delta='', - ) - - if module.check_mode: - return result - - # start execution - startd = datetime.datetime.now() - - # get the desired action - action = module.params['action'] - - # will return either the image name or None - container_image = is_containerized() - - # Assume the task's status will be 'changed' - changed = True - - if action == 'create' or action == 'prepare': - # First test if the device has Ceph LVM Metadata - rc, cmd, out, err = exec_command( - module, list_osd(module, container_image)) - - # list_osd returns a dict, if the dict is empty this means - # we can not check the return code since it's not consistent - # with the plain output - # see: http://tracker.ceph.com/issues/36329 - # FIXME: it's probably less confusing to check for rc - - # convert out to json, ansible returns a string... 
- try: - out_dict = json.loads(out) - except ValueError: - fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501 - - if out_dict: - data = module.params['data'] - result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 - data) - result['rc'] = 0 - module.exit_json(**result) - - # Prepare or create the OSD - rc, cmd, out, err = exec_command( - module, prepare_or_create_osd(module, action, container_image)) - - elif action == 'activate': - if container_image: - fatal( - "This is not how container's activation happens, nothing to activate", module) # noqa E501 - - # Activate the OSD - rc, cmd, out, err = exec_command( - module, activate_osd()) - - elif action == 'zap': - # Zap the OSD - rc, cmd, out, err = exec_command( - module, zap_devices(module, container_image)) - - elif action == 'list': - # List Ceph LVM Metadata on a device - rc, cmd, out, err = exec_command( - module, list_osd(module, container_image)) - - elif action == 'inventory': - # List storage device inventory. - rc, cmd, out, err = exec_command( - module, list_storage_inventory(module, container_image)) - - elif action == 'batch': - # Batch prepare AND activate OSDs - report = module.params.get('report', None) - - # Add --report flag for the idempotency test - report_flags = [ - '--report', - '--format=json', - ] - - cmd = batch(module, container_image) - batch_report_cmd = copy.copy(cmd) - batch_report_cmd.extend(report_flags) - - # Run batch --report to see what's going to happen - # Do not run the batch command if there is nothing to do - rc, cmd, out, err = exec_command( - module, batch_report_cmd) - try: - report_result = json.loads(out) - except ValueError: - strategy_change = "strategy changed" in out - if strategy_change: - out = json.dumps( - {"changed": False, "stdout": out.rstrip("\r\n")}) - rc = 0 - changed = False - else: - out = out.rstrip("\r\n") - result = dict( - cmd=cmd, - stdout=out.rstrip('\r\n'), - stderr=err.rstrip('\r\n'), - rc=rc, - changed=changed, - ) - if strategy_change: - module.exit_json(**result) - module.fail_json(msg='non-zero return code', **result) - - if not report: - # if not asking for a report, let's just run the batch command - changed = report_result['changed'] - if changed: - # Batch prepare the OSD - rc, cmd, out, err = exec_command( - module, batch(module, container_image)) - else: - cmd = batch_report_cmd - - else: - module.fail_json( - msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch" or "inventory".', changed=False, rc=1) # noqa E501 - - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - start=str(startd), - end=str(endd), - delta=str(delta), - rc=rc, - stdout=out.rstrip('\r\n'), - stderr=err.rstrip('\r\n'), - changed=changed, - ) - - if rc != 0: - module.fail_json(msg='non-zero return code', **result) - - module.exit_json(**result) - - -def main(): - run_module() - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/library/proxmox_storage.py b/library/proxmox_storage.py index 19d842b0..29e3a57d 100755 --- a/library/proxmox_storage.py +++ b/library/proxmox_storage.py @@ -22,7 +22,7 @@ type: required: true aliases: [ "storagetype" ] - choices: [ "dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs" ] + choices: [ "dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs", "zfspool" ] description: - Type of storage, must be supported by Proxmox. 
disable: @@ -54,7 +54,7 @@ pool: required: false description: - - Ceph pool name. + - Ceph/ZFS pool name. monhost: required: false type: list @@ -94,6 +94,10 @@ required: false description: - The name of the LVM thin pool. + sparse: + required: false + description: + - Use ZFS thin-provisioning. author: - Fabien Brachere (@fbrachere) @@ -150,6 +154,13 @@ - 10.0.0.1 - 10.0.0.2 - 10.0.0.3 +- name: Create a ZFS storage type + proxmox_storage: + name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/data + sparse: true ''' RETURN = ''' @@ -180,6 +191,7 @@ def __init__(self, module): self.options = module.params['options'] self.vgname = module.params['vgname'] self.thinpool = module.params['thinpool'] + self.sparse = module.params['sparse'] try: self.existing_storages = pvesh.get("storage") @@ -235,6 +247,8 @@ def prepare_storage_args(self): args['vgname'] = self.vgname if self.thinpool is not None: args['thinpool'] = self.thinpool + if self.sparse is not None: + args['sparse'] = self.sparse if self.maxfiles is not None and 'backup' not in self.content: self.module.fail_json(msg="maxfiles is not allowed when there is no 'backup' in content") @@ -307,7 +321,8 @@ def main(): content=dict(type='list', required=True, aliases=['storagetype']), nodes=dict(type='list', required=False, default=None), type=dict(default=None, type='str', required=True, - choices=["dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs"]), + choices=["dir", "nfs", "rbd", "lvm", "lvmthin", "cephfs", + "zfspool"]), disable=dict(required=False, type='bool', default=False), state=dict(default='present', choices=['present', 'absent'], type='str'), path=dict(default=None, required=False, type='str'), @@ -321,6 +336,7 @@ def main(): options=dict(default=None, type='str', required=False), vgname=dict(default=None, type='str', required=False), thinpool=dict(default=None, type='str', required=False), + sparse=dict(default=None, type='bool', required=False), ) module = AnsibleModule( @@ -333,6 +349,7 @@ def main(): ["type", "nfs", ["server", "content", "export"]], ["type", "lvm", ["vgname", "content"]], ["type", "lvmthin", ["vgname", "thinpool", "content"]], + ["type", "zfspool", ["pool", "content"]], ] ) storage = ProxmoxStorage(module) diff --git a/library/pve_ceph_volume.py b/library/pve_ceph_volume.py new file mode 100755 index 00000000..7210f398 --- /dev/null +++ b/library/pve_ceph_volume.py @@ -0,0 +1,147 @@ +#!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule +import datetime + +ANSIBLE_METADATA = { + 'metadata_version': '1.0', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume + +short_description: Query ceph OSDs with ceph-volume + +description: + - Using the ceph-volume utility available in Ceph this module + can be used to query ceph OSDs that are backed by logical volumes. + - Only available in ceph versions luminous or greater. + +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + data: + description: + - The logical volume name or device to use for the OSD data. + required: true + data_vg: + description: + - If data is a lv, this must be the name of the volume group it belongs to. 
+ required: false + +author: + - Andrew Schoen (@andrewschoen) + - Sebastien Han +''' + +EXAMPLES = ''' +- name: query all osds + ceph_volume: + +- name: query single osd on test cluster + ceph_volume: + cluster: test + data: /dev/sdc +''' + +def exec_command(module, cmd, stdin=None): + ''' + Execute command(s) + ''' + binary_data = False + if stdin: + binary_data = True + rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) + return rc, cmd, out, err + +def get_data(data, data_vg): + if data_vg: + data = '{0}/{1}'.format(data_vg, data) + return data + + +def list_osd(module): + ''' + List will detect whether or not a device has Ceph LVM Metadata + ''' + + # get module variables + cluster = module.params['cluster'] + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + + # Build the CLI + action = ['lvm', 'list'] + cmd = ['ceph-volume', '--cluster', cluster] + cmd.extend(action) + if data: + cmd.append(data) + cmd.append('--format=json') + + return cmd + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + data=dict(type='str', required=False), + data_vg=dict(type='str', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + if module.check_mode: + module.exit_json(**result) + + # start execution + startd = datetime.datetime.now() + + # List Ceph LVM Metadata on a device + rc, cmd, out, err = exec_command(module, list_osd(module)) + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + changed=False, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/meta/main.yml b/meta/main.yml index cf72e9af..fff0ad0c 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,17 +1,18 @@ --- galaxy_info: + namespace: lae + role_name: proxmox author: Musee Ullah - description: Installs and configures Proxmox 5.x (for clustering) - company: FireEye, Inc. + description: Installs and configures Proxmox Virtual Environment 6.x/7.x on Debian servers. 
license: MIT - min_ansible_version: 2.4 + min_ansible_version: 2.9 platforms: - name: Debian versions: - - stretch - buster + - bullseye galaxy_tags: - proxmox diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 3d0b6f96..264e1cdb 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -1,22 +1,5 @@ # This is an Ansible version of what "pveceph install" actually does --- -- block: - - name: Install custom Ceph systemd service - copy: - src: /usr/share/doc/pve-manager/examples/ceph.service - dest: /etc/systemd/system/ceph.service - remote_src: true - owner: root - group: root - mode: preserve - notify: 'restart ceph' - - - name: Enable Ceph - systemd: - name: ceph.service - enabled: true - when: - - "ansible_distribution_release == 'stretch'" - block: - name: Create initial Ceph config @@ -54,46 +37,27 @@ when: "inventory_hostname in groups[pve_ceph_mgr_group]" - block: - - name: Get existing ceph volumes - ceph_volume: - action: list - data: "{{ item.device }}" + - name: Query for existing Ceph volumes + pve_ceph_volume: register: _ceph_volume_data - loop: '{{ pve_ceph_osds }}' - tags: ceph_volume - changed_when: false #Merely gets the list of ceph volumes so never changes anything - - - name: Initialize osd variables - set_fact: - _existing_ceph_volumes_tmp: [] - _existing_ceph_volumes: [] - tags: ceph_volume - - name: Determine ceph volumes Step1 - set_fact: - _existing_ceph_volumes_tmp: "{{ _existing_ceph_volumes_tmp + item.stdout | from_json | json_query('*[].devices[]') }}" - with_items: "{{ _ceph_volume_data.results }}" - tags: ceph_volume + - name: Generate a list of active OSDs + ansible.builtin.set_fact: + _existing_ceph_osds: "{{ _ceph_volume_data.stdout | from_json | json_query('*[].devices[]') }}" - - name: Determine ceph volumes Step2 - set_fact: - _existing_ceph_volumes: "{{ _existing_ceph_volumes + [{'device': item}] }}" - with_items: "{{ _existing_ceph_volumes_tmp }}" - tags: ceph_volume - - - name: Change osd list (remove existing osds from the list) - set_fact: - pve_ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" - tags: ceph_volume + - name: Generate list of unprovisioned OSDs + ansible.builtin.set_fact: + _ceph_osds_diff: "{{ _ceph_osds_diff | default([]) + [item] }}" + loop: "{{ pve_ceph_osds }}" + when: item.device not in _existing_ceph_osds - name: Create Ceph OSDs - command: >- + ansible.builtin.command: >- pveceph osd create {{ item.device }} {% if "encrypted" in item and item["encrypted"] | bool %}--encrypted 1{% endif %} - {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %} - args: - creates: '{{ item.device }}1' - with_items: '{{ pve_ceph_osds_diff }}' + {% if "block.db" in item %}--db_dev {{ item["block.db"] }}{% endif %} + {% if "block.wal" in item %}--wal_dev {{ item["block.wal"] }}{% endif %} + loop: '{{ _ceph_osds_diff | default([]) }}' tags: create_osd - block: @@ -109,48 +73,55 @@ when: item.name not in _ceph_crush.stdout_lines with_items: '{{ pve_ceph_crush_rules }}' - - name: Download and decompress crushmap - command: "{{ item }}" - with_items: - - ceph osd getcrushmap -o crush_map_compressed - - crushtool -d crush_map_compressed -o crush_map_decompressed - changed_when: false # This is just getting information for us to possibly edit, don't mislead user with 'changed' - - - name: Modify crushmap for rules that should be updated - replace: - path: crush_map_decompressed - regexp: >- - rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ 
](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} - replace: >- - rule {{item.name}} { - \gid \g - \gtype \g - \gmin_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].min_size | default("\g") | trim }} - \gmax_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].max_size | default("\g") | trim }} - {%- if ((pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class | default(False)) -%} - \gstep take default class {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class }} - {%- else -%} - \gstep take default\g - {%- endif -%} - \gstep \g firstn 0 type {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].type | default("\g") | trim }} - \gstep emit\n} - loop: '{{ pve_ceph_crush_rules }}' - register: _crushmap - - - name: Compress and upload changed crushmap - command: "{{ item }}" - with_items: - - crushtool -c crush_map_decompressed -o new_crush_map_compressed - - ceph osd setcrushmap -i new_crush_map_compressed - when: _crushmap.changed - - - name: Cleanup temp files from generating new crushmap - file: - path: + - block: + - name: Download and decompress crushmap + command: "{{ item }}" + with_items: + - ceph osd getcrushmap -o crush_map_compressed + - crushtool -d crush_map_compressed -o crush_map_decompressed + changed_when: false # This is just getting information for us to possibly edit, don't mislead user with 'changed' + + - name: Modify local crushmap for rules that should be updated + replace: + path: crush_map_decompressed + regexp: >- + rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} + replace: + "rule {{ item.name }} {\ + \\gid \\g\ + \\gtype \\g\ + \\gmin_size {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].min_size | default(\"\\g\") | trim }}\ + \\gmax_size {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].max_size | default(\"\\g\") | trim }}\ + {%- if ((pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].class | default(False)) -%}\ + \\gstep take default class {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].class }}\ + {%- else -%}\ + \\gstep take default\\g\ + {%- endif -%}\ + \\gstep \\g firstn 0 type {{ (pve_ceph_crush_rules | selectattr(\"name\", \"match\", item.name) | list)[0].type | default(\"\\g\") | trim }}\ + \\gstep emit\n}" + loop: '{{ pve_ceph_crush_rules }}' + register: _crushmap + + - name: Validate and compress new crushmap + command: crushtool -c crush_map_decompressed -o new_crush_map_compressed + register: _crushmap_valid + when: _crushmap.changed + + - name: Upload new crushmap + command: ceph osd setcrushmap -i new_crush_map_compressed + with_items: + - ceph osd setcrushmap -i new_crush_map_compressed + when: _crushmap.changed and _crushmap_valid.rc == 0 + + - name: Cleanup temp files from generating new crushmap + file: + path: "{{ item }}" + state: absent + with_items: - crush_map_compressed - crush_map_decompressed - new_crush_map_compressed - state: absent + changed_when: false # This will always trigger as the files are created to do the initial checks, lets not confuse the user with 'changed' - name: 
List Ceph Pools command: ceph osd pool ls diff --git a/tasks/identify_needed_packages.yml b/tasks/identify_needed_packages.yml index 2c115f3c..95125ade 100644 --- a/tasks/identify_needed_packages.yml +++ b/tasks/identify_needed_packages.yml @@ -7,11 +7,6 @@ - ksm-control-daemon - systemd-sysv -- name: Stage patch package if we need to patch the subscription message - set_fact: - _pve_install_packages: "{{ _pve_install_packages | union(['patch']) }}" - when: "'pve-no-subscription' in pve_repository_line" - - name: Stage ZFS packages if ZFS is enabled set_fact: _pve_install_packages: "{{ _pve_install_packages | union(['zfsutils-linux', 'zfs-initramfs', 'zfs-zed']) }}" diff --git a/tasks/ipmi_watchdog.yml b/tasks/ipmi_watchdog.yml index 6a7ca7a5..9c203eb1 100644 --- a/tasks/ipmi_watchdog.yml +++ b/tasks/ipmi_watchdog.yml @@ -13,10 +13,12 @@ content: "options ipmi_watchdog action={{ pve_watchdog_ipmi_action }} \ timeout={{ pve_watchdog_ipmi_timeout }} panic_wdt_timeout=10" dest: /etc/modprobe.d/ipmi_watchdog.conf + mode: 0640 - name: Configure PVE HA Manager to use ipmi_watchdog copy: content: "WATCHDOG_MODULE=ipmi_watchdog" dest: /etc/default/pve-ha-manager + mode: 0640 notify: - restart watchdog-mux diff --git a/tasks/kernel_module_cleanup.yml b/tasks/kernel_module_cleanup.yml index 303ebf42..3081f762 100644 --- a/tasks/kernel_module_cleanup.yml +++ b/tasks/kernel_module_cleanup.yml @@ -42,6 +42,7 @@ copy: content: "WATCHDOG_MODULE=softdog" dest: /etc/default/pve-ha-manager + mode: 0640 notify: - restart watchdog-mux when: "pve_watchdog != 'ipmi'" diff --git a/tasks/kernel_updates.yml b/tasks/kernel_updates.yml index a7369cc9..907f2055 100644 --- a/tasks/kernel_updates.yml +++ b/tasks/kernel_updates.yml @@ -10,11 +10,10 @@ msg: "PVE kernel update detected by Ansible" pre_reboot_delay: "{{ pve_reboot_on_kernel_update_delay }}" post_reboot_delay: "{{ pve_reboot_on_kernel_update_delay }}" - throttle: "{{ pve_cluster_enabled | bool | ternary(1, omit) }}" + throttle: "{{ pve_cluster_enabled | bool }}" when: - "pve_reboot_on_kernel_update | bool" - "_pve_kernel_update.new_kernel_exists" - - "pve_cluster_enabled | bool" - name: "Collect kernel package information" collect_kernel_info: diff --git a/tasks/load_variables.yml b/tasks/load_variables.yml index cbcd522e..a5ec77f7 100644 --- a/tasks/load_variables.yml +++ b/tasks/load_variables.yml @@ -8,11 +8,6 @@ # address. Thus, we're deprecating them. See below references. 
# https://pve.proxmox.com/wiki/Separate_Cluster_Network#Setup_at_Cluster_Creation # https://git.proxmox.com/?p=pve-cluster.git;a=blob;f=data/PVE/Corosync.pm;h=8b5c91e0da084da4e9ba7423176872a0c16ef5af;hb=refs/heads/stable-5#l209 - - name: LEGACY - Define pve_cluster_addr0 from bindnet0_addr/ring0_addr - set_fact: - pve_cluster_addr0: "{{ pve_cluster_bindnet0_addr | default(pve_cluster_ring0_addr) }}" - when: "pve_cluster_ring0_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr0 from link0_addr set_fact: pve_cluster_addr0: "{{ pve_cluster_link0_addr }}" @@ -20,11 +15,6 @@ when: "pve_cluster_addr0 is not defined" - block: - - name: LEGACY - Define pve_cluster_addr1 from bindnet1_addr/ring1_addr - set_fact: - pve_cluster_addr1: "{{ pve_cluster_bindnet1_addr | default(pve_cluster_ring1_addr) }}" - when: "pve_cluster_ring1_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr1 from link1_addr set_fact: pve_cluster_addr1: "{{ pve_cluster_link1_addr }}" diff --git a/tasks/main.yml b/tasks/main.yml index 1445a589..6b3c887a 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -2,6 +2,10 @@ --- - import_tasks: load_variables.yml +- name: Gather package facts + package_facts: + manager: auto + - name: Ensure that facts are present for all cluster hosts assert: that: @@ -150,6 +154,8 @@ - name: Install Proxmox VE and related packages apt: + update_cache: yes + cache_valid_time: 3600 name: "{{ _pve_install_packages }}" state: "{{ 'latest' if pve_run_proxmox_upgrades else 'present' }}" retries: 2 @@ -167,10 +173,11 @@ - "deb https://enterprise.proxmox.com/debian/pve {{ ansible_distribution_release }} pve-enterprise" - name: Remove subscription check wrapper function in web UI - patch: - src: "00_remove_checked_command_{{ ansible_distribution_release }}.patch" - basedir: / - strip: 1 + ansible.builtin.lineinfile: + path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js + line: ' orig_cmd(); return;' + insertafter: '^\s+checked_command: function\(orig_cmd\) {$' + firstmatch: yes backup: yes when: - "pve_remove_subscription_warning | bool" @@ -187,15 +194,6 @@ - import_tasks: kernel_module_cleanup.yml -- name: "[TEMPFIX] Fix cluster joins on PVE 6" - patch: - src: "01_pass_correct_format_for_linkX.patch" - basedir: / - strip: 1 - when: - - "ansible_distribution_release == 'buster'" - - "pve_cluster_enabled | bool" - - import_tasks: pve_cluster_config.yml when: "pve_cluster_enabled | bool" @@ -245,6 +243,21 @@ with_items: "{{ pve_acls }}" when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" +- name: Create ZFS Pools + zfs: + name: "{{ item.pool }}" + state: present + with_items: "{{ pve_storages }}" + when: "item.type == 'zfspool'" + tags: storage + +- name: Create ZFS Volumes specified by user + zfs: + name: "{{ item }}" + state: present + with_items: "{{ pve_zfs_create_volumes }}" + tags: storage + - name: Configure Proxmox Storage proxmox_storage: name: "{{ item.name }}" @@ -264,8 +277,10 @@ options: "{{ item.options | default(omit) }}" vgname: "{{ item.vgname | default(omit) }}" thinpool: "{{ item.thinpool | default(omit) }}" + sparse: "{{ item.sparse | default(omit) }}" with_items: "{{ pve_storages }}" when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + tags: storage - name: Check datacenter.cfg exists stat: @@ -279,6 +294,7 @@ file: path: 
"/etc/pve/datacenter.cfg" state: "touch" + mode: 0640 when: - "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - "pve_datacenter_cfg | length > 0" @@ -302,6 +318,3 @@ when: - "pve_ssl_private_key is defined" - "pve_ssl_certificate is defined" - -- import_tasks: ssl_letsencrypt.yml - when: "pve_ssl_letsencrypt | bool" diff --git a/tasks/pve_add_node.yml b/tasks/pve_add_node.yml index af6656a7..f96d1f81 100644 --- a/tasks/pve_add_node.yml +++ b/tasks/pve_add_node.yml @@ -1,27 +1,35 @@ --- -- name: Identify what host we're working with (inside outer loop) - set_fact: - _pve_current_node: "{{ item }}" +- block: + - name: Identify the SSH public key and SSH addresses of initial cluster host + ansible.builtin.set_fact: + _pve_cluster_host_key: "{{ ' '.join((hostvars[_init_node]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" + _pve_cluster_host_addresses: "{{ hostvars[_init_node].pve_cluster_ssh_addrs | join(',') }}" + + - name: Temporarily mark that cluster host as known in root user's known_hosts + ansible.builtin.blockinfile: + dest: /root/.ssh/known_hosts + create: yes + mode: 0600 + marker: "# {mark}: cluster host key for joining" + content: "{{ _pve_cluster_host_addresses }} {{ _pve_cluster_host_key }}" + when: "pve_manage_ssh | bool" - name: Add node to Proxmox cluster - command: >- - pvecm add {{ hostvars[groups[pve_group][0]].pve_cluster_addr0 }} -use_ssh - {{ addr0_flag }} {{ pve_cluster_addr0 }} + ansible.builtin.command: >- + pvecm add {{ hostvars[_init_node].pve_cluster_addr0 }} -use_ssh + -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} - {{ addr1_flag }} {{ pve_cluster_addr1 }} + -link1 {{ pve_cluster_addr1 }} {% endif %} + # Ensure that nodes join one-by-one because cluster joins create a lock + throttle: 1 args: creates: "{{ pve_cluster_conf }}" - vars: - addr0_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link0', '-ring0_addr') }}" - addr1_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link1', '-ring1_addr') }}" - when: - - "inventory_hostname == _pve_current_node" -- name: Remove stale corosync lock file due to lack of quorum during initialization - file: - dest: "{{ pve_base_dir }}/priv/lock/file-corosync_conf" +- name: Remove the cluster host's public key from root user's known_hosts + ansible.builtin.blockinfile: + dest: /root/.ssh/known_hosts state: absent - when: - - "inventory_hostname == _pve_current_node" - - "inventory_hostname == groups[pve_group][1]" + mode: 0600 + marker: "# {mark}: cluster host key for joining" + when: "pve_manage_ssh | bool" diff --git a/tasks/pve_cluster_config.yml b/tasks/pve_cluster_config.yml index 48fa86c7..c8003b26 100644 --- a/tasks/pve_cluster_config.yml +++ b/tasks/pve_cluster_config.yml @@ -34,21 +34,30 @@ cluster's name cannot be modified." 
when: "(_pve_found_clusters | default([]) | length) == 1" +- name: Default initialization node is the first node of pve_group + ansible.builtin.set_fact: + _init_node: "{{ groups[pve_group][0] }}" + +- name: Find any active node in an already initialized Proxmox cluster + ansible.builtin.set_fact: + _init_node: "{{ item }}" + with_items: "{{ groups[pve_group] }}" + when: + - "'_pve_active_cluster' in hostvars[item]" + - "hostvars[item]['_pve_active_cluster'] == pve_cluster_clustername" + - name: Initialize a Proxmox cluster - command: >- + ansible.builtin.command: >- pvecm create {{ pve_cluster_clustername }} - {{ addr0_flag }} {{ pve_cluster_addr0 }} + -link0 {{ pve_cluster_addr0 }} {% if pve_cluster_addr1 is defined %} - {{ addr1_flag }} {{ pve_cluster_addr1 }} + -link1 {{ pve_cluster_addr1 }} {% endif %} args: creates: "{{ pve_cluster_conf }}" - vars: - addr0_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link0', '-ring0_addr') }}" - addr1_flag: "{{ (ansible_distribution_release == 'buster') | ternary('-link1', '-ring1_addr') }}" when: - "_pve_found_clusters is not defined" - - "inventory_hostname == groups[pve_group][0]" + - "inventory_hostname == _init_node" - name: Wait for quorum on initialization node proxmox_query: @@ -58,15 +67,14 @@ retries: 5 delay: 5 when: - - "inventory_hostname == groups[pve_group][0]" + - "inventory_hostname == _init_node" vars: query: "response[?type=='cluster'].quorate | [0]" - include_tasks: pve_add_node.yml - with_items: "{{ groups[pve_group][1:] }}" when: - "_pve_active_cluster is not defined" - - "inventory_hostname != groups[pve_group][0]" + - "inventory_hostname != _init_node" - name: Check for PVE cluster HA groups proxmox_query: diff --git a/tasks/ssh_cluster_config.yml b/tasks/ssh_cluster_config.yml index b1c34f78..5d814bfc 100644 --- a/tasks/ssh_cluster_config.yml +++ b/tasks/ssh_cluster_config.yml @@ -28,6 +28,7 @@ blockinfile: dest: /etc/ssh/ssh_config create: yes + mode: 0644 marker: "# {mark}: PVE host configuration options (managed by ansible)." content: | {% for host in groups[pve_group] %} @@ -49,34 +50,10 @@ notify: - reload sshd configuration -- name: Fetch SSH public host keys - slurp: - src: "/etc/ssh/{{ item }}" - register: proxmox_ssh_public_host_keys - with_items: - - ssh_host_rsa_key.pub - - ssh_host_ed25519_key.pub - - ssh_host_ecdsa_key.pub - -- name: Check status of known hosts file - stat: - path: /etc/ssh/ssh_known_hosts - register: _pve_known_hosts_file - -- name: Add every host's host keys to global known_hosts - blockinfile: - dest: /etc/ssh/ssh_known_hosts - create: yes - marker: "# {mark}: PVE host keys (managed by ansible)." 
- content: | - {% for host in groups[pve_group] %} - {% for _key_slurp in hostvars[host].proxmox_ssh_public_host_keys.results %} - {%- set _key = ' '.join((_key_slurp.content | b64decode).split()[:-1]) -%} - {{ hostvars[host].pve_cluster_ssh_addrs | join(",") }} {{ _key }} - {% endfor %} - {% endfor %} - when: - - "not (_pve_known_hosts_file.stat.islnk is defined and _pve_known_hosts_file.stat.islnk)" +- name: Fetch a SSH public key to use for cluster joins + ansible.builtin.slurp: + src: "/etc/ssh/ssh_host_ed25519_key.pub" + register: _pve_ssh_public_key - name: Add PVE-provided ciphers to SSH client config lineinfile: @@ -84,4 +61,5 @@ regexp: "^Ciphers .*" insertbefore: BOF create: yes + mode: 0644 dest: /root/.ssh/config diff --git a/tasks/ssl_config.yml b/tasks/ssl_config.yml index 114bf6f0..80d82793 100644 --- a/tasks/ssl_config.yml +++ b/tasks/ssl_config.yml @@ -3,6 +3,7 @@ copy: content: "{{ item.content }}" dest: "{{ item.dest }}" + mode: 0640 with_items: - dest: "/etc/ssl/pveproxy-ssl.key" content: "{{ pve_ssl_private_key }}" diff --git a/tasks/ssl_letsencrypt.yml b/tasks/ssl_letsencrypt.yml deleted file mode 100644 index 03d5cf08..00000000 --- a/tasks/ssl_letsencrypt.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Install Proxmox Let's Encrypt post-hook script - template: - src: pve-letsencrypt-post-hook.sh.j2 - dest: /usr/local/bin/pve-letsencrypt-post-hook.sh - mode: 0755 - -- name: Request Let's Encrypt certificate for {{ ansible_fqdn }} - include_role: - name: systemli.letsencrypt - vars: - - letsencrypt_cert: - name: "{{ ansible_fqdn }}" - domains: - - "{{ ansible_fqdn }}" - challenge: http - http_auth: standalone - post_hook: /usr/local/bin/pve-letsencrypt-post-hook.sh diff --git a/tasks/zfs.yml b/tasks/zfs.yml index e192af99..676c4d95 100644 --- a/tasks/zfs.yml +++ b/tasks/zfs.yml @@ -10,11 +10,13 @@ copy: content: zfs dest: /etc/modules-load.d/zfs.conf + mode: 0644 - name: Copy ZFS modprobe configuration copy: content: "options zfs {{ pve_zfs_options }}" dest: /etc/modprobe.d/zfs.conf + mode: 0644 when: "pve_zfs_options is defined and pve_zfs_options | bool" - name: Configure email address for ZFS event daemon notifications @@ -25,3 +27,7 @@ notify: - restart zfs-zed when: "pve_zfs_zed_email is defined" + +- name: HOOK - Run ZFS post-install hook tasks + ansible.builtin.include_tasks: "{{ pve_hooks.zfs_post_install }}" + when: "'zfs_post_install' in pve_hooks" diff --git a/templates/pve-letsencrypt-post-hook.sh.j2 b/templates/pve-letsencrypt-post-hook.sh.j2 deleted file mode 100644 index 6054d3bc..00000000 --- a/templates/pve-letsencrypt-post-hook.sh.j2 +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -cp /etc/letsencrypt/live/{{ ansible_fqdn }}/privkey.pem /etc/pve/local/pveproxy-ssl.key -cp /etc/letsencrypt/live/{{ ansible_fqdn }}/fullchain.pem /etc/pve/local/pveproxy-ssl.pem -service pveproxy restart diff --git a/tests/group_vars/all b/tests/group_vars/all index 6c644943..36b5fbb8 100644 --- a/tests/group_vars/all +++ b/tests/group_vars/all @@ -100,6 +100,17 @@ pve_storages: # This should create 2 different storages. 
- 10.0.0.1 - 10.0.0.2 - 10.0.0.3 + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: rpool/zfs1 + sparse: true + - name: zfs2 + type: dir + content: [ "iso", "vztmpl", "backup" ] + path: /rpool/zfs2 +pve_zfs_create_volumes: + - rpool/zfs2 ssl_directory: /home/travis/ssl/ ssl_ca_key_path: "{{ ssl_directory }}/test-ca.key" diff --git a/tests/install.yml b/tests/install.yml index bdfde017..143e459b 100644 --- a/tests/install.yml +++ b/tests/install.yml @@ -14,10 +14,10 @@ shell: "openssl req -x509 -new -nodes -key {{ ssl_ca_key_path }} -sha256 -days 1 -subj '{{ ssl_subj }}' -out {{ ssl_ca_cert_path }}" vars: test_profiles: + - profile: debian-bullseye + prefix: proxmox-7- - profile: debian-buster prefix: proxmox-6- - - profile: debian-stretch - prefix: proxmox-5- test_hosts_per_profile: 3 container_config: - "lxc.apparmor.profile = unconfined" diff --git a/tests/inventory b/tests/inventory index ae1e8c59..f819daae 100644 --- a/tests/inventory +++ b/tests/inventory @@ -1,5 +1,5 @@ -[stretchcluster] -proxmox-5-[01:03].lxc - [bustercluster] proxmox-6-[01:03].lxc + +[bullseyecluster] +proxmox-7-[01:03].lxc \ No newline at end of file diff --git a/tests/test.yml b/tests/test.yml index bf5bb55d..ab6c0143 100644 --- a/tests/test.yml +++ b/tests/test.yml @@ -71,6 +71,11 @@ query: "[*].storage" run_once: True + - name: Check that User specified ZFS Volumes exist + zfs_facts: + dataset: "{{ item }}" + with_items: "{{ pve_zfs_create_volumes }}" + - name: Read datacenter.cfg file slurp: src: "/etc/pve/datacenter.cfg" diff --git a/tests/vagrant/files/interfaces b/tests/vagrant/files/interfaces new file mode 100644 index 00000000..935eadde --- /dev/null +++ b/tests/vagrant/files/interfaces @@ -0,0 +1,8 @@ +# interfaces(5) file used by ifup(8) and ifdown(8) +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +allow-hotplug eth0 +iface eth0 inet dhcp diff --git a/tests/vagrant/group_vars/all b/tests/vagrant/group_vars/all index 75d92777..776dd74e 100644 --- a/tests/vagrant/group_vars/all +++ b/tests/vagrant/group_vars/all @@ -46,8 +46,21 @@ pve_storages: username: admin monhost: - "{{ ansible_fqdn }}:6789" + - name: zfs1 + type: zfspool + content: [ "images", "rootdir" ] + pool: testpool/zfs1 + sparse: true + - name: zfs2 + type: dir + content: [ "iso", "vztmpl", "backup" ] + path: /testpool/zfs2 +pve_zfs_create_volumes: + - testpool/zfs2 pve_ceph_osds: - device: "/dev/vdb" +pve_hooks: + zfs_post_install: zpool_setup.yml ntp_manage_config: true ntp_servers: - clock.sjc.he.net diff --git a/tests/vagrant/provision.yml b/tests/vagrant/provision.yml index 36ac9f3b..10ad2e0d 100644 --- a/tests/vagrant/provision.yml +++ b/tests/vagrant/provision.yml @@ -2,9 +2,22 @@ - hosts: all become: True pre_tasks: + - name: Copy apt proxy detection script + ansible.builtin.template: + src: detect-http-proxy.j2 + dest: /etc/apt/detect-http-proxy + mode: 0755 + - name: Configure apt to use detection script + ansible.builtin.copy: + content: "Acquire::Retries 0;\nAcquire::http::ProxyAutoDetect \"/etc/apt/detect-http-proxy\";" + dest: /etc/apt/apt.conf.d/30detectproxy - name: Install gnupg2 apt: name: gnupg2 + - name: Replace /etc/network/interfaces + ansible.builtin.copy: + src: files/interfaces + dest: /etc/network/interfaces roles: - geerlingguy.ntp - lae.proxmox diff --git a/tests/vagrant/tasks/zpool_setup.yml b/tests/vagrant/tasks/zpool_setup.yml new file mode 100644 index 00000000..6af5e32c --- /dev/null +++ b/tests/vagrant/tasks/zpool_setup.yml @@ -0,0 
+1,5 @@ +- name: Identify imported ZFS pools + community.general.zpool_facts: +- name: Create testpool ZFS pool + ansible.builtin.command: zpool create testpool vdc + when: "'testpool' not in (ansible_zfs_pools | json_query('[].name'))" diff --git a/tests/vagrant/templates/detect-http-proxy.j2 b/tests/vagrant/templates/detect-http-proxy.j2 new file mode 100644 index 00000000..1d227366 --- /dev/null +++ b/tests/vagrant/templates/detect-http-proxy.j2 @@ -0,0 +1,14 @@ +#!/bin/bash + +APT_CACHE_HOST="{{ lookup('env', 'APT_CACHE_HOST') }}" + +# Check if we can connect to the given host and provide it if so +if [ ! -z "${APT_CACHE_HOST}" ]; then + if $(nc -zw1 "${APT_CACHE_HOST}" 3142); then + echo "http://${APT_CACHE_HOST}:3142" + exit + fi +fi + +# Otherwise, don't use a proxy +echo "DIRECT" diff --git a/vars/debian-stretch.yml b/vars/debian-bullseye.yml similarity index 61% rename from vars/debian-stretch.yml rename to vars/debian-bullseye.yml index d506832e..f66a3a21 100644 --- a/vars/debian-stretch.yml +++ b/vars/debian-bullseye.yml @@ -1,4 +1,4 @@ --- -pve_release_key: proxmox-ve-release-5.x.asc -pve_release_key_id: 0D9A1950E2EF0603 +pve_release_key: proxmox-ve-release-7.x.asc +pve_release_key_id: DD4BA3917E23BF59 pve_ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com"
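For reference, the new variables consumed by the tasks in this diff can be set in group_vars roughly as follows. This is an illustrative sketch only, not content from the diff: the pool, dataset, rule, and hook file names below are placeholders, and every field shown is optional in the sense described by the tasks above.

pve_ceph_crush_rules:
  - name: replicated_rule            # placeholder; must match a rule already present in the CRUSH map
    min_size: 2                      # any field omitted here keeps the value currently in the rule
    max_size: 3
    class: hdd                       # optional; emits "step take default class hdd"
    type: host                       # failure domain for the "step chooseleaf/choose ... type" line
pve_storages:
  - name: zfs1                       # placeholder storage name
    type: zfspool
    content: [ "images", "rootdir" ]
    pool: tank/zfs1                  # placeholder dataset; created by the "Create ZFS Pools" task
    sparse: true
pve_zfs_create_volumes:
  - tank/iso                         # extra ZFS datasets created before storages are configured
pve_hooks:
  zfs_post_install: zfs_extra_setup.yml   # placeholder task file included after ZFS setup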