diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 59900ffb..81c9de12 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,4 @@ +--- ko_fi: sleepingkyoto custom: - "https://monappy.jp/u/lae" diff --git a/.gitignore b/.gitignore index 0ce91c4f..8da1baa3 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ fetch/ .vagrant/ +/.settings/ \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 0eb33b38..ac830b5b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,32 +1,25 @@ --- language: python -python: - - "2.7" - - "3.6" sudo: required dist: bionic cache: directories: [ '$HOME/lxc' ] pip: true -env: - - ANSIBLE_VERSION='~=2.9.0' matrix: fast_finish: true include: # FIXME: Ansible 2.10.x going through major restructuring. # https://groups.google.com/forum/#!msg/ansible-project/eXsoOKEd0Mk/XTgbnPWbCAAJ -# - python: '3.6' -# env: ANSIBLE_GIT_VERSION='devel' # 2.10.x development branch - - python: '3.6' - env: ANSIBLE_VERSION='~=2.8.0' - - python: '3.6' - env: ANSIBLE_VERSION='~=2.7.0' +# - env: ANSIBLE_GIT_VERSION='devel' # 2.11.x development branch + - env: ANSIBLE_VERSION='~=2.10.0' + - env: ANSIBLE_VERSION='~=2.9.0' + - env: ANSIBLE_VERSION='~=2.8.0' install: - if [ "$ANSIBLE_GIT_VERSION" ]; then pip install "https://github.com/ansible/ansible/archive/${ANSIBLE_GIT_VERSION}.tar.gz"; else pip install "ansible${ANSIBLE_VERSION}"; fi; pip install --pre ansible-lint; pip install jmespath netaddr - ansible --version -- ansible-galaxy install lae.travis-lxc,v0.9.0 +- ansible-galaxy install lae.travis-lxc,v0.10.1 - ansible-playbook tests/install.yml -i tests/inventory - git archive --format tar.gz HEAD > lae.proxmox.tar.gz && ansible-galaxy install lae.proxmox.tar.gz,$(git rev-parse HEAD),lae.proxmox && rm lae.proxmox.tar.gz diff --git a/README.md b/README.md index fb2bda61..27f3afa2 100644 --- a/README.md +++ b/README.md @@ -409,6 +409,7 @@ pve_ceph_crush_rules: [] # List of CRUSH rules to create # pve_ssl_private_key: "" # 
Should be set to the contents of the private key to use for HTTPS # pve_ssl_certificate: "" # Should be set to the contents of the certificate to use for HTTPS pve_ssl_letsencrypt: false # Specifies whether or not to obtain a SSL certificate using Let's Encrypt +pve_roles: [] # Added more roles with specific privileges. See section on User Management. pve_groups: [] # List of group definitions to manage in PVE. See section on User Management. pve_users: [] # List of user definitions to manage in PVE. See section on User Management. pve_storages: [] # List of storages to manage in PVE. See section on Storage Management. @@ -513,10 +514,19 @@ pve_users: Refer to `library/proxmox_user.py` [link][user-module] and `library/proxmox_group.py` [link][group-module] for module documentation. -For managing ACLs, a similar module is employed, but the main difference is that -most of the parameters only accept lists (subject to change): +For managing roles and ACLs, a similar module is employed, but the main +difference is that most of the parameters only accept lists (subject to +change): ``` +pve_roles: + - name: Monitoring + privileges: + - "Sys.Modify" + - "Sys.Audit" + - "Datastore.Audit" + - "VM.Monitor" + - "VM.Audit" pve_acls: - path: / roles: [ "Administrator" ] @@ -529,7 +539,8 @@ pve_acls: - test_users ``` -Refer to `library/proxmox_acl.py` [link][acl-module] for module documentation. +Refer to `library/proxmox_role.py` [link][user-module] and +`library/proxmox_acl.py` [link][acl-module] for module documentation. ## Storage Management diff --git a/defaults/main.yml b/defaults/main.yml index 3eb55260..0308e492 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -34,6 +34,8 @@ pve_cluster_clustername: "{{ pve_group }}" pve_datacenter_cfg: {} pve_cluster_ha_groups: [] pve_ssl_letsencrypt: false +# additional roles for your cluster (f.e. 
for monitoring) +pve_roles: [] pve_groups: [] pve_users: [] pve_acls: [] diff --git a/files/00_remove_checked_command_buster.patch b/files/00_remove_checked_command_buster.patch index c48dc334..41dd0c01 100644 --- a/files/00_remove_checked_command_buster.patch +++ b/files/00_remove_checked_command_buster.patch @@ -64,10 +64,10 @@ diff -ur /usr/share/pve-manager/js/pvemanagerlib.js /usr/share/pve-manager/js/pv @@ -33892,7 +33892,7 @@ var version_btn = new Ext.Button({ text: gettext('Package versions'), - handler: function(){ + handler: function() { - Proxmox.Utils.checked_command(function() { me.showVersions(); }); + me.showVersions(); - } + }, }); @@ -45136,7 +45136,6 @@ @@ -75,6 +75,6 @@ diff -ur /usr/share/pve-manager/js/pvemanagerlib.js /usr/share/pve-manager/js/pv me.login = null; me.updateLoginData(data); - Proxmox.Utils.checked_command(function() {}); // display subscription status - } + }, }); } diff --git a/library/proxmox_acl.py b/library/proxmox_acl.py index 34760c71..f0dbb8b4 100755 --- a/library/proxmox_acl.py +++ b/library/proxmox_acl.py @@ -83,6 +83,11 @@ def __init__(self, module): except ProxmoxShellError as e: self.module.fail_json(msg=e.message, status_code=e.status_code) + # PVE 5.x (unnecessarily) uses a string for this value. This ensures + # that it's an integer for when we compare values later. 
+ for acl in self.existing_acl: + acl['propagate'] = int(acl['propagate']) + self.parse_acls() def parse_acls(self): @@ -99,7 +104,7 @@ def parse_acls(self): for constituent in constituents: self.acls.append({ "path": self.path, - "propagate": "1", # possibly make this configurable in the module later + "propagate": 1, # possibly make this configurable in the module later "roleid": role, "type": constituent[0], "ugid": constituent[1] diff --git a/library/proxmox_role.py b/library/proxmox_role.py new file mode 100644 index 00000000..6371bb6e --- /dev/null +++ b/library/proxmox_role.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +ANSIBLE_METADATA = { + 'metadata_version': '0.1', + 'status': ['preview'], + 'supported_by': 'lae' +} + +DOCUMENTATION = ''' +--- +module: proxmox_role +short_description: Manages roles in Proxmox +options: + name: + required: true + description: + - name of the role. + privileges: + required: true + type: list + description: + - Specifies a list of PVE privileges for the given role. + state: + required: false + default: "present" + choices: [ "present", "absent" ] + description: + - Specifies whether this role should exist or not. 
+author: + - Thoralf Rickert-Wendt (@trickert76) +''' + +EXAMPLES = ''' +- name: Create a role for monitoring with given privileges + proxmox_role: + name: "monitoring" + privileges: [ "Sys.Modify", "Sys.Audit", "Datastore.Audit", "VM.Monitor", "VM.Audit" ] +''' + +RETURN = ''' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_text +from ansible.module_utils.pvesh import ProxmoxShellError +import ansible.module_utils.pvesh as pvesh + +class ProxmoxRole(object): + def __init__(self, module): + self.module = module + self.name = module.params['name'] + self.privileges = module.params['privileges'] + self.state = module.params['state'] + + try: + self.existing_roles = pvesh.get("access/roles") + except ProxmoxShellError as e: + self.module.fail_json(msg=e.message, status_code=e.status_code) + + self.parse_roles() + + def parse_roles(self): + self.roles = [] + for existing_role in self.existing_roles: + self.roles.append(existing_role.get('roleid')) + + def lookup(self): + self.roles = [] + for existing_role in self.existing_roles: + if existing_role.get('roleid') == self.name: + args = {} + args['roleid'] = existing_role.get('roleid') + args['privs'] = ','.join(sorted(existing_role.get('privs').split(','))) + return args + + return None + + def exists(self): + if self.name not in self.roles: + return False + + return True + + def prepare_role_args(self, appendKey=True): + args = {} + if appendKey: + args['roleid'] = self.name + args['privs'] = ','.join(sorted(self.privileges)) + + return args + + def remove_role(self): + try: + pvesh.delete("access/roles/{}".format(self.name)) + return (True, None) + except ProxmoxShellError as e: + return (False, e.message) + + def create_role(self): + new_role = self.prepare_role_args() + + try: + pvesh.create("access/roles", **new_role) + return (True, None) + except ProxmoxShellError as e: + return (False, e.message) + + def modify_role(self): + existing_role = self.lookup() + 
modified_role = self.prepare_role_args(appendKey=False) + updated_fields = [] + error = None + + for key in modified_role: + if key not in existing_role: + updated_fields.append(key) + else: + new_value = modified_role.get(key) + old_value = existing_role.get(key) + if isinstance(old_value, list): + old_value = ','.join(sorted(old_value)) + if isinstance(new_value, list): + new_value = ','.join(sorted(new_value)) + + if new_value != old_value: + updated_fields.append(key) + + if self.module.check_mode: + self.module.exit_json(changed=bool(updated_fields), expected_changes=updated_fields) + + if not updated_fields: + # No changes necessary + return (updated_fields, error) + + try: + pvesh.set("access/roles/{}".format(self.name), **modified_role) + except ProxmoxShellError as e: + error = e.message + + return (updated_fields, error) + +def main(): + # Refer to https://pve.proxmox.com/pve-docs/api-viewer/index.html + module = AnsibleModule( + argument_spec = dict( + name=dict(type='str', required=True), + privileges=dict(type='list', required=True), + state=dict(default='present', choices=['present', 'absent'], type='str') + ), + supports_check_mode=True + ) + + role = ProxmoxRole(module) + + changed = False + error = None + result = {} + result['name'] = role.name + result['state'] = role.state + result['changed'] = False + + if role.state == 'absent': + if role.exists(): + if module.check_mode: + module.exit_json(changed=True) + + (changed, error) = role.remove_role() + elif role.state == 'present': + if not role.exists(): + if module.check_mode: + module.exit_json(changed=True) + + (changed, error) = role.create_role() + else: + (updated_fields, error) = role.modify_role() + + if updated_fields: + changed = True + result['updated_fields'] = updated_fields + + if error is not None: + module.fail_json(name=role.name, msg=error) + + result['changed'] = changed + module.exit_json(**result) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git 
a/meta/main.yml b/meta/main.yml index 0c9f9120..cf72e9af 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,3 +1,4 @@ +--- galaxy_info: author: Musee Ullah description: Installs and configures Proxmox 5.x (for clustering) @@ -7,10 +8,10 @@ galaxy_info: min_ansible_version: 2.4 platforms: - - name: Debian - versions: - - stretch - - buster + - name: Debian + versions: + - stretch + - buster galaxy_tags: - proxmox diff --git a/tasks/ceph.yml b/tasks/ceph.yml index 820cd530..d71a77b4 100644 --- a/tasks/ceph.yml +++ b/tasks/ceph.yml @@ -1,188 +1,185 @@ # This is an Ansible version of what "pveceph install" actually does -- name: Install custom Ceph systemd service - copy: - src: /usr/share/doc/pve-manager/examples/ceph.service - dest: /etc/systemd/system/ceph.service - remote_src: true - owner: root - group: root - mode: preserve - notify: 'restart ceph' +--- +- block: + - name: Install custom Ceph systemd service + copy: + src: /usr/share/doc/pve-manager/examples/ceph.service + dest: /etc/systemd/system/ceph.service + remote_src: true + owner: root + group: root + mode: preserve + notify: 'restart ceph' + + - name: Enable Ceph + systemd: + name: ceph.service + enabled: true when: - "ansible_distribution_release == 'stretch'" -- name: Enable Ceph - systemd: - name: ceph.service - enabled: true - when: - - "ansible_distribution_release == 'stretch'" - - block: - - name: Create initial Ceph config - command: "pveceph init --network {{ pve_ceph_network }} \ - {% if pve_ceph_cluster_network is defined %} \ - --cluster-network {{ pve_ceph_cluster_network }} - {% endif %}" - args: - creates: /etc/ceph/ceph.conf - - - name: Create initial Ceph monitor - command: 'pveceph mon create' - args: - creates: '/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/' - register: _ceph_initial_mon - - - name: Fail if initial monitor creation failed - fail: - msg: 'Ceph intial monitor creation failed.' 
- when: _ceph_initial_mon is failed - - when: inventory_hostname == groups[pve_ceph_mon_group][0] + - name: Create initial Ceph config + command: "pveceph init --network {{ pve_ceph_network }} \ + {% if pve_ceph_cluster_network is defined %} \ + --cluster-network {{ pve_ceph_cluster_network }} + {% endif %}" + args: + creates: /etc/ceph/ceph.conf + + - name: Create initial Ceph monitor + command: 'pveceph mon create' + args: + creates: '/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/' + register: _ceph_initial_mon + + - name: Fail if initial monitor creation failed + fail: + msg: 'Ceph initial monitor creation failed.' + when: _ceph_initial_mon is failed + when: "inventory_hostname == groups[pve_ceph_mon_group][0]" - name: Create additional Ceph monitors command: 'pveceph mon create' args: creates: '/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/' - when: inventory_hostname != groups[pve_ceph_mon_group][0] + when: "inventory_hostname != groups[pve_ceph_mon_group][0]" - block: - - name: Get existing ceph volumes - ceph_volume: - action: list - data: "{{ item.device }}" - register: _ceph_volume_data - loop: '{{ pve_ceph_osds }}' - tags: ceph_volume - changed_when: false #Merely gets the list of ceph volumes so never changes anything - - - name: Initialize osd variables - set_fact: - _existing_ceph_volumes_tmp: [] - _existing_ceph_volumes: [] - tags: ceph_volume - - - name: Determine ceph volumes Step1 - set_fact: - _existing_ceph_volumes_tmp: "{{ _existing_ceph_volumes_tmp + item.stdout | from_json | json_query('*[].devices[]') }}" - with_items: "{{ _ceph_volume_data.results }}" - tags: ceph_volume - - - name: Determine ceph volumes Step2 - set_fact: - _existing_ceph_volumes: "{{ _existing_ceph_volumes + [{'device': item}] }}" - with_items: "{{ _existing_ceph_volumes_tmp }}" - tags: ceph_volume - - - name: Change osd list (remove existing osds from the list) - set_fact: - pve_ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" - tags: ceph_volume 
- - - name: Create Ceph OSDs - command: >- - pveceph osd create {{ item.device }} - {% if "encrypted" in item and item["encrypted"] | bool %}--encrypted 1{% endif %} - {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %} - args: - creates: '{{ item.device }}1' - with_items: '{{ pve_ceph_osds_diff }}' - + - name: Get existing ceph volumes + ceph_volume: + action: list + data: "{{ item.device }}" + register: _ceph_volume_data + loop: '{{ pve_ceph_osds }}' + tags: ceph_volume + changed_when: false #Merely gets the list of ceph volumes so never changes anything + + - name: Initialize osd variables + set_fact: + _existing_ceph_volumes_tmp: [] + _existing_ceph_volumes: [] + tags: ceph_volume + + - name: Determine ceph volumes Step1 + set_fact: + _existing_ceph_volumes_tmp: "{{ _existing_ceph_volumes_tmp + item.stdout | from_json | json_query('*[].devices[]') }}" + with_items: "{{ _ceph_volume_data.results }}" + tags: ceph_volume + + - name: Determine ceph volumes Step2 + set_fact: + _existing_ceph_volumes: "{{ _existing_ceph_volumes + [{'device': item}] }}" + with_items: "{{ _existing_ceph_volumes_tmp }}" + tags: ceph_volume + + - name: Change osd list (remove existing osds from the list) + set_fact: + pve_ceph_osds_diff: "{{ pve_ceph_osds | difference(_existing_ceph_volumes) }}" + tags: ceph_volume + + - name: Create Ceph OSDs + command: >- + pveceph osd create {{ item.device }} + {% if "encrypted" in item and item["encrypted"] | bool %}--encrypted 1{% endif %} + {% if "block.db" in item %}--journal_dev {{ item["block.db"] }}{% endif %} + args: + creates: '{{ item.device }}1' + with_items: '{{ pve_ceph_osds_diff }}' tags: create_osd - block: - - name: List Ceph CRUSH rules - command: 'ceph osd crush rule ls' - changed_when: false - register: _ceph_crush - - - name: Create Ceph CRUSH rules - command: >- - ceph osd crush rule create-replicated - {{ item.name }} default {{ item.type | default ("host") }} {{ item.class | default("") }} - when: 
item.name not in _ceph_crush.stdout_lines - with_items: '{{ pve_ceph_crush_rules }}' - - - name: Download and decompress crushmap - command: "{{ item }}" - with_items: - - ceph osd getcrushmap -o crush_map_compressed - - crushtool -d crush_map_compressed -o crush_map_decompressed - changed_when: false #This is just getting information for us to possibly edit, don't mislead user with 'changed' - - - name: Modify crushmap for rules that should be updated - replace: - path: crush_map_decompressed - regexp: >- - rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} - replace: >- - rule {{item.name}} { - \gid \g - \gtype \g - \gmin_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].min_size | default("\g") | trim }} - \gmax_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].max_size | default("\g") | trim }} - {%- if ((pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class | default(False)) -%} - \gstep take default class {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class }} - {%- else -%} - \gstep take default\g - {%- endif -%} - \gstep \g firstn 0 type {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].type | default("\g") | trim }} - \gstep emit\n} - loop: '{{ pve_ceph_crush_rules }}' - register: _crushmap - - - name: Compress and upload changed crushmap - command: "{{ item }}" - with_items: - - crushtool -c crush_map_decompressed -o new_crush_map_compressed - - ceph osd setcrushmap -i new_crush_map_compressed - when: _crushmap.changed - - - name: Cleanup temp files from generating new crushmap - file: - path: - - crush_map_compressed - - crush_map_decompressed - - new_crush_map_compressed - state: absent - - - name: List Ceph Pools - 
command: ceph osd pool ls - changed_when: false - register: _ceph_pools - - - name: Create Ceph Pools - command: >- - pveceph pool create {{ item.name }} - {% if 'storage' in item %} - --add_storages {{ item.storage }} - {% endif %} - {% if 'application' in item %} - --application {{ item.application }} - {% endif %} - {% if 'rule' in item %} - --crush_rule {{ item.rule }} - {% endif %} - {% if 'pgs' in item %} - --pg_num {{ item.pgs }} - {% endif %} - {% if 'size' in item %} - --size {{ item.size }} - {% endif %} - {% if 'min_size' in item %} - --min_size {{ item.min_size }} - {% endif %} - when: item.name not in _ceph_pools.stdout_lines - with_items: '{{ pve_ceph_pools }}' - - when: inventory_hostname == groups[pve_ceph_mon_group][0] + - name: List Ceph CRUSH rules + command: 'ceph osd crush rule ls' + changed_when: false + register: _ceph_crush + + - name: Create Ceph CRUSH rules + command: >- + ceph osd crush rule create-replicated + {{ item.name }} default {{ item.type | default ("host") }} {{ item.class | default("") }} + when: item.name not in _ceph_crush.stdout_lines + with_items: '{{ pve_ceph_crush_rules }}' + + - name: Download and decompress crushmap + command: "{{ item }}" + with_items: + - ceph osd getcrushmap -o crush_map_compressed + - crushtool -d crush_map_compressed -o crush_map_decompressed + changed_when: false # This is just getting information for us to possibly edit, don't mislead user with 'changed' + + - name: Modify crushmap for rules that should be updated + replace: + path: crush_map_decompressed + regexp: >- + rule\s+{{ item.name }}\s+{(?:(?P\s+)id\s+(?P[^\s]+)|\s+type\s+(?P[^\s]+)|\s+min_size[ ](?P[^\s]+)|\s+max_size\s+(?P[^\s]+)|\s+step\s+take\s+default(?:\n|\s+class\s+(?P[^\n]*))|\s+step\s+(?Pchooseleaf|choose).*?type\s+(?P[^\s]+))+(?:.|\n)*?} + replace: >- + rule {{item.name}} { + \gid \g + \gtype \g + \gmin_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].min_size | default("\g") | trim }} + 
\gmax_size {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].max_size | default("\g") | trim }} + {%- if ((pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class | default(False)) -%} + \gstep take default class {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].class }} + {%- else -%} + \gstep take default\g + {%- endif -%} + \gstep \g firstn 0 type {{ (pve_ceph_crush_rules | selectattr("name", "match", item.name) | list)[0].type | default("\g") | trim }} + \gstep emit\n} + loop: '{{ pve_ceph_crush_rules }}' + register: _crushmap + + - name: Compress and upload changed crushmap + command: "{{ item }}" + with_items: + - crushtool -c crush_map_decompressed -o new_crush_map_compressed + - ceph osd setcrushmap -i new_crush_map_compressed + when: _crushmap.changed + + - name: Cleanup temp files from generating new crushmap + file: + path: + - crush_map_compressed + - crush_map_decompressed + - new_crush_map_compressed + state: absent + + - name: List Ceph Pools + command: ceph osd pool ls + changed_when: false + register: _ceph_pools + + - name: Create Ceph Pools + command: >- + pveceph pool create {{ item.name }} + {% if 'storage' in item %} + --add_storages {{ item.storage }} + {% endif %} + {% if 'application' in item %} + --application {{ item.application }} + {% endif %} + {% if 'rule' in item %} + --crush_rule {{ item.rule }} + {% endif %} + {% if 'pgs' in item %} + --pg_num {{ item.pgs }} + {% endif %} + {% if 'size' in item %} + --size {{ item.size }} + {% endif %} + {% if 'min_size' in item %} + --min_size {{ item.min_size }} + {% endif %} + when: item.name not in _ceph_pools.stdout_lines + with_items: '{{ pve_ceph_pools }}' + when: "inventory_hostname == groups[pve_ceph_mon_group][0]" - name: Create Ceph MDS servers command: pveceph mds create args: creates: '/var/lib/ceph/mds/ceph-{{ ansible_hostname }}' register: _ceph_mds_create - when: inventory_hostname in 
groups[pve_ceph_mds_group] and pve_ceph_fs + when: "inventory_hostname in groups[pve_ceph_mds_group] and pve_ceph_fs" - name: Wait for standby MDS command: ceph mds stat -f json @@ -190,47 +187,47 @@ until: '(_ceph_mds_stat.stdout | from_json).fsmap.standbys | length > 0' retries: 10 delay: 2 - when: _ceph_mds_create is changed + when: "_ceph_mds_create is changed" - block: - - name: List Ceph Filesystems - command: ceph fs ls -f json - changed_when: false - when: pve_ceph_fs | length > 0 - register: _ceph_fs - - - name: Create Ceph Filesystems - command: >- - pveceph fs create - --name {{ item.name }} - --add-storage {{ item.storage }} - --pg_num {{ item.pgs }} - register: _ceph_fs_create - failed_when: _ceph_fs_create.stderr - when: item.name not in (_ceph_fs.stdout | from_json | map(attribute="name")) - with_items: '{{ pve_ceph_fs }}' - - - name: Get Ceph Filesystem pool CRUSH rules - command: 'ceph -f json osd pool get {{ item.0.name }}_{{ item.1 }} crush_rule' - changed_when: false - when: pve_ceph_fs | length > 0 - register: _ceph_fs_rule - loop: '{{ pve_ceph_fs | product(["data", "metadata"]) | list }}' - - - name: Set Ceph Filesystem pool CRUSH rules - command: >- - ceph osd pool set {{ item.item.0.name }}_{{ item.item.1 }} crush_rule {{ item.item.0.rule }} - when: item.item.0.rule != (item.stdout | from_json).crush_rule - loop: '{{ _ceph_fs_rule.results }}' - loop_control: - label: '{{ item.item.0.name }}_{{ item.item.1 }}' - - - name: Create Ceph filesystem key - command: 'ceph auth get-or-create client.{{ item.name }} osd "allow rw pool={{ item.name }}_data" mon "allow r" mds "allow rw"' - register: _ceph_fs_auth - changed_when: '"added key" in _ceph_fs_auth.stdout' - when: item.mountpoint is defined - loop: '{{ pve_ceph_fs }}' + - name: List Ceph Filesystems + command: ceph fs ls -f json + changed_when: false + when: "pve_ceph_fs | length > 0" + register: _ceph_fs + + - name: Create Ceph Filesystems + command: >- + pveceph fs create + --name {{ 
item.name }} + --add-storage {{ item.storage }} + --pg_num {{ item.pgs }} + register: _ceph_fs_create + failed_when: _ceph_fs_create.stderr + when: "item.name not in (_ceph_fs.stdout | from_json | map(attribute='name'))" + with_items: '{{ pve_ceph_fs }}' + + - name: Get Ceph Filesystem pool CRUSH rules + command: 'ceph -f json osd pool get {{ item.0.name }}_{{ item.1 }} crush_rule' + changed_when: false + when: "pve_ceph_fs | length > 0" + register: _ceph_fs_rule + loop: '{{ pve_ceph_fs | product(["data", "metadata"]) | list }}' + + - name: Set Ceph Filesystem pool CRUSH rules + command: >- + ceph osd pool set {{ item.item.0.name }}_{{ item.item.1 }} crush_rule {{ item.item.0.rule }} + when: "item.item.0.rule != (item.stdout | from_json).crush_rule" + loop: '{{ _ceph_fs_rule.results }}' + loop_control: + label: '{{ item.item.0.name }}_{{ item.item.1 }}' + + - name: Create Ceph filesystem key + command: 'ceph auth get-or-create client.{{ item.name }} osd "allow rw pool={{ item.name }}_data" mon "allow r" mds "allow rw"' + register: _ceph_fs_auth + changed_when: '"added key" in _ceph_fs_auth.stdout' + when: "item.mountpoint is defined" + loop: '{{ pve_ceph_fs }}' when: inventory_hostname == groups[pve_ceph_mon_group][0] - name: Fetch Ceph filesystem key @@ -238,7 +235,7 @@ args: creates: '/etc/ceph/{{ item.name }}.secret' register: _ceph_fs_key - when: item.mountpoint is defined + when: "item.mountpoint is defined" loop: '{{ pve_ceph_fs }}' - name: Save Ceph filesystem key @@ -248,7 +245,7 @@ group: 'root' mode: '0600' content: '{{ item.stdout }}' - when: item is changed + when: "item is changed" loop: '{{ _ceph_fs_key.results }}' loop_control: label: '{{ item.item }}' @@ -264,5 +261,5 @@ fstype: 'ceph' opts: 'name={{ item.name }},secretfile=/etc/ceph/{{ item.name }}.secret,_netdev' state: 'mounted' - when: item.mountpoint is defined + when: "item.mountpoint is defined" loop: '{{ pve_ceph_fs }}' diff --git a/tasks/disable_nmi_watchdog.yml 
b/tasks/disable_nmi_watchdog.yml index 2979eda5..cf4dcc15 100644 --- a/tasks/disable_nmi_watchdog.yml +++ b/tasks/disable_nmi_watchdog.yml @@ -12,19 +12,18 @@ register: _pve_rmmod_softdog - block: - - name: Stop watchdog-mux - service: - name: watchdog-mux - state: stopped - notify: - - restart watchdog-mux + - name: Stop watchdog-mux + service: + name: watchdog-mux + state: stopped + notify: + - restart watchdog-mux - - name: Unload softdog - modprobe: - name: softdog - state: absent - - when: _pve_rmmod_softdog is failed + - name: Unload softdog + modprobe: + name: softdog + state: absent + when: "_pve_rmmod_softdog is failed" - name: Disable nmi_watchdog via GRUB config lineinfile: @@ -37,5 +36,5 @@ command: update-grub register: _pve_grub_update failed_when: ('error' in _pve_grub_update.stderr) - when: _pve_grub is changed + when: "_pve_grub is changed" tags: skiponlxc diff --git a/tasks/kernel_module_cleanup.yml b/tasks/kernel_module_cleanup.yml index 7f472687..303ebf42 100644 --- a/tasks/kernel_module_cleanup.yml +++ b/tasks/kernel_module_cleanup.yml @@ -15,33 +15,33 @@ when: "not pve_zfs_enabled | bool" - block: - - name: Re-enable nmi_watchdog via GRUB config - lineinfile: - dest: /etc/default/grub - line: 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX nmi_watchdog=0"' - state: absent - register: _pve_grub + - name: Re-enable nmi_watchdog via GRUB config + lineinfile: + dest: /etc/default/grub + line: 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX nmi_watchdog=0"' + state: absent + register: _pve_grub - - name: Update GRUB configuration - command: update-grub - register: _pve_grub_update - failed_when: ('error' in _pve_grub_update.stderr) - when: _pve_grub is changed - tags: skiponlxc + - name: Update GRUB configuration + command: update-grub + register: _pve_grub_update + failed_when: ('error' in _pve_grub_update.stderr) + when: "_pve_grub is changed" + tags: skiponlxc - - name: Remove ipmi_watchdog modprobe configuration - file: - dest: 
/etc/modprobe.d/ipmi_watchdog.conf - state: absent + - name: Remove ipmi_watchdog modprobe configuration + file: + dest: /etc/modprobe.d/ipmi_watchdog.conf + state: absent - - name: Load softdog - modprobe: - name: softdog + - name: Load softdog + modprobe: + name: softdog - - name: Set PVE HA Manager watchdog configuration back to default - copy: - content: "WATCHDOG_MODULE=softdog" - dest: /etc/default/pve-ha-manager - notify: - - restart watchdog-mux - when: pve_watchdog != 'ipmi' + - name: Set PVE HA Manager watchdog configuration back to default + copy: + content: "WATCHDOG_MODULE=softdog" + dest: /etc/default/pve-ha-manager + notify: + - restart watchdog-mux + when: "pve_watchdog != 'ipmi'" diff --git a/tasks/kernel_updates.yml b/tasks/kernel_updates.yml index f2c1b2d1..8b0a8359 100644 --- a/tasks/kernel_updates.yml +++ b/tasks/kernel_updates.yml @@ -5,18 +5,12 @@ register: _pve_kernel_update when: "pve_reboot_on_kernel_update | bool" -- block: - - name: Reboot for kernel update - shell: "sleep 5 && shutdown -r now 'PVE kernel update detected by Ansible'" - async: 1 - poll: 0 - - - name: Wait for server to come back online - wait_for_connection: - delay: 60 +- name: Reboot for kernel update + reboot: + msg: "PVE kernel update detected by Ansible" when: - "pve_reboot_on_kernel_update | bool" - - _pve_kernel_update.new_kernel_exists + - "_pve_kernel_update.new_kernel_exists" - name: Collect kernel package information collect_kernel_info: diff --git a/tasks/load_variables.yml b/tasks/load_variables.yml index 3a5c2ed7..cbcd522e 100644 --- a/tasks/load_variables.yml +++ b/tasks/load_variables.yml @@ -8,28 +8,28 @@ # address. Thus, we're deprecating them. See below references. 
# https://pve.proxmox.com/wiki/Separate_Cluster_Network#Setup_at_Cluster_Creation # https://git.proxmox.com/?p=pve-cluster.git;a=blob;f=data/PVE/Corosync.pm;h=8b5c91e0da084da4e9ba7423176872a0c16ef5af;hb=refs/heads/stable-5#l209 - - name: LEGACY - Define pve_cluster_addr0 from bindnet0_addr/ring0_addr - set_fact: - pve_cluster_addr0: "{{ pve_cluster_bindnet0_addr | default(pve_cluster_ring0_addr) }}" - when: pve_cluster_ring0_addr is defined and ansible_distribution_release == 'stretch' + - name: LEGACY - Define pve_cluster_addr0 from bindnet0_addr/ring0_addr + set_fact: + pve_cluster_addr0: "{{ pve_cluster_bindnet0_addr | default(pve_cluster_ring0_addr) }}" + when: "pve_cluster_ring0_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr0 from link0_addr - set_fact: - pve_cluster_addr0: "{{ pve_cluster_link0_addr }}" - when: pve_cluster_link0_addr is defined and ansible_distribution_release == 'buster' - when: pve_cluster_addr0 is not defined + - name: LEGACY - Define pve_cluster_addr0 from link0_addr + set_fact: + pve_cluster_addr0: "{{ pve_cluster_link0_addr }}" + when: "pve_cluster_link0_addr is defined and ansible_distribution_release == 'buster'" + when: "pve_cluster_addr0 is not defined" - block: - - name: LEGACY - Define pve_cluster_addr1 from bindnet1_addr/ring1_addr - set_fact: - pve_cluster_addr1: "{{ pve_cluster_bindnet1_addr | default(pve_cluster_ring1_addr) }}" - when: pve_cluster_ring1_addr is defined and ansible_distribution_release == 'stretch' + - name: LEGACY - Define pve_cluster_addr1 from bindnet1_addr/ring1_addr + set_fact: + pve_cluster_addr1: "{{ pve_cluster_bindnet1_addr | default(pve_cluster_ring1_addr) }}" + when: "pve_cluster_ring1_addr is defined and ansible_distribution_release == 'stretch'" - - name: LEGACY - Define pve_cluster_addr1 from link1_addr - set_fact: - pve_cluster_addr1: "{{ pve_cluster_link1_addr }}" - when: pve_cluster_link1_addr is defined and ansible_distribution_release 
== 'buster' - when: pve_cluster_addr1 is not defined + - name: LEGACY - Define pve_cluster_addr1 from link1_addr + set_fact: + pve_cluster_addr1: "{{ pve_cluster_link1_addr }}" + when: "pve_cluster_link1_addr is defined and ansible_distribution_release == 'buster'" + when: "pve_cluster_addr1 is not defined" - name: Define pve_cluster_addr0 if not provided set_fact: diff --git a/tasks/main.yml b/tasks/main.yml index d9e0a555..31389597 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -1,5 +1,5 @@ ---- # tasks file for ansible-role-proxmox +--- - import_tasks: load_variables.yml - name: Ensure that facts are present for all cluster hosts @@ -8,6 +8,7 @@ - "hostvars[item].ansible_facts" msg: "Could not load facts for {{ item }}. Please run your playbook against all hosts in {{ pve_group }}." with_items: "{{ groups[pve_group] }}" + when: "pve_cluster_enabled | bool" - name: Ensure this host is in the group specified assert: @@ -139,31 +140,30 @@ until: _proxmox_install is succeeded - block: - - name: Remove automatically installed PVE Enterprise repo configuration - apt_repository: - repo: "{{ item }}" - filename: pve-enterprise - state: absent - with_items: - - "deb https://enterprise.proxmox.com/debian {{ ansible_distribution_release }} pve-enterprise" - - "deb https://enterprise.proxmox.com/debian/pve {{ ansible_distribution_release }} pve-enterprise" - - - name: Remove subscription check wrapper function in web UI - patch: - src: "00_remove_checked_command_{{ ansible_distribution_release }}.patch" - basedir: / - strip: 1 - backup: yes - when: - - "pve_remove_subscription_warning | bool" - + - name: Remove automatically installed PVE Enterprise repo configuration + apt_repository: + repo: "{{ item }}" + filename: pve-enterprise + state: absent + with_items: + - "deb https://enterprise.proxmox.com/debian {{ ansible_distribution_release }} pve-enterprise" + - "deb https://enterprise.proxmox.com/debian/pve {{ ansible_distribution_release }} pve-enterprise" + + - 
name: Remove subscription check wrapper function in web UI + patch: + src: "00_remove_checked_command_{{ ansible_distribution_release }}.patch" + basedir: / + strip: 1 + backup: yes + when: + - "pve_remove_subscription_warning | bool" when: - "'pve-no-subscription' in pve_repository_line" - import_tasks: kernel_updates.yml - import_tasks: ipmi_watchdog.yml - when: pve_watchdog == 'ipmi' + when: "pve_watchdog == 'ipmi'" - import_tasks: zfs.yml when: "pve_zfs_enabled | bool" @@ -176,8 +176,8 @@ basedir: / strip: 1 when: - - ansible_distribution_release == 'buster' - - pve_cluster_enabled | bool + - "ansible_distribution_release == 'buster'" + - "pve_cluster_enabled | bool" - import_tasks: pve_cluster_config.yml when: "pve_cluster_enabled | bool" @@ -185,13 +185,21 @@ - import_tasks: ceph.yml when: "pve_ceph_enabled | bool" +- name: Configure Proxmox roles + proxmox_role: + name: "{{ item.name }}" + privileges: "{{ item.privileges }}" + state: "{{ item.state | default('present') }}" + with_items: "{{ pve_roles }}" + when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + - name: Configure Proxmox groups proxmox_group: name: "{{ item.name }}" state: "{{ item.state | default('present') }}" comment: "{{ item.comment | default(omit) }}" with_items: "{{ pve_groups }}" - when: "not pve_cluster_enabled or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])" + when: "not pve_cluster_enabled or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - name: Configure Proxmox user accounts proxmox_user: @@ -206,7 +214,7 @@ password: "{{ item.password | default(omit) }}" expire: "{{ item.expire | default(omit) }}" with_items: "{{ pve_users }}" - when: "not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])" + when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - 
name: Configure Proxmox ACLs proxmox_acl: @@ -216,7 +224,7 @@ groups: "{{ item.groups | default([]) }}" users: "{{ item.users | default([]) }}" with_items: "{{ pve_acls }}" - when: "not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])" + when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - name: Configure Proxmox Storage proxmox_storage: @@ -238,24 +246,24 @@ vgname: "{{ item.vgname | default(omit) }}" thinpool: "{{ item.thinpool | default(omit) }}" with_items: "{{ pve_storages }}" - when: "not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0])" + when: "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" - name: Check datacenter.cfg exists stat: path: "/etc/pve/datacenter.cfg" register: _datacenter_cfg when: - - not pve_cluster_enabled or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0]) - - pve_datacenter_cfg | length > 0 + - "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + - "pve_datacenter_cfg | length > 0" - name: Create datacenter.cfg if it does not exist file: path: "/etc/pve/datacenter.cfg" state: "touch" when: - - not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0]) - - pve_datacenter_cfg | length > 0 - - not _datacenter_cfg.stat.exists + - "not pve_cluster_enabled | bool or (pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + - "pve_datacenter_cfg | length > 0" + - "not _datacenter_cfg.stat.exists" - name: Configure datacenter.cfg copy: @@ -268,13 +276,13 @@ {{ k }}: {{ v }} {% endfor %} when: - - not pve_cluster_enabled | bool or (pve_cluster_enabled and inventory_hostname == groups[pve_group][0]) - - pve_datacenter_cfg | length > 0 + - "not pve_cluster_enabled | bool or 
(pve_cluster_enabled | bool and inventory_hostname == groups[pve_group][0])" + - "pve_datacenter_cfg | length > 0" - import_tasks: ssl_config.yml when: - - pve_ssl_private_key is defined - - pve_ssl_certificate is defined + - "pve_ssl_private_key is defined" + - "pve_ssl_certificate is defined" - import_tasks: ssl_letsencrypt.yml when: "pve_ssl_letsencrypt | bool" diff --git a/tasks/ssl_config.yml b/tasks/ssl_config.yml index da7d6d5e..114bf6f0 100644 --- a/tasks/ssl_config.yml +++ b/tasks/ssl_config.yml @@ -4,15 +4,19 @@ content: "{{ item.content }}" dest: "{{ item.dest }}" with_items: - - { dest: "/etc/ssl/pveproxy-ssl.key", content: "{{ pve_ssl_private_key }}" } - - { dest: "/etc/ssl/pveproxy-ssl.pem", content: "{{ pve_ssl_certificate }}" } + - dest: "/etc/ssl/pveproxy-ssl.key" + content: "{{ pve_ssl_private_key }}" + - dest: "/etc/ssl/pveproxy-ssl.pem" + content: "{{ pve_ssl_certificate }}" - name: Install PVE SSL certificate chain and key shell: "diff {{ item.src }} {{ item.dest }} >/dev/null 2>&1 || (cp {{ item.src }} {{ item.dest }}; echo changed)" register: _pve_ssl_diff changed_when: "'changed' in _pve_ssl_diff.stdout" with_items: - - { src: "/etc/ssl/pveproxy-ssl.key", dest: "/etc/pve/local/pveproxy-ssl.key"} - - { src: "/etc/ssl/pveproxy-ssl.pem", dest: "/etc/pve/local/pveproxy-ssl.pem"} + - src: "/etc/ssl/pveproxy-ssl.key" + dest: "/etc/pve/local/pveproxy-ssl.key" + - src: "/etc/ssl/pveproxy-ssl.pem" + dest: "/etc/pve/local/pveproxy-ssl.pem" notify: - restart pveproxy diff --git a/tasks/zfs.yml b/tasks/zfs.yml index 5c2fa188..e192af99 100644 --- a/tasks/zfs.yml +++ b/tasks/zfs.yml @@ -24,4 +24,4 @@ regexp: '^#?ZED_EMAIL_ADDR=' notify: - restart zfs-zed - when: pve_zfs_zed_email is defined + when: "pve_zfs_zed_email is defined" diff --git a/tests/group_vars/all b/tests/group_vars/all index 6625e209..5f8155e6 100644 --- a/tests/group_vars/all +++ b/tests/group_vars/all @@ -20,6 +20,14 @@ pve_cluster_ha_groups: comment: "Resources on proxmox-5-01" nodes: 
proxmox-5-01 restricted: 1 +pve_roles: + - name: Monitoring + privileges: + - "Sys.Modify" + - "Sys.Audit" + - "Datastore.Audit" + - "VM.Monitor" + - "VM.Audit" pve_groups: - name: Admins comment: Administrators of this PVE cluster diff --git a/tests/install.yml b/tests/install.yml index 71b97b76..bdfde017 100644 --- a/tests/install.yml +++ b/tests/install.yml @@ -46,10 +46,10 @@ - name: Update CA certificate store shell: update-ca-certificates - block: - - name: Create host SSL private key - shell: "openssl genrsa -out {{ ssl_host_key_path }} 2048" - - name: Create host SSL certificate signing request - shell: "openssl req -new -key {{ ssl_host_key_path }} -subj '{{ ssl_subj }}' -out {{ ssl_host_csr_path }}" - - name: Create host SSL certificate - shell: "openssl x509 -req -in {{ ssl_host_csr_path }} -CA {{ ssl_ca_cert_path }} -CAkey {{ ssl_ca_key_path }} -days 1 -CAcreateserial -sha256 -out {{ ssl_host_cert_path }}" + - name: Create host SSL private key + shell: "openssl genrsa -out {{ ssl_host_key_path }} 2048" + - name: Create host SSL certificate signing request + shell: "openssl req -new -key {{ ssl_host_key_path }} -subj '{{ ssl_subj }}' -out {{ ssl_host_csr_path }}" + - name: Create host SSL certificate + shell: "openssl x509 -req -in {{ ssl_host_csr_path }} -CA {{ ssl_ca_cert_path }} -CAkey {{ ssl_ca_key_path }} -days 1 -CAcreateserial -sha256 -out {{ ssl_host_cert_path }}" delegate_to: localhost diff --git a/tests/test.yml b/tests/test.yml index 7fe70544..bf5bb55d 100644 --- a/tests/test.yml +++ b/tests/test.yml @@ -99,16 +99,16 @@ with_items: "{{ pve_cluster_ha_groups }}" - block: - - name: pvedaemon service status - shell: "journalctl --no-pager -xu pvedaemon.service" - changed_when: False - - name: pve-cluster service status - shell: "journalctl --no-pager -xu pve-cluster.service" - changed_when: False - - name: pveproxy service status - shell: "journalctl --no-pager -xu pveproxy.service" - changed_when: False - - name: hosts file - shell: "cat 
/etc/hosts" - changed_when: False + - name: pvedaemon service status + shell: "journalctl --no-pager -xu pvedaemon.service" + changed_when: False + - name: pve-cluster service status + shell: "journalctl --no-pager -xu pve-cluster.service" + changed_when: False + - name: pveproxy service status + shell: "journalctl --no-pager -xu pveproxy.service" + changed_when: False + - name: hosts file + shell: "cat /etc/hosts" + changed_when: False ignore_errors: yes diff --git a/tests/vagrant/package_role.yml b/tests/vagrant/package_role.yml index 3e2baee2..a91d8fcd 100644 --- a/tests/vagrant/package_role.yml +++ b/tests/vagrant/package_role.yml @@ -5,12 +5,12 @@ role_name: lae.proxmox tasks: - block: - - shell: pwd - - name: Package up current working role - shell: "cd $(git rev-parse --show-toplevel); git ls-files -z | xargs -0 tar -czvf $OLDPWD/{{ role_name }}.tar.gz" - - name: Install packaged role - shell: "ansible-galaxy install {{ role_name }}.tar.gz,devel-$(git rev-parse HEAD),{{ role_name }} --force" - - name: Remove packaged role artifact - file: - dest: "{{ role_name }}.tar.gz" - state: absent + - shell: pwd + - name: Package up current working role + shell: "cd $(git rev-parse --show-toplevel); git ls-files -z | xargs -0 tar -czvf $OLDPWD/{{ role_name }}.tar.gz" + - name: Install packaged role + shell: "ansible-galaxy install {{ role_name }}.tar.gz,devel-$(git rev-parse HEAD),{{ role_name }} --force" + - name: Remove packaged role artifact + file: + dest: "{{ role_name }}.tar.gz" + state: absent diff --git a/tests/vagrant/provision.yml b/tests/vagrant/provision.yml index 0c01c63e..36ac9f3b 100644 --- a/tests/vagrant/provision.yml +++ b/tests/vagrant/provision.yml @@ -1,3 +1,4 @@ +--- - hosts: all become: True pre_tasks: