diff --git a/.circleci/config.yml b/.circleci/config.yml index 286a6c0900..9beb4b57d7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,6 +9,21 @@ parameters: run-setup: type: boolean default: true + continued: + type: boolean + default: false + GHA_Event: + type: string + default: "" + GHA_Actor: + type: string + default: "" + GHA_Action: + type: string + default: "" + GHA_Meta: + type: string + default: "" run-any: type: boolean default: false @@ -53,20 +68,18 @@ parameters: default: false run-networkcompliance: type: boolean - default: false + default: false + ansible_cisco_dnac_version: + type: string + default: "6.17.0" jobs: pre: - parameters: - ansible_cisco_dnac_version: - type: string - default: "6.9.0" - #machine: true docker: - image: python:3.8.10 - resource_class: madhansansel/dnacenter-ansible + resource_class: madhansansel/dnacenter-ansible steps: - run: @@ -77,16 +90,11 @@ jobs: echo "CIRCLE_PROJECT_REPONAME: $CIRCLE_PROJECT_REPONAME" echo "CIRCLE_PROJECT_BRANCHNAME: $CIRCLE_PROJECT_BRANCHNAME" - build: - parameters: - ansible_cisco_dnac_version: - type: string - default: "6.9.0" - + build: #machine: true docker: - image: python:3.8.10 - resource_class: madhansansel/dnacenter-ansible + resource_class: madhansansel/dnacenter-ansible steps: - run: name: Debug information @@ -120,7 +128,7 @@ jobs: # Install ansible, dnacentersdk pip install --upgrade pip pip install -r test-requirements.txt - # Build collection and store resulting tarball in directory $HOME/.cache/v<< parameters.ansible_cisco_dnac_version >>/collection-tarballs + # Build collection and store resulting tarball in directory $HOME/.cache/v<< pipeline.parameters.ansible_cisco_dnac_version >>/collection-tarballs ansible-galaxy collection build --force --output-path workspace/ - save_cache: key: collection-<< pipeline.git.revision >> @@ -132,10 +140,14 @@ jobs: - ~/.cache/pip addrole: + parameters: + run-all: + type: boolean + default: false #machine: true docker: - image: python:3.8.10 - resource_class: madhansansel/dnacenter-ansible + resource_class: madhansansel/dnacenter-ansible steps: - run: @@ -257,17 +269,21 @@ jobs: paths: - 'ccc_roles.yml' sanity-tests: - parameters: - ansible_cisco_dnac_version: - type: string - default: "6.9.0" - #machine: true docker: - image: python:3.8.10 - resource_class: madhansansel/dnacenter-ansible + resource_class: madhansansel/dnacenter-ansible parallelism: 4 steps: + - when: + condition: + not: << pipeline.parameters.run-any >> + steps: + - run: + name: Finish sanity tests as there is nothing to execute + command: | + circleci-agent step halt + - run: name: Debug information command: | @@ -319,7 +335,16 @@ jobs: export ANSIBLE_ROLES_PATH=$PWD/tests/integration cat ccc_roles.yml echo $(circleci tests glob "tests/integration/*") - cat ccc_roles.yml | circleci tests run --command "xargs ./run_tests.sh" --split-by=timings --timings-type=name + echo -n 0 > rc + cat ccc_roles.yml | circleci tests run --command "xargs ./run_tests.sh" --split-by=timings --timings-type=name || echo -n $? 
> rc + echo ${LOGS_SSH_KEY} | base64 -d > ~/id_ssh + chmod 600 ~/id_ssh + export NO_PROXY="" + export no_proxy="" + ssh $LOGS_MACHINE -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ~/id_ssh mkdir -p /var/www/html/madhan-logs/$CIRCLE_BUILD_NUM + scp -i ~/id_ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null sanity_tests_logs_$CIRCLE_NODE_INDEX $LOGS_MACHINE:/var/www/html/madhan-logs/$CIRCLE_BUILD_NUM + echo "LOGS URL: http://10.195.243.37/madhan-logs/$CIRCLE_BUILD_NUM/sanity_tests_logs_$CIRCLE_NODE_INDEX" + if [[ $(cat rc) != "0" ]]; then exit 1; fi no_output_timeout: 120m @@ -327,7 +352,7 @@ jobs: #machine: true docker: - image: python:3.8.10 - resource_class: madhansansel/dnacenter-ansible + resource_class: madhansansel/dnacenter-ansible steps: - when: @@ -337,18 +362,56 @@ jobs: command: | python ${HOME}/static/pnp_script.py #TODO + main-pr: + docker: + - image: maniator/gh:v2.49.2 + resource_class: madhansansel/dnacenter-ansible + steps: + - run: + name: Clone repo to workspace + command: git clone --depth=1 -b $CIRCLE_BRANCH https://github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME.git . + - run: + name: Create release pr to main + command: | + gh repo set-default $CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + gh pr create --base main --head $CIRCLE_BRANCH --title "Release v<< pipeline.parameters.ansible_cisco_dnac_version >>" --body "Release v<< pipeline.parameters.ansible_cisco_dnac_version >>" + + release-job: + docker: + - image: python:3.8.10 + resource_class: madhansansel/dnacenter-ansible + steps: + - run: + name: Clone repo to workspace + command: | + git clone --depth=1 https://github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME.git . + git fetch origin $CIRCLE_BRANCH:work + git checkout work + - restore_cache: + keys: + - collection-<< pipeline.git.revision >> + # - add_ssh_keys: + # fingerprints: + # - "KEY_FINGERPRINT" + - run: + echo create release + #git tag -a v<< pipeline.parameters.ansible_cisco_dnac_version >> -m "Ansible DNACCollection Version v<< pipeline.parameters.ansible_cisco_dnac_version >>" + #gh release create "v<< pipeline.parameters.ansible_cisco_dnac_version >>" --title "DNAC Collection Version v<< pipeline.parameters.ansible_cisco_dnac_version >>" --latest + #ansible-galaxy collection publish workspace/*.tar.gz --api-key=$GALAXYKEY + workflows: building: - when: << pipeline.parameters.run-setup >> + when: + and: + - or: + - equal: [ run-tests, << pipeline.parameters.GHA_Meta >> ] +# - equal: [ true, << pipeline.parameters.run-setup >> ] + - not: << pipeline.parameters.continued >> jobs: - pre - build: - matrix: - parameters: - ansible_cisco_dnac_version: - - "6.9.0" requires: - pre @@ -359,6 +422,7 @@ workflows: config-path: .circleci/config.yml mapping: | .* run-setup false + .* continued true plugins/.* run-any true tests/integration/.* run-any true @@ -394,7 +458,10 @@ workflows: tests/integration/ccc_network_compliance_management/.* run-networkcompliance true testing: - when: << pipeline.parameters.run-any >> + when: + or: + - equal: [ true, << pipeline.parameters.run-any >> ] + - equal: [ true, << pipeline.parameters.continued >> ] jobs: - addrole - sanity-tests: @@ -402,11 +469,48 @@ workflows: - addrole context: - dnac-servers - matrix: - parameters: - ansible_cisco_dnac_version: - - "6.9.0" + - logs-vm # - post_pnp_testing: # requires: # - sanity-tests + release-candidate: + when: + or: + - equal: [ run-release-prep, << pipeline.parameters.GHA_Meta >> ] + jobs: + - build + - addrole: + matrix: 
+ parameters: + run-all: + - true + - sanity-tests: + requires: + - addrole + - build + context: + - dnac-servers + - hold: + type: approval + requires: + - sanity-tests + + - main-pr: + context: + - gh-token + requires: + - hold + + release: + when: + or: + - equal: [ run-release, << pipeline.parameters.GHA_Meta >> ] + jobs: + - build + - release-job: + requires: + - build + context: + - gh-token + - galaxy-token \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..7835113143 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,35 @@ +## Description +Please include a summary of the changes and the related issue. Also, include relevant motivation and context. + +## Type of Change +- [ ] Bug fix +- [ ] New feature +- [ ] Breaking change +- [ ] Documentation update + +## Checklist +- [ ] My code follows the style guidelines of this project +- [ ] I have performed a self-review of my own code +- [ ] I have commented my code, particularly in hard-to-understand areas +- [ ] I have made corresponding changes to the documentation +- [ ] My changes generate no new warnings +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] New and existing unit tests pass locally with my changes +- [ ] Any dependent changes have been merged and published in downstream modules +- [ ] All the sanity checks have been completed and the sanity test cases have been executed + +## Ansible Best Practices +- [ ] Tasks are idempotent (can be run multiple times without changing state) +- [ ] Variables and secrets are handled securely (e.g., using `ansible-vault` or environment variables) +- [ ] Playbooks are modular and reusable +- [ ] Handlers are used for actions that need to run on change + +## Documentation +- [ ] All options and parameters are documented clearly. +- [ ] Examples are provided and tested. +- [ ] Notes and limitations are clearly stated. 
+ +## Screenshots (if applicable) + +## Notes to Reviewers + diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml new file mode 100644 index 0000000000..348396929a --- /dev/null +++ b/.github/workflows/automerge.yml @@ -0,0 +1,25 @@ +name: automerge +on: + pull_request: + types: + - labeled + - unlabeled + - synchronize + - opened + - edited + - ready_for_review + - reopened + - unlocked + pull_request_review: + types: [submitted] +jobs: + automerge-job: + runs-on: ubuntu-latest + steps: + - uses: reitermarkus/automerge@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + merge-method: rebase + do-not-merge-labels: never-merge +# pull-request: ${{ github.event.inputs.pull-request }} + dry-run: false \ No newline at end of file diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 0000000000..a45890b477 --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,22 @@ +name: integration-tests +on: + workflow_run: + workflows: + - main +jobs: + trigger-circleci: + runs-on: ubuntu-20.04 + if: github.event.workflow_run.conclusion == 'success' + steps: + - uses: actions/download-artifact@v4 + with: + name: github_ref-${{ github.event.workflow_run.id }} + run-id: ${{ github.event.workflow_run.id }} + github-token: ${{ secrets.GITHUB_TOKEN }} + - name: Trigger circleci + run: | + printf '{"branch": "%s" ,"parameters": {"GHA_Meta":"run-tests"}}' $(cat github_ref) > req_body + curl -X POST --location "https://circleci.com/api/v2/project/gh/madhansansel/dnacenter-ansible/pipeline" \ + -H "Content-Type: application/json" \ + -H "Circle-Token: ${{ secrets.CCI_TOKEN }}" \ + -d "@req_body" \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000..2202794cc3 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,15 @@ +name: main +on: + pull_request_review: + types: [submitted] +jobs: + trigger-circleci: + if: github.event.review.state == 'approved' + runs-on: ubuntu-20.04 + steps: + - run: echo -n "${GITHUB_REF}" | sed -r 's/^refs\///' | sed -r 's/merge/head/' > github_ref + - name: Store ref for circleci trigger + uses: actions/upload-artifact@v4 + with: + name: github_ref-${{ github.run_id }} + path: github_ref \ No newline at end of file diff --git a/.github/workflows/release-prep.yml b/.github/workflows/release-prep.yml new file mode 100644 index 0000000000..9d02483231 --- /dev/null +++ b/.github/workflows/release-prep.yml @@ -0,0 +1,17 @@ +# name: release-prep +# on: +# push: +# branches: +# - '**-rc' + +# jobs: +# trigger-circleci: +# runs-on: ubuntu-20.04 +# steps: +# - name: Trigger CircleCI +# id: trigger-circle-ci +# uses: CircleCI-Public/trigger-circleci-pipeline-action@v1.2.0 +# with: +# GHA_Meta: "run-release-prep" +# env: +# CCI_TOKEN: ${{ secrets.CCI_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..936ca9d29a --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,16 @@ +# name: release +# on: +# push: +# branches: +# - 'main' +# jobs: +# trigger-circleci: +# runs-on: ubuntu-20.04 +# steps: +# - name: Trigger CircleCI +# id: trigger-circle-ci +# uses: CircleCI-Public/trigger-circleci-pipeline-action@v1.2.0 +# with: +# GHA_Meta: "run-release" +# env: +# CCI_TOKEN: ${{ secrets.CCI_TOKEN }} \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000..c149cd20f5 --- 
/dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @madhansansel \ No newline at end of file diff --git a/README.md b/README.md index 911f7d3e88..6ed7c1e038 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ The following table shows the supported versions. | 2.2.3.3 | 6.4.0 | 2.4.11 | | 2.3.3.0 | 6.6.4 | 2.5.5 | | 2.3.5.3 | 6.13.3 | 2.6.11 | -| 2.3.7.6 | ^6.17.1 | ^2.7.2 | +| 2.3.7.6 | ^6.18.0 | ^2.7.2 | If your Ansible collection is older please consider updating it first. diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 2ffb8427c7..e158aaf061 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -962,3 +962,21 @@ releases: release_summary: Fix family name. minor_changes: - Fix family name from userand_roles to user_and_roles. + 6.18.0: + release_date: "2024-08-30" + changes: + release_summary: Code changes in workflow manager modules. + minor_changes: + - Added 'fabric_sites_zones_workflow_manager.py' to manage fabric sites/zones and update the authentication profile template. + - Added 'sda_extranet_policies_workflow_manager' to provide SDA Extranet Policies for managing SDA Extranet Policy. + - Added Circle CI support for integration testing. + - Changes in inventory_workflow_manager to support maximum devices to resync, and resync timeout. + - Changes in network_settings_workflow_manager to support reserve ip subpools. + - Changes in provision_workflow_manager to support enhanced log messages. + - Changes in rma_workflow_manager module to support pre check for device replacement. + - Bug fixes in user_role_workflow_manager module. + - Changes in accesspoint_workflow_manager module. + - Changes in device_configs_backup_workflow_manager to support name of the site to which the device is assigned. + - inventory_workflow_manager.py: added attributes resync_device_count and resync_max_timeout + - accesspoint_workflow_manager.py: added attributes 'is_assigned_site_as_location', and other new attributes. + - device_configs_backup_workflow_manager.py. added attribute 'site'. diff --git a/galaxy.yml b/galaxy.yml index ee68239a10..a031d65653 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,7 +1,7 @@ --- namespace: cisco name: dnac -version: 6.17.1 +version: 6.18.0 readme: README.md authors: - Rafael Campos @@ -20,6 +20,7 @@ authors: - Rangaprabhu Deenadayalu - Ajith Andrew J - Syed Khadeer Ahmed + - A Mohamed Rafeek description: Ansible Modules for Cisco DNA Center license_file: "LICENSE" tags: diff --git a/playbooks/fabric_sites_zones_workflow_manager.yml b/playbooks/fabric_sites_zones_workflow_manager.yml new file mode 100644 index 0000000000..a230460d4d --- /dev/null +++ b/playbooks/fabric_sites_zones_workflow_manager.yml @@ -0,0 +1,37 @@ +--- +- name: Configure fabric site/zones and authentication profile template in Cisco Catalyst Center + hosts: localhost + connection: local + gather_facts: no + vars_files: + - "input_fabric_sites_zones.yml" + - "credentials.yml" + tasks: + - name: Configure the fabric sites/zones and authentication profile template in Cisco Catalyst Center. 
+ cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: DEBUG + dnac_log: true + config_verify: True + state: merged + config: + - fabric_sites: + - site_name: "{{item.fabric_sites.site_name}}" + site_type: "{{item.fabric_sites.site_type}}" + authentication_profile: "{{item.fabric_sites.authentication_profile}}" + is_pub_sub_enabled: "{{item.fabric_sites.is_pub_sub_enabled}}" + update_authentication_profile: + authentication_order: "{{item.fabric_sites.update_authentication_profile.authentication_order}}" + dot1x_fallback_timeout: "{{item.fabric_sites.update_authentication_profile.dot1x_fallback_timeout}}" + wake_on_lan: "{{item.fabric_sites.update_authentication_profile.wake_on_lan}}" + number_of_hosts: "{{item.fabric_sites.update_authentication_profile.number_of_hosts}}" + + with_items: "{{ fabric_sites}}" + tags: + - fabric_site_zones_testing diff --git a/playbooks/rma_workflow_manager.yml b/playbooks/rma_workflow_manager.yml index 8f3189b54b..99ac72618d 100644 --- a/playbooks/rma_workflow_manager.yml +++ b/playbooks/rma_workflow_manager.yml @@ -23,6 +23,6 @@ timeout_interval: 100 state: replaced config: - - faulty_device_ip_address: 204.1.2.9 - replacement_device_ip_address: 204.1.2.10 + - faulty_device_ip_address: 204.1.2.9 + replacement_device_ip_address: 204.1.2.10 register: result diff --git a/playbooks/sda_extranet_policies_workflow_manager.yml b/playbooks/sda_extranet_policies_workflow_manager.yml new file mode 100644 index 0000000000..706722f81d --- /dev/null +++ b/playbooks/sda_extranet_policies_workflow_manager.yml @@ -0,0 +1,71 @@ +--- +- name: Testing + hosts: dnac_servers + gather_facts: no + + vars_files: + - "credentials.yml" + + vars: + dnac_login: &dnac_login + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: INFO + dnac_log_append: False + config_verify: true + + tasks: + - name: Create Extranet Policy + cisco.dnac.network_compliance_workflow_manager: + <<: *dnac_login + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_3"] + + + - name: Create Extranet Policy with Fabric Site(s) specified + cisco.dnac.network_compliance_workflow_manager: + <<: *dnac_login + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_3"] + fabric_sites: ["Global/Test_Extranet_Polcies/USA", "Global/Test_Extranet_Polcies/India"] + + + - name: Update existing Extranet Policy + cisco.dnac.network_compliance_workflow_manager: + <<: *dnac_login + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_4"] + + + - name: Update existing Extranet Policy with Fabric Site(s) specified + cisco.dnac.network_compliance_workflow_manager: + <<: *dnac_login + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_4"] + fabric_sites: 
["Global/Test_Extranet_Polcies/USA", "Global/Test_Extranet_Polcies/India"] + + + - name: Delete Extranet Policy + cisco.dnac.network_compliance_workflow_manager: + <<: *dnac_login + state: deleted + config: + - extranet_policy_name: "test_extranet_policy_1" diff --git a/plugins/module_utils/dnac.py b/plugins/module_utils/dnac.py index f44223c1ae..bd5c430bd4 100644 --- a/plugins/module_utils/dnac.py +++ b/plugins/module_utils/dnac.py @@ -6,6 +6,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +try: + from cryptography.fernet import Fernet + HAS_FERNET = True +except ImportError: + HAS_FERNET = False try: from dnacentersdk import api, exceptions except ImportError: @@ -290,6 +295,7 @@ def get_dnac_params(self, params): "dnac_port": params.get("dnac_port"), "dnac_username": params.get("dnac_username"), "dnac_password": params.get("dnac_password"), + "dnac_version": params.get("dnac_version"), "dnac_verify": params.get("dnac_verify"), "dnac_debug": params.get("dnac_debug"), "dnac_log": params.get("dnac_log"), @@ -504,6 +510,58 @@ def check_string_dictionary(self, task_details_data): pass return None + def generate_key(self): + """ + Generate a new encryption key using Fernet. + Returns: + - key (bytes): A newly generated encryption key. + Criteria: + - This function should only be called if HAS_FERNET is True. + """ + if HAS_FERNET: + return {"generate_key": Fernet.generate_key()} + else: + error_message = "The 'cryptography' library is not installed. Please install it using 'pip install cryptography'." + return {"error_message": error_message} + + def encrypt_password(self, password, key): + """ + Encrypt a plaintext password using the provided encryption key. + Args: + - password (str): The plaintext password to be encrypted. + - key (bytes): The encryption key used to encrypt the password. + Returns: + - encrypted_password (bytes): The encrypted password as bytes. + Criteria: + - This function should only be called if HAS_FERNET is True. + - The password should be encoded to bytes before encryption. + """ + try: + fernet = Fernet(key) + encrypted_password = fernet.encrypt(password.encode()) + return {"encrypt_password": encrypted_password} + except Exception as e: + return {"error_message": "Exception occurred while encrypting password: {0}".format(e)} + + def decrypt_password(self, encrypted_password, key): + """ + Decrypt an encrypted password using the provided encryption key. + Args: + - encrypted_password (bytes): The encrypted password as bytes to be decrypted. + - key (bytes): The encryption key used to decrypt the password. + Returns: + - decrypted_password (str): The decrypted plaintext password. + Criteria: + - This function should only be called if HAS_FERNET is True. + - The encrypted password should be decoded from bytes after decryption. + """ + try: + fernet = Fernet(key) + decrypted_password = fernet.decrypt(encrypted_password.encode()).decode() + return {"decrypt_password": decrypted_password} + except Exception as e: + return {"error_message": "Exception occurred while decrypting password: {0}".format(e)} + def camel_to_snake_case(self, config): """ Convert camel case keys to snake case keys in the config. 
diff --git a/plugins/modules/accesspoint_workflow_manager.py b/plugins/modules/accesspoint_workflow_manager.py index 84ab02ee89..bd78180194 100644 --- a/plugins/modules/accesspoint_workflow_manager.py +++ b/plugins/modules/accesspoint_workflow_manager.py @@ -65,7 +65,7 @@ type: str required: True rf_profile: - description: Radio Frequency (RF) profile of the Access Point (e.g., 'HIGH'). + description: Radio Frequency (RF) profile of the Access Point. For example, "HIGH". type: str required: False site: @@ -78,37 +78,43 @@ required: False suboptions: name: - description: Name of the floor.(eg. 'FLOOR1') + description: Name of the floor. For example, "FLOOR1". type: str required: False parent_name: - description: Parent name of the floor in the site hierarchy.(eg. 'Global/USA/New York/BLDNYC') + description: Parent name of the floor in the site hierarchy. For example, "Global/USA/New York/BLDNYC". type: str required: False ap_name: - description: Current AP name that needs to be changed along with the new AP name.(eg. "Test2") + description: Current AP name that needs to be changed along with the new AP name. For example, "Test2". type: str required: False admin_status: - description: Status of the AP configuration. Accepts "Enabled" or "Disabled". (eg. "Enabled") + description: Status of the AP configuration. Accepts "Enabled" or "Disabled". For example, "Enabled". type: str required: False led_status: - description: State of the AP's LED. Accepts "Enabled" or "Disabled".(eg. "Enabled") + description: State of the AP's LED. Accepts "Enabled" or "Disabled". For example, "Enabled". type: str required: False led_brightness_level: - description: Brightness level of the AP's LED. Accepts values from 1 to 8.(eg. 3) + description: Brightness level of the AP's LED. Accepts values from 1 to 8. For example, 3. type: int required: False ap_mode: description: | - Mode of operation for the Access Point (AP). Possible values include "local/flexconnect", - "monitor", "sniffer", or "Bridge/Flex+Bridge".(eg. "Local") + Defines the mode of operation for the Access Point (AP). Possible values include "Local", + "Monitor", "Sniffer", or "Bridge". For example, "Local". type: str required: False location: - description: Location name of the AP. Provide this data if a change is required.(eg. "Bangalore") + description: Location name of the AP. Provide this data if a change is required. For example, "Bangalore". + type: str + required: False + is_assigned_site_as_location: + description: | + Configures whether the access point location is automatically set to the site assigned to the access point. + Accepts "Enabled" or "Disabled". If set to "Enabled", no additional location configuration is required. type: str required: False failover_priority: @@ -117,24 +123,24 @@ required: False clean_air_si_2.4ghz: description: | - Clean Air Spectrum Intelligence (SI) feature status for the 2.4GHz band. Indicates whether.(eg. "Enabled") + Clean Air Spectrum Intelligence (SI) feature status for the 2.4GHz band. Indicates whether. For example, "Enabled". Clean Air Spectrum Intelligence is enabled or disabled. type: str required: False clean_air_si_5ghz: description: | - Clean Air Spectrum Intelligence (SI) feature status for the 5GHz band. Indicates whether.(eg. "Enabled") + Clean Air Spectrum Intelligence (SI) feature status for the 5GHz band. Indicates whether. For example, "Enabled". Clean Air Spectrum Intelligence is enabled or disabled. 
type: str required: False clean_air_si_6ghz: description: | - Clean Air Spectrum Intelligence (SI) feature status for the 6GHz band. Indicates whether.(eg. "Enabled") + Clean Air Spectrum Intelligence (SI) feature status for the 6GHz band. Indicates whether. For example, "Enabled". Clean Air Spectrum Intelligence is enabled or disabled. type: str required: False primary_controller_name: - description: Name or identifier of the primary wireless LAN controller (WLC) managing the Access Point (AP).(eg. "SJ-EWLC-1") + description: Name or identifier of the primary wireless LAN controller (WLC) managing the Access Point (AP). For example, "SJ-EWLC-1". type: str required: False primary_ip_address: @@ -143,11 +149,11 @@ required: False suboptions: address: - description: IP address of the primary wireless LAN controller. (eg. '10.0.0.3') + description: IP address of the primary wireless LAN controller. For example, '10.0.0.3'. type: str required: False secondary_controller_name: - description: Name or identifier of the secondary wireless LAN controller (WLC) managing the Access Point (AP).(eg. "Inherit from site/Clear") + description: Name or identifier of the secondary wireless LAN controller (WLC) managing the Access Point (AP). For example, "Inherit from site/Clear". type: str required: False secondary_ip_address: @@ -156,11 +162,11 @@ required: False suboptions: address: - description: IP address of the primary wireless LAN controller (eg. '10.0.0.3') + description: IP address of the primary wireless LAN controller. For example, '10.0.0.3'. type: str required: False tertiary_controller_name: - description: Name or identifier of the tertiary wireless LAN controller (WLC) managing the Access Point (AP).(eg. "Clear") + description: Name or identifier of the tertiary wireless LAN controller (WLC) managing the Access Point (AP). For example, "Clear". type: str required: False tertiary_ip_address: @@ -169,272 +175,267 @@ required: False suboptions: address: - description: IP address of the primary wireless LAN controller (eg. '10.0.0.2') + description: IP address of the primary wireless LAN controller. For example, '10.0.0.2'. type: str required: False - radio_settings: - description: Configuration options for radio interfaces. + 2.4ghz_radio: + description: Configuration options for the 2.4GHz radio interface. type: dict required: False suboptions: - 2.4ghz_radio: - description: Configuration options for the 2.4GHz radio interface. - type: dict + admin_status: + description: Administrative status for the 2.4GHz radio interface. For example, "Enabled". + type: str required: False - suboptions: - admin_status: - description: Administrative status for the 2.4GHz radio interface.(eg. 'Enabled') - type: str - required: False - antenna_name: - description: Name or type of antenna used for the 2.4GHz radio interface.(eg. "other") - type: str - required: False - antenna_gain: - description: Antenna gain value in decibels (dB) for the 2.4GHz radio interface.(eg. 4) - type: int - required: False - radio_role_assignment: - description: Role assignment mode for the 2.4GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor".(eg. "Auto") - type: str - required: False - cable_loss: - description: Cable loss in dB for the 2.4GHz radio interface.(eg. 75) - type: int - required: False - antenna_cable_name: - description: Name or type of antenna cable used for the 2.4GHz radio interface.(eg. 
"other") - type: str - required: False - channel_assignment_mode: - description: Mode of channel assignment for the 2.4GHz radio interface. Accepts "Global" or "Custom".(eg. "Custom") - type: str - required: False - channel_number: - description: Custom channel number configured for the 2.4GHz radio interface.(eg. 6) - type: int - required: False - power_assignment_mode: - description: Mode of power assignment for the 2.4GHz radio interface. Accepts "Global" or "Custom".(eg. "Custom") - type: str - required: False - power_level: - description: Custom power level configured for the 2.4GHz radio interface. (eg. 3) - type: int - required: False - 5ghz_radio: - description: Configuration options for the 5GHz radio interface. - type: dict + antenna_name: + description: Name or type of antenna used for the 2.4GHz radio interface. For example, "other". + type: str required: False - suboptions: - admin_status: - description: Administrative status for the 5GHz radio interface.(eg. "Enabled") - type: str - required: False - antenna_name: - description: Name or type of antenna used for the 5GHz radio interface.(eg. other) - type: str - required: False - antenna_gain: - description: Antenna gain value in decibels (dB) for the 5GHz radio interface.(eg. 5) - type: int - required: False - radio_role_assignment: - description: Role assignment mode for the 5GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor".(eg. Auto) - type: str - required: False - cable_loss: - description: Cable loss in dB for the 5GHz radio interface. (eg. 80) - type: int - required: False - antenna_cable_name: - description: Name or type of antenna cable used for the 5GHz radio interface.(eg. other) - type: str - required: False - channel_assignment_mode: - description: Mode of channel assignment for the 5GHz radio interface. Accepts "Global" or "Custom".(eg. "Custom") - type: str - required: False - channel_number: - description: Custom channel number configured for the 5GHz radio interface. (eg. 36) - type: int - required: False - power_assignment_mode: - description: Mode of power assignment for the 5GHz radio interface. Accepts "Global" or "Custom".(eg. "Custom") - type: str - required: False - power_level: - description: Custom power level configured for the 5GHz radio interface. (eg. 3) - type: int - required: False - 6ghz_radio: - description: Configuration options for the 6GHz radio interface. - type: dict + antenna_gain: + description: Specifies the antenna gain value in decibels (dB) for the 2.4GHz radio interface. For example, 4. + type: int required: False - suboptions: - admin_status: - description: Administrative status for the 6GHz radio interface.(eg. "Enabled") - type: str - required: False - antenna_name: - description: Name or type of antenna used for the 6GHz radio interface. (eg. other) - type: str - required: False - antenna_gain: - description: Antenna gain value in decibels (dB) for the 6GHz radio interface.(eg. 4) - type: int - required: False - radio_role_assignment: - description: Role assignment mode for the 6GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor". - type: str - required: False - cable_loss: - description: Cable loss in dB for the 6GHz radio interface. (eg. 75) - type: int - required: False - antenna_cable_name: - description: Name or type of antenna cable used for the 6GHz radio interface.(eg. "other") - type: str - required: False - channel_assignment_mode: - description: Mode of channel assignment for the 6GHz radio interface. 
Accepts "Global" or "Custom".(eg. "Custom") - type: str - required: False - channel_number: - description: Custom channel number configured for the 6GHz radio interface.(eg. 6) - type: int - required: False - power_assignment_mode: - description: Mode of power assignment for the 6GHz radio interface. Accepts "Global" or "Custom". (eg. "Custom") - type: str - required: False - power_level: - description: Custom power level configured for the 6GHz radio interface.(eg. 3) - type: int - required: False - xor_radio: - description: Configuration options for the xor radio interface. - type: dict + radio_role_assignment: + description: Role assignment mode for the 2.4GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor". For example, Auto. + type: str required: False - suboptions: - admin_status: - description: Administrative status for the xor radio interface. (eg. Enabled) - type: str - required: False - antenna_name: - description: Name or type of antenna used for the xor radio interface.(eg. other) - type: str - required: False - antenna_gain: - description: Antenna gain value in decibels (dB) for the xor radio interface.(eg. 4) - type: int - required: False - radio_role_assignment: - description: | - Role assignment mode for the xor radio interface. Accepts "Auto", "Client-serving", or "Monitor". - If radio_role_assignment is "client-serving", then only power-level and channel-level can be changed. - (eg. Auto) - type: str - required: False - cable_loss: - description: Cable loss in dB for the xor radio interface. (e.g 75) - type: int - required: False - antenna_cable_name: - description: Name or type of antenna cable used for the xor radio interface.(eg. other) - type: str - required: False - channel_assignment_mode: - description: | - Mode of channel assignment for the xor radio interface. Accepts "Global" or "Custom". - For xor Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, - 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. "Custom") - type: str - required: False - channel_number: - description: Custom channel number configured for the xor radio interface.(eg. 6) - type: int - required: False - channel_width: - description: | - Width of the channel configured for the xor radio interface. Accepts values - "20 MHz", "40 MHz", "80 MHz", or "160 MHz". (eg. 20 MHz) - type: str - required: False - power_assignment_mode: - description: | - Mode of power assignment for the xor radio interface. Accepts "Global" or "Custom". - In Custom, it accepts values 1 to 5. - type: str - required: False - power_level: - description: Custom power level configured for the xor radio interface. (eg. 3) - type: int - required: False - tri_radio: - description: Configuration options for the tri radio interface. - type: dict + cable_loss: + description: Cable loss in dB for the 2.4GHz radio interface. For example, 75. + type: int + required: False + antenna_cable_name: + description: Name or type of antenna cable used for the 2.4GHz radio interface. For example, "other". + type: str + required: False + channel_assignment_mode: + description: Mode of channel assignment for the 2.4GHz radio interface. Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + channel_number: + description: Custom channel number configured for the 2.4GHz radio interface. For example, 6. + type: int + required: False + power_assignment_mode: + description: Mode of power assignment for the 2.4GHz radio interface. 
Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + power_level: + description: Custom power level configured for the 2.4GHz radio interface. For example, 3. + type: int + required: False + 5ghz_radio: + description: Configuration options for the 5GHz radio interface. + type: dict + required: False + suboptions: + admin_status: + description: Administrative status for the 5GHz radio interface. For example, "Enabled". + type: str + required: False + antenna_name: + description: Name or type of antenna used for the 5GHz radio interface. For example, "other". + type: str + required: False + antenna_gain: + description: Antenna gain value in decibels (dB) for the 5GHz radio interface. For example, 5. + type: int + required: False + radio_role_assignment: + description: Role assignment mode for the 5GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor". For example, "Auto". + type: str + required: False + cable_loss: + description: Cable loss in dB for the 5GHz radio interface. For example, 80. + type: int + required: False + antenna_cable_name: + description: Name or type of antenna cable used for the 5GHz radio interface. For example, "other". + type: str + required: False + channel_assignment_mode: + description: Mode of channel assignment for the 5GHz radio interface. Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + channel_number: + description: Custom channel number configured for the 5GHz radio interface. For example, 36. + type: int + required: False + power_assignment_mode: + description: Mode of power assignment for the 5GHz radio interface. Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + power_level: + description: Custom power level configured for the 5GHz radio interface. For example, 3. + type: int + required: False + 6ghz_radio: + description: Configuration options for the 6GHz radio interface. + type: dict + required: False + suboptions: + admin_status: + description: Administrative status for the 6GHz radio interface. For example, "Enabled". + type: str + required: False + antenna_name: + description: Name or type of antenna used for the 6GHz radio interface. For example, "other". + type: str + required: False + antenna_gain: + description: Antenna gain value in decibels (dB) for the 6GHz radio interface. For example, 4. + type: int + required: False + radio_role_assignment: + description: Role assignment mode for the 6GHz radio interface. Accepts "Auto", "Client-serving", or "Monitor". + type: str + required: False + cable_loss: + description: Cable loss in dB for the 6GHz radio interface. For example, 75. + type: int + required: False + antenna_cable_name: + description: Name or type of antenna cable used for the 6GHz radio interface. For example, "other". + type: str + required: False + channel_assignment_mode: + description: Mode of channel assignment for the 6GHz radio interface. Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + channel_number: + description: Custom channel number configured for the 6GHz radio interface. For example, 6. + type: int + required: False + power_assignment_mode: + description: Mode of power assignment for the 6GHz radio interface. Accepts "Global" or "Custom". For example, "Custom". + type: str + required: False + power_level: + description: Custom power level configured for the 6GHz radio interface. For example, 3. 
+ type: int + required: False + xor_radio: + description: Configuration options for the xor radio interface. + type: dict + required: False + suboptions: + admin_status: + description: Administrative status for the xor radio interface. For example, "Enabled". + type: str + required: False + antenna_name: + description: Name or type of antenna used for the xor radio interface. For example, "other". + type: str + required: False + antenna_gain: + description: Antenna gain value in decibels (dB) for the xor radio interface. For example, 4. + type: int + required: False + radio_role_assignment: + description: | + Role assignment mode for the xor radio interface. Accepts "Auto", "Client-serving", or "Monitor". + If radio_role_assignment is "client-serving", then only power-level and channel-level can be changed. + For example, Auto. + type: str + required: False + cable_loss: + description: Cable loss in dB for the xor radio interface. For example, 75. + type: int + required: False + antenna_cable_name: + description: Name or type of antenna cable used for the xor radio interface. For example, "other". + type: str + required: False + channel_assignment_mode: + description: | + Mode of channel assignment for the xor radio interface. Accepts "Global" or "Custom". + For xor Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, + 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. For example, "Custom". + type: str + required: False + channel_number: + description: Custom channel number configured for the xor radio interface. For example, 6. + type: int + required: False + channel_width: + description: | + Width of the channel configured for the xor radio interface. Accepts values + "20 MHz", "40 MHz", "80 MHz", or "160 MHz". For example, 20 MHz. + type: str + required: False + power_assignment_mode: + description: | + Mode of power assignment for the xor radio interface. Accepts "Global" or "Custom". + In Custom, it accepts values 1 to 5. + type: str + required: False + power_level: + description: Custom power level configured for the xor radio interface. For example, 3. + type: int + required: False + tri_radio: + description: Configuration options for the tri radio interface. + type: dict + required: False + suboptions: + admin_status: + description: Administrative status for the tri radio interface. For example, "Enabled". + type: str + required: False + antenna_name: + description: Name or type of antenna used for the tri radio interface. For example, "other". + type: str + required: False + antenna_gain: + description: Antenna gain value in decibels (dB) for the tri radio interface. For example, 4. + type: int + required: False + radio_role_assignment: + description: | + Role assignment mode for the tri radio interface. Accepts "Auto", "Client-serving", or "Monitor". + If radio_role_assignment is "client-serving", then only power-level and channel-level can be changed. + type: str + required: False + cable_loss: + description: Cable loss in dB for the tri radio interface. For example, 75. + type: int + required: False + antenna_cable_name: + description: Name or type of antenna cable used for the tri radio interface. For example, "other". + type: str + required: False + channel_assignment_mode: + description: | + Mode of channel assignment for the tri radio interface. Accepts "Global" or "Custom". 
+ For tri Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, + 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. Custom) + type: str + required: False + channel_number: + description: Custom channel number configured for the tri radio interface. For example, 6. + type: int + required: False + channel_width: + description: | + Width of the channel configured for the tri radio interface. Accepts values + "20 MHz", "40 MHz", "80 MHz", or "160 MHz". . For example, 20 MHz. + type: str + required: False + power_assignment_mode: + description: | + Mode of power assignment for the tri radio interface. Accepts "Global" or "Custom". + In Custom, it accepts values 1 to 5. + type: str + required: False + power_level: + description: Custom power level configured for the tri radio interface. For example, 3. + type: int + required: False + dual_radio_mode: + description: | + Mode of operation configured for the tri radio interface. Specifies how the + access point (AP) manages its dual radio functionality. eg . Auto + type: str required: False - suboptions: - admin_status: - description: Administrative status for the tri radio interface. (eg. Enabled) - type: str - required: False - antenna_name: - description: Name or type of antenna used for the tri radio interface.(eg. other) - type: str - required: False - antenna_gain: - description: Antenna gain value in decibels (dB) for the tri radio interface. (eg. 4) - type: int - required: False - radio_role_assignment: - description: | - Role assignment mode for the tri radio interface. Accepts "Auto", "Client-serving", or "Monitor". - If radio_role_assignment is "client-serving", then only power-level and channel-level can be changed. - type: str - required: False - cable_loss: - description: Cable loss in dB for the tri radio interface. (eg. 75) - type: int - required: False - antenna_cable_name: - description: Name or type of antenna cable used for the tri radio interface. (eg. "other") - type: str - required: False - channel_assignment_mode: - description: | - Mode of channel assignment for the tri radio interface. Accepts "Global" or "Custom". - For tri Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, - 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. Custom) - type: str - required: False - channel_number: - description: Custom channel number configured for the tri radio interface. (eg. 6) - type: int - required: False - channel_width: - description: | - Width of the channel configured for the tri radio interface. Accepts values - "20 MHz", "40 MHz", "80 MHz", or "160 MHz". eg. 20 MHz - type: str - required: False - power_assignment_mode: - description: | - Mode of power assignment for the tri radio interface. Accepts "Global" or "Custom". - In Custom, it accepts values 1 to 5. - type: str - required: False - power_level: - description: Custom power level configured for the tri radio interface.(eg. 3) - type: int - required: False - dual_radio_mode: - description: | - Mode of operation configured for the tri radio interface. Specifies how the - access point (AP) manages its dual radio functionality. eg . Auto - type: str - required: False ap_selected_fields: description: When enable the verify flag "config_verify" to see only the filter field of the AP details in the output. (eg. 
"id,hostname,family,type,mac_address,management_ip_address,ap_ethernet_mac_address") @@ -693,7 +694,7 @@ led_status: "Enabled" led_brightness_level: 5 ap_mode: "Local" - location: "LTTS/Cisco/Chennai" + is_assigned_site_as_location: "Enabled" failover_priority: "Low" 2.4ghz_radio: admin_status: "Enabled" @@ -810,13 +811,14 @@ def __init__(self, module): self.allowed_series = { "6ghz_radio": ["9136I", "9162I", "9163E", "9164I", "IW9167IH", "9178I", "9176I", "9176D1"], - "xor_radio": ["2800", "3800", "4800", "9120", "9166"], - "tri_radio": ["9124AXE", "9130AXI", "9130AXE"] + "xor_radio": ["280", "380", "480", "9120", "9166", "IW9167EH", "IW9165E", "IW9165DH"], + "tri_radio": ["9124AXE", "9130AXI", "9130AXE", "9178I"] } self.allowed_channel_no = { - "2.4ghz_radio": list(range(1, 12)), + "2.4ghz_radio": list(range(1, 15)), "5ghz_radio": (36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, - 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173) + 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173), + "xor_radio": list(range(1, 15)) } def validate_input_yml(self): @@ -864,6 +866,7 @@ def validate_input_yml(self): "led_brightness_level": {"required": False, "type": "int"}, "ap_mode": {"required": False, "type": "str"}, "location": {"required": False, "type": "str"}, + "is_assigned_site_as_location": {"required": False, "type": "str"}, "failover_priority": {"required": False, "type": "str"}, "primary_controller_name": {"required": False, "type": "str"}, "primary_ip_address": {"required": False, "type": "dict"}, @@ -1045,7 +1048,6 @@ def get_diff_merged(self, ap_config): self.msg = "AP {0} provisioned successfully.".format(self.have['hostname']) self.log(self.msg, "INFO") responses["accesspoints_updates"].update({ - "provision_response": provision_details, "provision_message": self.msg }) else: @@ -1065,7 +1067,6 @@ def get_diff_merged(self, ap_config): self.log(self.msg, "INFO") del self.payload["access_point_details"] responses["accesspoints_updates"].update({ - "ap_config_response": self.payload["access_point_config"], "ap_config_message": self.msg }) self.result['ap_update_msg'] = self.msg @@ -1095,7 +1096,6 @@ def get_diff_merged(self, ap_config): self.log("Task Details: {0} .".format(self.pprint( task_details_response)), "ERROR") responses["accesspoints_updates"] = { - "ap_update_config_task_response": task_response, "ap_update_config_task_details": task_details_response, "ap_config_update_status": self.msg} self.module.fail_json(msg=self.msg, response=responses) @@ -1107,8 +1107,7 @@ def get_diff_merged(self, ap_config): .format(self.have["current_ap_config"].get("ap_name")) self.log(self.msg, "INFO") responses["accesspoints_updates"] = { - "ap_update_config_task_response": task_response, - "ap_update_config_task_details": task_details_response, + "ap_update_config_task_details": task_details_response["id"], "ap_config_update_status": self.msg } self.result['ap_update_msg'] = self.msg @@ -1159,13 +1158,14 @@ def verify_diff_merged(self, config): unmatch_count = 0 require_update = self.config_diff(self.have["current_ap_config"]) + self.log(self.pprint(require_update), "INFO") if require_update: radio_list = require_update.get("radioConfigurations") if len(radio_list) > 0: for each_radio in radio_list: radio_key_list = list(each_radio.keys()) for each_key in radio_key_list: - if each_key not in ("antenna_name", "radioType", "unmatch", "cable_loss"): + if each_key not in ("antenna_name", "radioType", "unmatch", "cable_loss", "radioRoleAssignment"): 
unmatch_count += 1 other_keys = list(require_update.keys()) @@ -1176,8 +1176,7 @@ def verify_diff_merged(self, config): self.log("Unmatch count for the radio configuration : {0}".format(str(unmatch_count)), "INFO") self.log(str(require_update), "INFO") responses = {} - responses["accesspoints_verify"] = {"want": self.want, - "have": self.have} + responses["accesspoints_verify"] = {} if unmatch_count < 1: msg = "The update for AP Config '{0}' has been successfully verified.".format(ap_name) @@ -1241,10 +1240,11 @@ def validate_radio_series(self, ap_config): self.log('Validating radio type: {0}'.format(radio_type), "INFO") if ap_series is not None: for series in self.allowed_series[radio_type]: - pattern = r'\b{}\b'.format(re.escape(series)) + pattern = r'\b{}\w+'.format(re.escape(series)) compiled_pattern = re.compile(pattern) is_valid = compiled_pattern.search(self.payload["access_point_details"]["series"]) if is_valid: + invalid_series = [] break if not is_valid: @@ -1324,6 +1324,11 @@ def validate_ap_config_parameters(self, ap_config): param_spec = dict(type="str", length_max=32) validate_str(ap_name, param_spec, "ap_name", errormsg) + admin_status = ap_config.get("admin_status") + if admin_status and admin_status not in ("Enabled", "Disabled"): + errormsg.append("admin_status: Invalid value '{0}' for admin_status in playbook. Must be either 'Enabled' or 'Disabled'." + .format(admin_status)) + led_brightness_level = ap_config.get("led_brightness_level") if led_brightness_level and led_brightness_level not in range(1, 9): errormsg.append("led_brightness_level: Invalid LED Brightness level '{0}' in playbook." @@ -1338,6 +1343,11 @@ def validate_ap_config_parameters(self, ap_config): param_spec = dict(type="str", length_max=255) validate_str(location, param_spec, "location", errormsg) + is_assigned_site_as_location = ap_config.get("is_assigned_site_as_location") + if is_assigned_site_as_location and is_assigned_site_as_location not in ("Disabled", "Enabled"): + errormsg.append("is_assigned_site_as_location: Invalid value '{0}' for is_assigned_site_as_location in playbook.\ + Must be either 'Disabled' or 'Enabled'.".format(is_assigned_site_as_location)) + ap_mode = ap_config.get("ap_mode") if ap_mode and ap_mode not in ("Local", "Monitor", "Sniffer", "Bridge"): errormsg.append("ap_mode: Invalid value '{0}' for ap_mode in playbook. Must be one of: Local, Monitor, Sniffer or Bridge." @@ -1530,6 +1540,12 @@ def check_current_radio_role_assignment(self, radio_type, radio_dtos, radio_band (radio_band == "5 GHz" and slot_id == 1): break + if radio_type == "tri_radio": + if (radio_band == "2.4 GHz" and slot_id == 0) or \ + (radio_band == "5 GHz" and slot_id == 1) or \ + (radio_band == "5 GHz" and slot_id == 2): + break + self.log('Completed checking radio role assignments. 
Role assignment: {0}, radio type: {1}, radio band: {2}' .format(role_assignment, radio_type, radio_band), "INFO") return role_assignment @@ -2091,6 +2107,8 @@ def config_diff(self, current_ap_config): if self.want["ap_name"] != current_ap_config.get("ap_name"): update_config["apNameNew"] = self.want["ap_name"] update_config["apName"] = current_ap_config.get("ap_name") + elif each_key == "is_assigned_site_as_location": + update_config["isAssignedSiteAsLocation"] = self.want["is_assigned_site_as_location"] elif each_key in ("primary_ip_address", "secondary_ip_address", "tertiary_ip_address"): if current_ap_config.get(each_key) != self.want.get(each_key): @@ -2111,7 +2129,7 @@ def config_diff(self, current_ap_config): elif each_key == "6ghz_radio" and each_radio["slot_id"] == 2: radio_data = self.compare_radio_config(each_radio, self.want[each_key]) - elif each_key == "xor_radio" and each_radio["slot_id"] == 3: + elif each_key == "xor_radio" and each_radio["slot_id"] == 0: radio_data = self.compare_radio_config(each_radio, self.want[each_key]) elif each_key == "tri_radio" and each_radio["slot_id"] == 4: @@ -2184,12 +2202,15 @@ def update_ap_configuration(self, ap_config): self.log("Updating access point configuration information: {0}" .format(ap_config["macAddress"]), "INFO") - ap_config["adminStatus"] = True - ap_config["configureAdminStatus"] = True ap_config["apList"] = [] temp_dict = {} + if ap_config.get("adminStatus") is not None: + ap_config["configureAdminStatus"] = True + ap_config["adminStatus"] = True \ + if ap_config["adminStatus"] == "Enabled" else False + if ap_config.get(self.keymap["ap_name"]) is not None: temp_dict[self.keymap["ap_name"]] = ap_config.get(self.keymap["ap_name"]) temp_dict["apNameNew"] = ap_config["apNameNew"] @@ -2203,8 +2224,11 @@ def update_ap_configuration(self, ap_config): if ap_config.get(self.keymap["location"]) is not None: ap_config["configureLocation"] = True - else: - ap_config["isAssignedSiteAsLocation"] = True + + if ap_config.get("isAssignedSiteAsLocation") is not None: + ap_config["configureLocation"] = True + ap_config["isAssignedSiteAsLocation"] = True \ + if ap_config["isAssignedSiteAsLocation"] == "Enabled" else False if ap_config.get(self.keymap["led_brightness_level"]) is not None: ap_config["configureLedBrightnessLevel"] = True @@ -2265,8 +2289,10 @@ def update_ap_configuration(self, ap_config): for each_radio in radio_config_list: radio_dtos = {} - radio_dtos["configureAdminStatus"] = True - radio_dtos["adminStatus"] = True + if each_radio.get(self.keymap["admin_status"]) is not None: + radio_dtos["configureAdminStatus"] = True + radio_dtos[self.keymap["admin_status"]] = True \ + if each_radio[self.keymap["admin_status"]] == "Enabled" else False if each_radio.get(self.keymap["channel_assignment_mode"]) is not None: radio_dtos[self.keymap["channel_assignment_mode"]] = 1 \ @@ -2353,7 +2379,7 @@ def update_ap_configuration(self, ap_config): try: response = self.dnac._exec( family="wireless", - function='configure_access_points_v1', + function='configure_access_points_v2', op_modifies=True, params={"payload": ap_config} ) diff --git a/plugins/modules/device_configs_backup_workflow_manager.py b/plugins/modules/device_configs_backup_workflow_manager.py index 03511c46d7..2c8f61dc40 100644 --- a/plugins/modules/device_configs_backup_workflow_manager.py +++ b/plugins/modules/device_configs_backup_workflow_manager.py @@ -44,6 +44,9 @@ management_ip_address: description: IP address of the device as displayed on the inventory GUI of Cisco Catalyst 
Center type: str + site: + description: Name of the site to which the device is assigned + type: str mac_address: description: Mac address of the device as displayed on the inventory GUI of Cisco Catalyst Center type: str @@ -110,6 +113,22 @@ series: Cisco Catalyst 9300 Series Switches collection_status: Managed file_path: /home/admin/madhan_ansible/collections/ansible_collections/cisco/dnac/playbooks/new_tmp + +- name: Take backup of a 9300 wired device associated with site + cisco.dnac.device_configs_backup_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: True + dnac_log_level: "{{dnac_log_level}}" + state: merged + config: + - site: Global/USA/New York/BLDNYC + """ RETURN = r""" @@ -214,6 +233,7 @@ def validate_input(self): device_configs_backup_spec = { 'hostname': {'type': 'str', 'required': False}, + 'site': {'type': 'str', 'required': False}, 'management_ip_address': {'type': 'str', 'required': False}, 'mac_address': {'type': 'str', 'required': False}, 'serial_number': {'type': 'str', 'required': False}, @@ -255,6 +275,44 @@ def validate_ipv4_address(self): self.log("Validated IP address collected for config collection is {0}".format(ip_address), "INFO") + def get_site_details(self, site_name_hierarchy): + """ + Fetches the existance status of the site + + Parameters: + - self: The instance of the class containing the 'config' attribute + to be validated. + - site_name_hierarchy: Name of the site collected from the input. + Returns: + - site_exits: A boolean value indicating the existance of the site. + Example: + Post creation of the validated input, this method checks whther the site + exists in the Cisco Catalyst Center or not + """ + + site_exists = False + try: + response = self.dnac_apply['exec']( + family="sites", + function='get_site', + params={"name": site_name_hierarchy}, + op_modifies=True + ) + except Exception: + self.log("Exception occurred as \ + site '{0}' was not found".format(self.want.get("site_name")), "CRITICAL") + self.module.fail_json(msg="Site not found", response=[]) + + if response: + self.log("Received site details\ + for '{0}': {1}".format(site_name_hierarchy, str(response)), "DEBUG") + site = response.get("response") + if len(site) == 1: + site_exists = True + self.log("Site Name: {0} exists in the Cisco Catalyst Center".format(site_name_hierarchy), "INFO") + + return site_exists + def get_have(self): """ Get the current device_configs_backup details @@ -275,11 +333,11 @@ def get_have(self): def get_device_ids_list(self): """ - Fethces the list of device ids from various paramters passed in the playbook + Fetches the list of device ids from various paramters passed in the playbook Args: self: The instance of the class containing the 'config' attribute to be validated. 
Returns: - dev_id_list: The list of device ids based on the parameters passed by the user + device_ids: The list of device ids based on the parameters passed by the user Example: Stored paramters like management ip address/ family can be used to fetch the device ids list @@ -300,16 +358,111 @@ def get_device_ids_list(self): ) self.log("Response collected from the API 'get_device_list' is {0}".format(str(response)), "DEBUG") device_list = response.get("response") + self.log("Length of the device list fetched from the API 'get_device_list' is {0}".format(str(device_list)), "INFO") + original_valid_device_count = len(device_list) + if original_valid_device_count == 0: + msg = "No devices found in the inventory matching the given parameters." + self.log(msg, "CRITICAL") + self.module.fail_json(msg=msg) + + valid_devices = [] + for device in device_list: + ip_address = device.get("managementIpAddress") + if device.get("collectionStatus") != "Managed": + msg = "Device backup of device with IP address {0} \ + is not possible due to collection status not being in Managed state".format(ip_address) + self.log(msg, "WARNING") + + elif device.get("family") == "Unified AP": + msg = "Device backup of device with IP address {0} \ + is not possible due to device being an Unified AP".format(ip_address) + self.log(msg, "WARNING") + + elif device.get("reachabilityStatus") != "Reachable": + msg = "Device backup of device with IP address {0} \ + is not possible due to device being not reachable".format(ip_address) + self.log(msg, "WARNING") + else: + valid_devices.append(device) + if not valid_devices: + msg = "No device IDs were collected because the devices are either Unified APs \ + not in the Managed state, or not reachable." + self.log(msg, "CRITICAL") + self.module.fail_json(msg=msg) + + device_ids = [id.get("id") for id in valid_devices] + valid_device_count = len(device_ids) + self.log("Collected device IDs: {0}".format(device_ids), "INFO") + self.log("Backup of {0} devices out of {1} devices is possible".format(valid_device_count, original_valid_device_count), "INFO") + return device_ids + + def get_devices_by_site_and_params(self): + """ + Retrieves a list of device IDs that match the given site and other parameters. + Args: + self: The instance of the class containing the 'config' attribute to be validated. + Returns: + device_in_site: List of device IDs that match the criteria and are located at the specified site. + Example: + Uses stored parameters like management IP address/family to fetch device IDs at the given site. 
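For reference, the eligibility rules applied in get_device_ids_list above can be captured in a small standalone filter. This is an illustrative sketch only, not part of the module; the dictionaries mirror the fields read from the 'get_device_list' response, and the sample data is invented.

def filter_backup_eligible_devices(device_list):
    """Return IDs of devices eligible for config backup, mirroring the checks above."""
    eligible_ids = []
    for device in device_list:
        # Skip devices that are not fully managed, are Unified APs, or are unreachable.
        if device.get("collectionStatus") != "Managed":
            continue
        if device.get("family") == "Unified AP":
            continue
        if device.get("reachabilityStatus") != "Reachable":
            continue
        eligible_ids.append(device.get("id"))
    return eligible_ids

# Illustrative input shaped like the 'get_device_list' response:
sample = [
    {"id": "uuid-1", "collectionStatus": "Managed", "family": "Switches and Hubs", "reachabilityStatus": "Reachable"},
    {"id": "uuid-2", "collectionStatus": "Managed", "family": "Unified AP", "reachabilityStatus": "Reachable"},
]
print(filter_backup_eligible_devices(sample))  # -> ['uuid-1']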
+ """ + + site = self.validated_config[0].get("site") + device_ids = self.get_device_ids_list() + devices_in_site = [] + + for dev_id in device_ids: + device_details_response = self.dnac_apply['exec']( + family="devices", + function='get_device_detail', + params={"search_by": dev_id, + "identifier": "uuid"}, + op_modifies=True + ) + self.log("Response collected from the API 'get_device_detail' {0}".format(device_details_response), "DEBUG") + device_details = device_details_response.get("response") + if device_details.get("location") == site: + devices_in_site.append(dev_id) + + if not devices_in_site: + msg = "No devices found in the given site {0}".format(site) + self.log(msg, "CRITICAL") + self.module.fail_json(msg=msg) + + return devices_in_site + + def get_device_ids_by_site(self): + """ + Retrieves the list of device IDs at the specified site., when only site is passed as the parameter + Args: + self: The instance of the class containing the 'config' attribute to be validated. + Returns: + device_ids: The list of device ids present at the site passed by the user + Example: + Uses site as parameter to fetch the device IDs at the given site. + """ + site = self.validated_config[0].get("site") + response = self.dnac_apply['exec']( + family="devices", + function='get_device_list', + op_modifies=True + ) + self.log("Response collected from the API 'get_device_list' is {0}".format(str(response)), "DEBUG") + device_list = response.get("response") self.log("Length of the device list fetched from the API 'get_device_list' is {0}".format(str(device_list)), "INFO") if len(device_list) == 0: - msg = "Couldn't find any devices in the inventory that match the given parameters." + msg = "No devices found in the inventory" self.log(msg, "CRITICAL") self.module.fail_json(msg=msg) - dev_id_list = [id.get("id") for id in device_list] - self.log("Device Ids list collected is {0}".format(dev_id_list), "INFO") - return dev_id_list + device_ids = [] + for device in device_list: + if device.get("site") == site: + device_ids.append(device) + + self.log("Device IDs collected:{0}".format(device_ids), "INFO") + return device_ids def password_generator(self): """ @@ -372,10 +525,25 @@ def get_want(self): """ self.want = {} + device_params = self.validated_config[0] + site = device_params.get("site") + filtered_keys = {} + for key, value in device_params.items(): + if key not in ['file_path', 'file_password']: + filtered_keys[key] = value + + if device_params.get("site") and self.get_site_details(site): + if len(filtered_keys) > 1: + self.want["deviceId"] = self.get_devices_by_site_and_params() + else: + self.want["deviceId"] = self.get_device_ids_by_site() + else: + self.want["deviceId"] = self.get_device_ids_list() - self.want["deviceId"] = self.get_device_ids_list() - if self.validated_config[0].get("file_password"): - password = self.validated_config[0].get("file_password") + self.log("Device IDs passed is {0}".format(self.want["deviceId"]), "INFO") + + if device_params.get("file_password"): + password = device_params.get("file_password") if self.validate_password(password=password) is True: self.want["password"] = password @@ -390,7 +558,7 @@ def get_want(self): self.want["password"] = self.password_generator() self.msg = "Successfully collected all parameters from playbook " + \ - "for comparison" + "for exceution" self.status = "success" self.log(self.msg, "INFO") return self diff --git a/plugins/modules/events_and_notifications_workflow_manager.py 
b/plugins/modules/events_and_notifications_workflow_manager.py index de0025209d..c871b4049a 100644 --- a/plugins/modules/events_and_notifications_workflow_manager.py +++ b/plugins/modules/events_and_notifications_workflow_manager.py @@ -678,7 +678,7 @@ - snmp_destination: name: Snmp test description: "Updating snmp destination with snmp version v2." - server_address: "10.30.0.90" + server_address: "10.30.0.23" port: "25" snmp_version: "V2C" community: "public123" @@ -740,7 +740,7 @@ name: "Webhook Notification." description: "Notification for webhook events subscription" sites: ["Global/India", "Global/USA"] - events: ["AP Flap", "AP Reboot Crash"] + events: ["AP Flap", "AP Reboot Crash", "Device Updation"] destination: "Webhook Demo" - name: Updating Webhook Notification with the list of names of subscribed events in the system. diff --git a/plugins/modules/fabric_sites_zones_workflow_manager.py b/plugins/modules/fabric_sites_zones_workflow_manager.py new file mode 100644 index 0000000000..db3e5477d1 --- /dev/null +++ b/plugins/modules/fabric_sites_zones_workflow_manager.py @@ -0,0 +1,1783 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2022, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = ("Abhishek Maheshwari, Madhan Sankaranarayanan") + +DOCUMENTATION = r""" +--- +module: fabric_sites_zones_workflow_manager +short_description: Manage fabric site(s)/zone(s) and update the authentication profile template in Cisco Catalyst Center. +description: +- Creating fabric site(s) for the SDA operation in Cisco Catalyst Center. +- Updating fabric site(s) for the SDA operation in Cisco Catalyst Center. +- Creating fabric zone(s) for the SDA operation in Cisco Catalyst Center. +- Updating fabric zone(s) for the SDA operation in Cisco Catalyst Center. +- Deletes fabric site(s) from Cisco Catalyst Center. +- Deletes fabric zone(s) from Cisco Catalyst Center. +- Configure the authentication profile template for fabric site/zone in Cisco Catalyst Center. +version_added: '6.17.0' +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params +author: Abhishek Maheshwari (@abmahesh) + Madhan Sankaranarayanan (@madhansansel) +options: + config_verify: + description: Set to True to verify the Cisco Catalyst Center configuration after applying the playbook configuration. + type: bool + default: False + state: + description: The desired state of Cisco Catalyst Center after the module execution. + type: str + choices: [ merged, deleted ] + default: merged + config: + description: A list containing detailed configurations for creating, updating, or deleting fabric sites or zones + in a Software-Defined Access (SDA) environment. It also includes specifications for updating the authentication + profile template for these sites. Each element in the list represents a specific operation to be performed on + the SDA infrastructure, such as the addition, modification, or removal of fabric sites/zones, and modifications + to authentication profiles. + type: list + elements: dict + required: True + suboptions: + fabric_sites: + description: A dictionary containing detailed configurations for managing REST Endpoints that will receive Audit log + and Events from the Cisco Catalyst Center Platform. 
This dictionary is essential for specifying attributes and + parameters required for the lifecycle management of fabric sites, zones, and associated authentication profiles. + type: dict + suboptions: + site_name: + description: This name uniquely identifies the site for operations such as creating, updating, or deleting fabric + sites or zones, as well as for updating the authentication profile template. This parameter is mandatory for + any fabric site/zone management operation. + type: str + required: True + site_type: + description: Specifies the type of site to be managed within the SDA environment. The acceptable values are 'fabric_site' + and 'fabric_zone'. The default value is 'fabric_site', indicating the configuration of a broader network area, whereas + 'fabric_zone' typically refers to a more specific segment within the site. + type: str + required: True + authentication_profile: + description: The authentication profile applied to the specified fabric. This profile determines the security posture and + controls for network access within the site. Possible values include 'Closed Authentication', 'Low Impact', + 'No Authentication', and 'Open Authentication'. This setting is critical when creating or updating a fabric site or + updating the authentication profile template. + type: str + is_pub_sub_enabled: + description: A boolean flag that indicates whether the pub/sub mechanism is enabled for control nodes in the fabric site. + This feature is relevant only when creating or updating fabric sites, not fabric zones. When set to True, + pub/sub facilitates more efficient communication and control within the site. The default is True for fabric sites, + and this setting is not applicable for fabric zones. + type: bool + update_authentication_profile: + description: A dictionary containing the specific details required to update the authentication profile template associated + with the fabric site. This includes advanced settings that fine-tune the authentication process and security controls + within the site. + type: dict + suboptions: + authentication_order: + description: Specifies the primary method of authentication for the site. The available methods are 'dot1x' (IEEE 802.1X) + and 'mac' (MAC-based authentication). This setting determines the order in which authentication mechanisms are attempted. + type: str + dot1x_fallback_timeout: + description: The timeout duration, in seconds, for falling back from 802.1X authentication. This value must be within the + range of 3 to 120 seconds. It defines the period a device waits before attempting an alternative authentication method + if 802.1X fails. + type: int + wake_on_lan: + description: A boolean value indicating whether the Wake-on-LAN feature is enabled. Wake-on-LAN allows the network to + remotely wake up devices that are in a low-power state. + type: bool + number_of_hosts: + description: Specifies the number of hosts allowed per port. The available options are 'Single' for one device per port or + 'Unlimited' for multiple devices. This setting helps in controlling the network access and maintaining security. + type: str + enable_bpu_guard: + description: A boolean setting that enables or disables BPDU Guard. BPDU Guard provides a security mechanism by disabling + a port when a BPDU (Bridge Protocol Data Unit) is received, protecting against potential network loops. This setting + defaults to true and is applicable only when the authentication profile is set to "Closed Authentication". 
+ type: bool + + +requirements: +- dnacentersdk >= 2.9.2 +- python >= 3.9 + +notes: + - To ensure the module operates correctly for scaled sets, which involve creating or updating fabric sites/zones and handling + the updation of authentication profile template, please provide valid input in the playbook. If any failure is encountered, + the module will and halt execution without proceeding to further operations. + - When deleting fabric sites, make sure to provide the input to remove the fabric zones associated with them in the + playbook. Fabric sites cannot be deleted until all underlying fabric zones have been removed. + - SDK Method used are + ccc_fabric_sites.FabricSitesZones.get_site + ccc_fabric_sites.FabricSitesZones.get_fabric_sites + ccc_fabric_sites.FabricSitesZones.get_fabric_zones + ccc_fabric_sites.FabricSitesZones.add_fabric_site + ccc_fabric_sites.FabricSitesZones.update_fabric_site + ccc_fabric_sites.FabricSitesZones.add_fabric_zone + ccc_fabric_sites.FabricSitesZones.update_fabric_zone + ccc_fabric_sites.FabricSitesZones.get_authentication_profiles + ccc_fabric_sites.FabricSitesZones.update_authentication_profile + ccc_fabric_sites.FabricSitesZones.delete_fabric_site_by_id + ccc_fabric_sites.FabricSitesZones.delete_fabric_zone_by_id + +""" + +EXAMPLES = r""" +- name: Create a fabric site for SDA with the specified name. + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: merged + config: + - fabric sites: + site_name: "Global/Test_SDA/Bld1" + authentication_profile: "Closed Authentication" + is_pub_sub_enabled: False + +- name: Update a fabric site for SDA with the specified name. + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: merged + config: + - fabric sites: + site_name: "Global/Test_SDA/Bld1" + authentication_profile: "Open Authentication" + +- name: Update a fabric zone for SDA with the specified name. + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: merged + config: + - fabric sites: + site_name: "Global/Test_SDA/Bld1/Floor1" + site_type: "fabric_zone" + authentication_profile: "Closed Authentication" + +- name: Update fabric zone for sda with given name. 
+ cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: merged + config: + - fabric sites: + site_name: "Global/Test_SDA/Bld1/Floor1" + site_type: "fabric_zone" + authentication_profile: "Open Authentication" + +- name: Update/customise authentication profile template for fabric site/zone. + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: merged + config: + - fabric_sites: + site_name: "Global/Test_SDA/Bld1" + site_type: "fabric_zone" + authentication_profile: "Open Authentication" + is_pub_sub_enabled: False + update_authentication_profile: + authentication_order: "dot1x" + dot1x_fallback_timeout: 28 + wake_on_lan: False + number_of_hosts: "Single" + +- name: Deleting/removing fabric site from sda from Cisco Catalyst Center + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: deleted + config: + - fabric_sites: + site_name: "Global/Test_SDA/Bld1" + +- name: Deleting/removing fabric zone from sda from Cisco Catalyst Center + cisco.dnac.fabric_sites_zones_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: False + state: deleted + config: + - fabric_sites: + site_name: "Global/Test_SDA/Bld1/Floor1" + site_type: "fabric_zone" + +""" + +RETURN = r""" + +dnac_response: + description: A dictionary or list with the response returned by the Cisco Catalyst Center Python SDK + returned: always + type: dict + sample: > + { + "response": { + "taskId": "string", + "url": "string" + }, + "version": "string" + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, + validate_list_of_dicts, +) +import time + + +class FabricSitesZones(DnacBase): + """Class containing member attributes for fabric sites and zones workflow manager module""" + + def __init__(self, module): + super().__init__(module) + self.supported_states = ["merged", "deleted"] + self.create_site, self.update_site, self.no_update_site = [], [], [] + self.create_zone, self.update_zone, self.no_update_zone = [], [], [] + self.update_auth_profile, self.no_update_profile = [], [] + self.delete_site, self.delete_zone, self.absent_site, self.absent_zone = [], [], [], [] + + def validate_input(self): + """ + Validate the fields provided in the playbook. + Checks the configuration provided in the playbook against a predefined specification + to ensure it adheres to the expected structure and data types. 
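As an illustration of a configuration that would satisfy this check, the 'config' value below uses only the options documented for this module. It is expressed as the Python structure the module receives; fabric_sites is shown as a list of site entries, matching how the rest of the module iterates it, and the site name and profile values are examples rather than required values.

# Illustrative 'config' structure (not an excerpt of the module).
example_config = [
    {
        "fabric_sites": [
            {
                "site_name": "Global/Test_SDA/Bld1",            # mandatory for any fabric site/zone operation
                "site_type": "fabric_site",                      # 'fabric_site' (default) or 'fabric_zone'
                "authentication_profile": "Closed Authentication",
                "is_pub_sub_enabled": True,                      # fabric sites only
                "update_authentication_profile": {
                    "authentication_order": "dot1x",             # 'dot1x' or 'mac'
                    "dot1x_fallback_timeout": 28,                # 3..120 seconds
                    "number_of_hosts": "Single",                 # 'Single' or 'Unlimited'
                },
            }
        ]
    }
]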
+ Parameters: + self: The instance of the class containing the 'config' attribute to be validated. + Returns: + The method returns an instance of the class with updated attributes: + - self.msg: A message describing the validation result. + - self.status: The status of the validation (either 'success' or 'failed'). + - self.validated_config: If successful, a validated version of the 'config' parameter. + Example: + To use this method, create an instance of the class and call 'validate_input' on it. + If the validation succeeds, 'self.status' will be 'success' and 'self.validated_config' + will contain the validated configuration. If it fails, 'self.status' will be 'failed', and + 'self.msg' will describe the validation issues. + """ + + temp_spec = { + 'fabric_sites': { + 'type': 'list', + 'elements': 'dict', + 'site_name': {'type': 'str'}, + 'site_type': {'type': 'str', 'default': 'fabric_site'}, + 'authentication_profile': {'type': 'str'}, + 'is_pub_sub_enabled': {'type': 'bool', 'default': False}, + 'update_authentication_profile': { + 'elements': 'dict', + 'site_name_hierarchy': {'type': 'str'}, + 'authentication_profile': {'type': 'str'}, + 'authentication_order': {'type': 'str'}, + 'dot1x_fallback_timeout': {'type': 'int'}, + 'wake_on_lan': {'type': 'bool'}, + 'number_of_hosts': {'type': 'str'}, + 'enable_bpu_guard': {'type': 'bool'} + } + }, + } + + if not self.config: + self.status = "failed" + self.msg = "The playbook configuration is empty or missing." + self.log(self.msg, "ERROR") + return self + + # Validate device params + valid_temp, invalid_params = validate_list_of_dicts( + self.config, temp_spec + ) + + if invalid_params: + self.msg = "The playbook contains invalid parameters: {0}".format(invalid_params) + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + self.validated_config = valid_temp + self.msg = "Successfully validated playbook configuration parameters using 'validate_input': {0}".format(str(valid_temp)) + self.log(self.msg, "INFO") + self.status = "success" + + return self + + def get_site_id(self, site_name): + """ + Retrieves the site ID for a given site name from the Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site_name (str): The complete name of site for which the site ID need to be retrieved. + Returns: + str: A site ID corresponding to the provided site name. + Description: + This function invokes an API to fetch the details of given site from the Cisco Catalyst Center. If the + site is found, its site ID is extracted. + The function logs messages for successful API responses, missing site, and any errors + encountered during the process. The final site ID is returned. 
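Example (illustrative only; the site name is taken from the EXAMPLES above):

    # Resolve a site hierarchy name to its Catalyst Center site ID,
    # then check whether that site is already a fabric site.
    site_id = self.get_site_id("Global/Test_SDA/Bld1")
    fabric_site = self.get_fabric_site_detail("Global/Test_SDA/Bld1", site_id)
    # 'fabric_site' is None when the site has not been made a fabric site yet.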
+ """ + + try: + response = self.dnac._exec( + family="sites", + function='get_site', + op_modifies=True, + params={"name": site_name}, + ) + self.log("Received API response from 'get_site': {0}".format(str(response)), "DEBUG") + response = response.get('response') + + if not response or not response[0].get("id"): + self.status = "failed" + self.msg = "No site with the name '{0}' found in Cisco Catalyst Center.".format(site_name) + self.log(self.msg, "ERROR") + self.check_return_status() + site_id = response[0].get("id") + + except Exception as e: + self.status = "failed" + self.msg = """Error while getting the details of Site with given name '{0}' present in + Cisco Catalyst Center: {1}""".format(site_name, str(e)) + self.log(self.msg, "ERROR") + self.check_return_status() + + return site_id + + def get_fabric_site_detail(self, site_name, site_id): + """ + Retrieves the detailed information of a fabric site from the Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site_name (str): The complete name of the site for which the details need to be retrieved. + site_id (str): The unique identifier of the site in the Cisco Catalyst Center. + Returns: + dict or None: A dictionary containing the details of the fabric site if found. + Returns None if the site is not a fabric site or if an error occurs. + Description: + This function fetches the fabric site details from Cisco Catalyst Center using the provided site ID. + It logs the API response and returns the site details if the site is a fabric site. If the site is not + found or is not a fabric site, it returns None. In case of an error, it logs the issue, sets the status + to "failed", and handles the failure. + """ + + try: + response = self.dnac._exec( + family="sda", + function='get_fabric_sites', + op_modifies=True, + params={"site_id": site_id}, + ) + response = response.get("response") + self.log("Received API response from 'get_fabric_sites' for the site '{0}': {1}".format(site_name, str(response)), "DEBUG") + + if not response: + self.log("Given site '{0}' is not a fabric site in Cisco Catalyst Center.".format(site_name), "INFO") + return None + + return response[0] + except Exception as e: + self.status = "failed" + self.msg = """Error while getting the details of Site with given name '{0}' present in + Cisco Catalyst Center: {1}""".format(site_name, str(e)) + self.log(self.msg, "ERROR") + self.check_return_status() + + return None + + def get_fabric_zone_detail(self, site_name, site_id): + """ + Retrieves the detailed information of a fabric zone from the Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site_name (str): The complete name of the site for which the fabric zone details need to be retrieved. + site_id (str): The unique identifier of the site in the Cisco Catalyst Center. + Returns: + dict or None: A dictionary containing the details of the fabric zone if found, + or None if the site is not a fabric zone or an error occurs. + Description: + This function fetches the fabric zone details from Cisco Catalyst Center using the provided site ID. + It logs the API response and returns the details if the site is a fabric zone. If the site is not + recognized as a fabric zone, it returns None. In case of an error, it logs the issue, sets the status + to "failed", and handles the failure appropriately. 
+ """ + + try: + response = self.dnac._exec( + family="sda", + function='get_fabric_zones', + op_modifies=True, + params={"site_id": site_id}, + ) + response = response.get("response") + self.log("Received API response from 'get_fabric_zones' for the site '{0}': {1}".format(site_name, str(response)), "DEBUG") + + if not response: + self.log("Given site '{0}' is not a fabric zone in Cisco Catalyst Center.".format(site_name), "INFO") + return None + + return response[0] + + except Exception as e: + self.status = "failed" + self.msg = """Error while getting the details of fabric zone '{0}' present in + Cisco Catalyst Center: {1}""".format(site_name, str(e)) + self.log(self.msg, "ERROR") + self.check_return_status() + + return None + + def get_have(self, config): + """ + Retrieves the current state of fabric sites and zones from the Cisco Catalyst Center based on the given configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): A configuration dictionary containing details about the fabric sites and zones. + The key "fabric_sites" should contain a list of dictionaries. + Returns: + self (object): The instance of the class with the updated `have` attribute containing the current state + of fabric sites and zones. + Description: + This function processes the provided configuration to determine the current state of fabric sites + and zones in the Cisco Catalyst Center. It iterates over the "fabric_sites" list in the configuration, + extracting the site name and type. For each site, it retrieves the corresponding site or zone ID + and details using the `get_site_id`, `get_fabric_site_detail`, and `get_fabric_zone_detail` methods. + The `have` attribute of the instance is updated with this dictionary, representing the current state + of the system. The function logs the final state and returns the instance for further use. + """ + + have = { + "fabric_sites_ids": [], + "fabric_zone_ids": [] + } + fabric_sites = config.get("fabric_sites", []) + + for site in fabric_sites: + site_name = site.get("site_name") + site_type = site.get("site_type", "fabric_site") + site_id = self.get_site_id(site_name) + + if site_type == "fabric_site": + site_detail = self.get_fabric_site_detail(site_name, site_id) + if site_detail: + self.log("Site detail for fabric site {0} collected successfully.".format(site_name), "DEBUG") + have["fabric_sites_ids"].append(site_detail.get("siteId")) + else: + zone_detail = self.get_fabric_zone_detail(site_name, site_id) + if zone_detail: + self.log("Site detail for fabric zone {0} collected successfully.".format(site_name), "DEBUG") + have["fabric_zone_ids"].append(zone_detail.get("siteId")) + + self.have = have + self.log("Current State (have): {0}".format(str(have)), "INFO") + + return self + + def get_want(self, config): + """ + Collects and validates the desired state configuration for fabric sites and zones from the given playbook configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): A dictionary containing the configuration for the desired state of fabric sites and zones. + It should include a key "fabric_sites" with a list of dictionaries. + Returns: + self (object): The instance of the class with the updated `want` attribute containing the validated desired state + of fabric sites and zones and updating authentication profile template. 
+ Description: + This function processes the provided playbook configuration to determine the desired state of fabric sites + and zones in the Cisco Catalyst Center. + The validated site information is stored in the `want` dictionary under the key "fabric_sites". + The `want` attribute of the instance is updated with this dictionary, representing the desired state + of the system. The function returns the instance for further processing or method chaining. + """ + + want = {} + fabric_sites = config.get("fabric_sites") + + if not fabric_sites: + self.status = "failed" + self.msg = ( + "No input provided in the playbook for fabric site/zone operation or updating the " + "authentication profile template in Cisco Catalysyt Center." + ) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + if fabric_sites: + fabric_site_info = [] + + for site in fabric_sites: + site_name = site.get("site_name") + site_type = site.get("site_type", "fabric_site") + + if not site_name: + self.status = "failed" + self.msg = ( + "Required parameter 'site_name' is missing. It must be provided in the playbook for fabric site/zone " + "operations in Cisco Catalyst Center." + ) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + if site_type not in ["fabric_site", "fabric_zone"]: + self.status = "failed" + self.msg = ( + "Invalid site_type '{0}' provided. Please use 'fabric_site' or 'fabric_zone' for fabric site/zone operations" + " in Cisco Catalyst Center." + ).format(site_type) + self.log(self.msg, "ERROR") + return self + + fabric_site_info.append(site) + + want["fabric_sites"] = fabric_site_info + + self.want = want + self.msg = "Successfully collected all parameters from the playbook for creating/updating the fabric sites/zones." + self.status = "success" + self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + + return self + + def create_fabric_site(self, site): + """ + Creates a fabric site in the Cisco Catalyst Center using the provided site configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site (dict): A dictionary containing the details of the fabric site to be created. + Returns: + self (object): The instance of the class with updated status and result attributes reflecting the outcome + of the fabric site creation operation. + Description: + This function creates a fabric site in the Cisco Catalyst Center based on the configuration provided + in the `site` dictionary. + The function constructs the payload for the API request, which includes the site ID, authentication profile, + and an optional flag for PubSub enablement. The payload is then sent to the `add_fabric_site` API endpoint. + After the API call, the function monitors the status of the task using the `get_task_details` method. + If the task encounters an error, the function logs the error and sets the status to "failed". If the task completes + successfully and contains the necessary data, the status is set to "success", and the site is marked as created. + """ + + try: + fabric_site_payload = [] + site_name = site.get("site_name") + auth_profile = site.get("authentication_profile") + if not auth_profile: + self.status = "failed" + self.msg = ( + "Required parameter 'authentication_profile'is missing needed for creation of fabric sites in Cisco Catalyst Center. 
" + "Please provide one of the following authentication_profile ['Closed Authentication', 'Low Impact'" + ", 'No Authentication', 'Open Authentication'] in the playbook." + ) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + site_payload = { + "siteId": self.get_site_id(site_name), + "authenticationProfileName": site.get("authentication_profile"), + "isPubSubEnabled": site.get("is_pub_sub_enabled", False) + } + fabric_site_payload.append(site_payload) + self.log("Requested payload for creating fabric site '{0}' is: {1}".format(site_name, str(site_payload)), "INFO") + + response = self.dnac._exec( + family="sda", + function='add_fabric_site', + op_modifies=True, + params={'payload': fabric_site_payload} + ) + self.log("Received API response from 'add_fabric_site' for the site {0}: {1}".format(site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "No response received from 'add_fabric_site' API, task ID not retrieved." + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + while True: + task_details = self.get_task_details(task_id) + + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Failed to create the Fabric site '{0}' due to {1}.".format(site_name, failure_reason) + else: + self.msg = "Failed to create the Fabric site '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + elif task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + self.create_site.append(site_name) + self.log("Fabric site '{0}' created successfully in the Cisco Catalyst Center".format(site_name), "INFO") + break + + time.sleep(1) + + except Exception as e: + self.status = "failed" + self.msg = "An exception occured while creating the fabric site '{0}' in Cisco Catalyst Center: {1}".format(site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def fabric_site_needs_update(self, site, site_in_ccc): + """ + Determines if a fabric site in Cisco Catalyst Center needs to be updated based on the provided configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site (dict): A dictionary containing the desired configuration of the fabric site. + site_in_ccc (dict): A dictionary containing the current configuration of the fabric site as + present in the Cisco Catalyst Center. + Returns: + bool: True if the fabric site requires an update, False otherwise. + Description: + This function compares the desired configuration (`site`) of a fabric site with its current + configuration (`site_in_ccc`) in the Cisco Catalyst Center. + The function returns True, indicating that the fabric site needs to be updated. Otherwise, it returns False, + indicating no update is needed. + """ + + auth_profile = site.get("authentication_profile") + if auth_profile and auth_profile != site_in_ccc.get("authenticationProfileName"): + return True + + is_pub_sub_enabled = site.get("is_pub_sub_enabled") + if is_pub_sub_enabled is not None and is_pub_sub_enabled != site_in_ccc.get("isPubSubEnabled"): + return True + + return False + + def update_fabric_site(self, site, site_in_ccc): + """ + Updates a fabric site in the Cisco Catalyst Center based on the provided configuration and current state. 
+ Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + site (dict): A dictionary containing the desired configuration for the fabric site. + site_in_ccc (dict): A dictionary containing the current configuration of the fabric site + in the Cisco Catalyst Center. + Returns: + self (object): The instance of the class with updated status and result attributes reflecting the outcome + of the fabric site update operation. + Description: + This method updates a fabric site in the Cisco Catalyst Center. The constructed payload includes the site ID, + authentication profile name, and PubSub enablement status and payload is sent to the `update_fabric_site` + API endpoint. + After initiating the update, the method tracks the status of the update task using `get_task_details`. + It checks for task errors or successful completion, updating the status and logging messages accordingly. + If the task fails, an appropriate error message is logged, and the status is set to "failed". + """ + + try: + update_site_params = [] + site_name = site.get("site_name") + + if site.get("is_pub_sub_enabled") is None: + pub_sub_enable = site_in_ccc.get("isPubSubEnabled") + else: + pub_sub_enable = site.get("is_pub_sub_enabled") + + if not site.get("authentication_profile"): + auth_profile = site_in_ccc.get("authenticationProfileName") + else: + auth_profile = site.get("authentication_profile") + + site_payload = { + "id": site_in_ccc.get("id"), + "siteId": site_in_ccc.get("siteId"), + "authenticationProfileName": auth_profile, + "isPubSubEnabled": pub_sub_enable + } + update_site_params.append(site_payload) + self.log("Requested payload for updating fabric site '{0}' is: {1}".format(site_name, str(site_payload)), "INFO") + + response = self.dnac._exec( + family="sda", + function='update_fabric_site', + op_modifies=True, + params={'payload': update_site_params} + ) + self.log("Received API response from 'update_fabric_site' for the site {0}: {1}".format(site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "Unable to fetch the task Id for the updation of fabric site as the 'update_fabric_site' response is empty." + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + while True: + task_details = self.get_task_details(task_id) + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Unable to update the Fabric site '{0}' because of {1}.".format(site_name, failure_reason) + else: + self.msg = "Unable to update the Fabric site '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + elif task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + self.update_site.append(site_name) + self.log("Fabric site '{0}' updated successfully in the Cisco Catalyst Center".format(site_name), "INFO") + break + time.sleep(1) + except Exception as e: + self.status = "failed" + self.msg = "An exception occured while updating the fabric site '{0}' in Cisco Catalyst Center: {1}".format(site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def create_fabric_zone(self, zone): + """ + Creates a fabric zone in the Cisco Catalyst Center based on the provided configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. 
+ zone (dict): A dictionary containing the desired configuration for the fabric zone. + Returns: + self (object): The instance of the class with updated status and result attributes reflecting the outcome + of the fabric zone creation operation. + Description: + This method creates a fabric zone in the Cisco Catalyst Center and sends the payload to the add_fabric_zone + API endpoint. The method logs the requested payload and the API response. + After initiating the creation, the method monitors the task's status using `get_task_details`. + It checks for task errors or successful completion. If the task fails, an appropriate error message + is logged, and the status is set to "failed". If the task succeeds, the status is set to "success", + and the site name is added to the list of successfully created zones. + The function returns the class instance (`self`) with the updated attributes. + """ + + try: + fabric_zone_payload = [] + site_name = zone.get("site_name") + + zone_payload = { + "siteId": self.get_site_id(site_name), + "authenticationProfileName": zone.get("authentication_profile"), + } + fabric_zone_payload.append(zone_payload) + self.log("Requested payload for creating fabric zone '{0}' is: {1}".format(site_name, zone_payload), "INFO") + + response = self.dnac._exec( + family="sda", + function='add_fabric_zone', + op_modifies=True, + params={'payload': fabric_zone_payload} + ) + self.log("Received API response from 'add_fabric_zone' for the site {0}: {1}".format(site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "Unable to fetch the task Id for the creation of fabric zone as the 'add_fabric_zone' response is empty." + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + while True: + task_details = self.get_task_details(task_id) + + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Unable to create the Fabric zone '{0}' because of {1}.".format(site_name, failure_reason) + else: + self.msg = "Unable to create the Fabric zone '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + elif task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + self.create_zone.append(site_name) + self.log("Fabric zone '{0}' created successfully in the Cisco Catalyst Center.".format(site_name), "INFO") + break + time.sleep(1) + except Exception as e: + self.status = "failed" + self.msg = "An exception occured while creating the fabric zone '{0}' in Cisco Catalyst Center: {1}".format(site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def update_fabric_zone(self, zone, zone_in_ccc): + """ + Updates an existing fabric zone in the Cisco Catalyst Center with the provided configuration. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + zone (dict): A dictionary containing the desired updates for the fabric zone. + zone_in_ccc (dict): A dictionary containing the current configuration of the fabric zone + in the Cisco Catalyst Center. + Returns: + self (object): The instance of the class with updated status and result attributes reflecting the outcome + of the fabric zone update operation. + Description: + This method updates the configuration of a fabric zone in the Cisco Catalyst Center. 
+ The constructed payload is sent to the `update_fabric_zone` API endpoint. The method logs the + requested payload and the API response. + After initiating the update, the method monitors the task's status using `get_task_details`. It checks + for task errors or successful completion. + The function returns the class instance (`self`) with the updated attributes. + """ + + try: + update_zone_params = [] + site_name = zone.get("site_name") + + zone_payload = { + "id": zone_in_ccc.get("id"), + "siteId": zone_in_ccc.get("siteId"), + "authenticationProfileName": zone.get("authentication_profile") or zone_in_ccc.get("authenticationProfileName") + } + update_zone_params.append(zone_payload) + self.log("Requested payload for updating fabric zone '{0}' is: {1}".format(site_name, zone_payload), "INFO") + + response = self.dnac._exec( + family="sda", + function='update_fabric_zone', + op_modifies=True, + params={'payload': update_zone_params} + ) + self.log("Received API response from 'update_fabric_zone' for the site {0}: {1}".format(site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "Unable to fetch the task Id for the updation of fabric zone as the 'update_fabric_zone' response is empty." + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + while True: + task_details = self.get_task_details(task_id) + + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Unable to update the Fabric zone '{0}' because of {1}.".format(site_name, failure_reason) + else: + self.msg = "Unable to update the Fabric zone '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + elif task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + self.log("Fabric zone '{0}' updated successfully in the Cisco Catalyst Center".format(site_name), "INFO") + self.update_zone.append(site_name) + break + time.sleep(1) + except Exception as e: + self.status = "failed" + self.msg = "An exception occured while updating the fabric zone '{0}' in Cisco Catalyst Center: {1}".format(site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def validate_auth_profile_parameters(self, auth_profile_dict): + """ + Validates the parameters provided for updating the authentication profile template. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + auth_profile_dict (dict): A dictionary containing the parameters for the authentication profile. + Returns: + self (objetc): The instance of the class with updated status and result attributes if invalid parameters are found. + Description: + This method checks the validity of the provided parameters for the authentication profile template. It validates + the "authentication_order" to ensure it is either "dot1x" or "mac". For "dot1x_fallback_timeout", it ensures the + value is an integer within the range of 3 to 120. The "number_of_hosts" must be either "Single" or "Unlimited". + If any invalid parameters are found, they are added to the `invalid_auth_profile_list`. Corresponding error messages + are logged, and the status is set to "failed". The method also logs warnings for any exceptions encountered during + the validation process. 
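Putting the accepted values together, an 'update_authentication_profile' block that passes these checks looks like the following; the concrete values are examples chosen within the documented limits.

# Values chosen to satisfy the validation described above.
auth_profile_update = {
    "authentication_order": "dot1x",      # accepted: 'dot1x' or 'mac'
    "dot1x_fallback_timeout": 28,         # accepted range: 3..120 seconds
    "wake_on_lan": False,                 # boolean
    "number_of_hosts": "Single",          # accepted: 'Single' or 'Unlimited'
    "enable_bpu_guard": True,             # honoured only for 'Closed Authentication'
}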
+ """ + + invalid_auth_profile_list = [] + auth_order = auth_profile_dict.get("authentication_order") + if auth_order and auth_order not in ["dot1x", "mac"]: + invalid_auth_profile_list.append("authentication_order") + msg = ( + "Invalid authentication_order '{0}'given in the playbook for the updation of authentication profile template. " + "Please provide one of the following authentication_order ['dot1x', 'mac'] in the playbook." + ).format(auth_order) + self.log(msg, "ERROR") + + fall_timeout = auth_profile_dict.get("dot1x_fallback_timeout") + if fall_timeout: + try: + timeout = int(fall_timeout) + if timeout not in range(3, 121): + invalid_auth_profile_list.append("dot1x_fallback_timeout") + msg = ( + "Invalid 'dot1x_fallback_timeout' '{0}' given in the playbook. " + "Please provide a value in the range [3, 120]." + ).format(timeout) + self.log(msg, "ERROR") + except Exception as e: + invalid_auth_profile_list.append("dot1x_fallback_timeout") + msg = ( + "Invalid 'dot1x_fallback_timeout' string '{0}' given in the playbook, unable to convert it into the integer. " + "Please provide a value in the range [3, 120]." + ).format(fall_timeout) + self.log(msg, "WARNING") + + number_of_hosts = auth_profile_dict.get("number_of_hosts") + if number_of_hosts and number_of_hosts.title() not in ["Single", "Unlimited"]: + invalid_auth_profile_list.append("number_of_hosts") + msg = ( + "Invalid number_of_hosts '{0}'given in the playbook for the updation of authentication profile template. " + "Please provide one of the following: ['Single', 'Unlimited']." + ).format(auth_order) + self.log(msg, "ERROR") + + if invalid_auth_profile_list: + self.status = "failed" + self.msg = ( + "Invalid parameters found: {0}. " + "Unable to update the authentication profile template." + ).format(invalid_auth_profile_list) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + + return self + + def get_authentication_profile(self, fabric_id, auth_profile, site_name): + """ + Retrieves the details of an authentication profile for a given fabric and site from the Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + fabric_id (str): The ID of the fabric to which the authentication profile belongs. + auth_profile (str): The name of the authentication profile to retrieve. + site_name (str): The name of the site associated with the authentication profile. + Returns: + dict or None: A dictionary containing the details of the authentication profile if found, or None if no profile is associated + with the site or if an error occurs. + Description: + This method sends a request to the Cisco Catalyst Center to fetch the authentication profile details based on the provided + `fabric_id` and `auth_profile` name. The `site_name` is used for logging purposes to provide context in the logs. + If the response contains authentication profile details, these details are returned. If no profile is found or if an error + occurs during the request, the method logs an appropriate message and returns `None`. 
+ """ + + try: + profile_details = None + response = self.dnac._exec( + family="sda", + function='get_authentication_profiles', + op_modifies=True, + params={ + "fabric_id": fabric_id, + "authentication_profile_name": auth_profile + } + ) + response = response.get("response") + self.log("Received API response from 'get_authentication_profiles' for the site '{0}': {1}".format(site_name, str(response)), "DEBUG") + + if not response: + self.log("No Authentication profile asssociated to this site '{0}' in Cisco Catalyst Center.".format(site_name), "INFO") + return profile_details + + profile_details = response[0] + return profile_details + except Exception as e: + self.status = "failed" + self.msg = ( + "Error while getting the details of authentication profiles for the site '{0}' present in " + "Cisco Catalyst Center: {1}" + ).format(site_name, str(e)) + self.log(self.msg, "ERROR") + self.check_return_status() + + return None + + def auth_profile_needs_update(self, auth_profile_dict, auth_profile_in_ccc): + """ + Determines if the authentication profile requires an update by comparing it with the existing profile in Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + auth_profile_dict (dict): A dictionary containing the desired authentication profile settings to compare. + auth_profile_in_ccc (dict): A dictionary containing the current authentication profile settings from Cisco Catalyst Center. + Returns: + bool: Returns `True` if any of the settings in `auth_profile_dict` differ from those in `auth_profile_in_ccc` and an update + is needed. Returns `False` if the settings match and no update is required. + Description: + This method compares the provided authentication profile settings (`auth_profile_dict`) with the current settings retrieved from + the Cisco Catalyst Center (`auth_profile_in_ccc`). It considers the possibility of an additional setting "enable_bpu_guard" if + the current profile is "Closed Authentication". + It iterates through a mapping of profile settings and checks if any of the settings require an update. If any discrepancies are + found, the method returns `True`. If all settings match, it returns `False`. + """ + + profile_key_mapping = { + "authentication_order": "authenticationOrder", + "dot1x_fallback_timeout": "dot1xToMabFallbackTimeout", + "wake_on_lan": "wakeOnLan", + "number_of_hosts": "numberOfHosts" + } + if auth_profile_in_ccc.get("authenticationProfileName") == "Closed Authentication": + profile_key_mapping["enable_bpu_guard"] = "isBpduGuardEnabled" + + for key, ccc_key in profile_key_mapping.items(): + desired_value = auth_profile_dict.get(key) + + if desired_value is None: + continue + + current_value = auth_profile_in_ccc.get(ccc_key) + + if key == "dot1x_fallback_timeout": + desired_value = int(desired_value) + current_value = int(current_value) + + if desired_value != current_value: + return True + + return False + + def collect_authentication_params(self, auth_profile_dict, auth_profile_in_ccc): + """ + Collects and prepares the updated authentication profile parameters based on the provided dictionary and the current profile in + Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + auth_profile_dict (dict): A dictionary containing the desired authentication profile settings. + auth_profile_in_ccc (dict): A dictionary containing the current authentication profile settings from Cisco Catalyst Center. 
+ Returns: + list: A list containing a single dictionary with the updated authentication profile parameters. + Description: + This method prepares the updated parameters for an authentication profile by combining desired settings from `auth_profile_dict` with + the current settings from `auth_profile_in_ccc`. + It creates a dictionary with the ID, fabric ID, profile name, and updated settings for authentication order, dot1x fallback timeout, + number of hosts, and Wake-on-LAN. If the profile is "Closed Authentication," it also includes the BPDU guard setting. + The method returns a list containing the updated parameters in a dictionary, which can be used for further processing or API requests. + """ + + updated_params = [] + profile_name = auth_profile_in_ccc.get("authenticationProfileName") + authentications_params_dict = { + "id": auth_profile_in_ccc.get("id"), + "fabricId": auth_profile_in_ccc.get("fabricId"), + "authenticationProfileName": profile_name, + "authenticationOrder": auth_profile_dict.get("authentication_order") or auth_profile_in_ccc.get("authenticationOrder"), + "dot1xToMabFallbackTimeout": int(auth_profile_dict.get("dot1x_fallback_timeout")) or auth_profile_in_ccc.get("dot1xToMabFallbackTimeout"), + "numberOfHosts": auth_profile_dict.get("number_of_hosts") or auth_profile_in_ccc.get("numberOfHosts"), + } + + if auth_profile_dict.get("wake_on_lan") is None: + authentications_params_dict["wakeOnLan"] = auth_profile_in_ccc.get("wakeOnLan") + else: + authentications_params_dict["wakeOnLan"] = auth_profile_dict.get("wake_on_lan") + + if profile_name == "Closed Authentication": + if auth_profile_dict.get("enable_bpu_guard") is None: + auth_profile_dict["isBpduGuardEnabled"] = auth_profile_in_ccc.get("isBpduGuardEnabled", True) + else: + auth_profile_dict["isBpduGuardEnabled"] = auth_profile_dict.get("enable_bpu_guard") + + updated_params.append(authentications_params_dict) + + return updated_params + + def update_authentication_profile_template(self, profile_update_params, site_name): + """ + Updates the authentication profile template for a specified site in Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + profile_update_params (dict): A dictionary containing the parameters to update the authentication profile. + site_name (str): The name of the site where the authentication profile is being updated. + Returns: + self (object): Returns the current instance of the class with updated status and message attributes. + Description: + This method sends a request to update the authentication profile template for the specified site using the + provided parameters. It first logs the requested payload and sends it to the API for processing. + It then monitors the task status by polling until the update is complete. If the update is successful, + it logs a success message and appends the site name to the list of updated profiles. If an error occurs or + the task fails, it logs an error message and updates the status to "failed". 
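Taken together, the profile helpers above are typically chained as sketched below. This is an illustrative sequence rather than an excerpt of the module: fabric_id, site_name, and auth_profile_dict are placeholders supplied by the calling code.

# Illustrative wiring of the authentication-profile helpers (inside the class).
auth_profile_in_ccc = self.get_authentication_profile(fabric_id, "Closed Authentication", site_name)
if auth_profile_in_ccc and self.auth_profile_needs_update(auth_profile_dict, auth_profile_in_ccc):
    profile_update_params = self.collect_authentication_params(auth_profile_dict, auth_profile_in_ccc)
    self.update_authentication_profile_template(profile_update_params, site_name)
else:
    # Nothing to change; record the site as needing no profile update.
    self.no_update_profile.append(site_name)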
+ """ + + try: + self.log("Requested payload for updating authentication profile for site {0}: {1}".format(site_name, profile_update_params), "INFO") + response = self.dnac._exec( + family="sda", + function='update_authentication_profile', + op_modifies=True, + params={'payload': profile_update_params} + ) + self.log("Received API response from 'update_authentication_profile'for site {0}: {1}".format(site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "Unable to fetch the task Id for the updation of authentication profile for site '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + if not task_id: + self.status = "failed" + self.msg = "No task ID returned for the update request of the authentication profile for site '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + while True: + task_details = self.get_task_details(task_id) + + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Unable to update the authentication profile for site '{0}' because of {1}.".format(site_name, failure_reason) + else: + self.msg = "Unable to update the authentication profile for site '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + + if task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + self.update_auth_profile.append(site_name) + self.log("Authentication profile for the site '{0}' updated successfully in the Cisco Catalyst Center".format(site_name), "INFO") + break + + time.sleep(1) + except Exception as e: + self.status = "failed" + self.msg = "An exception occured while updating the authentication profile for site '{0}' in Cisco Catalyst Center: {1}".format(site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def delete_fabric_site_zone(self, fabric_id, site_name, site_type): + """ + Deletes a fabric site or fabric zone from Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + fabric_id (str): The ID of the fabric site or fabric zone to be deleted. + site_name (str): The name of the fabric site or fabric zone to be deleted. + site_type (str): The type of the entity to be deleted. Should be either "fabric_site" or "fabric_zone". + Returns: + self (object): Returns the current instance of the class with updated status and message attributes. + Description: + This method sends a request to delete a fabric site or fabric zone based on the provided `fabric_id` and `site_type`. + It determines the appropriate API function to call based on the `site_type`, either "delete_fabric_site_by_id" or + "delete_fabric_zone_by_id". It returns the class instance for further processing or chaining. 
+ """ + + try: + if site_type == "fabric_site": + api_name = "delete_fabric_site_by_id" + type_name = "fabric site" + else: + api_name = "delete_fabric_zone_by_id" + type_name = "fabric zone" + + response = self.dnac._exec( + family="sda", + function=api_name, + op_modifies=True, + params={"id": fabric_id}, + ) + self.log("Received API response from '{0}' for the site {1}: {2}".format(api_name, site_name, str(response)), "DEBUG") + response = response.get("response") + + if not response: + self.status = "failed" + self.msg = "Unable to fetch the task Id for the deletion of {0}: '{1}'.".format(type_name, site_name) + self.log(self.msg, "ERROR") + return self + + task_id = response.get("taskId") + + if not task_id: + self.status = "failed" + self.msg = "No task ID returned for the update request of the deletion of fabric site/zone '{0}'.".format(site_name) + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + while True: + task_details = self.get_task_details(task_id) + + if task_details.get("isError"): + self.status = "failed" + failure_reason = task_details.get("failureReason") + if failure_reason: + self.msg = "Unable to delete {0} '{1}' because of {2}.".format(type_name, site_name, failure_reason) + else: + self.msg = "Unable to delete {0} '{1}'.".format(type_name, site_name) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + break + + if task_details.get("endTime") and "workflow_id" in task_details.get("data"): + self.status = "success" + if site_type == "fabric_site": + self.delete_site.append(site_name) + else: + self.delete_zone.append(site_name) + self.log("{0} '{1}' deleted successfully from the Cisco Catalyst Center".format(type_name.title(), site_name), "INFO") + break + + time.sleep(1) + except Exception as e: + self.status = "failed" + self.msg = "Exception occurred while deleting {0} '{1}' due to: {2}".format(type_name, site_name, str(e)) + self.log(self.msg, "ERROR") + + return self + + def update_site_zones_profile_messages(self): + """ + Updates and logs messages based on the status of fabric sites, fabric zones, and authentication profile templates. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + self (object): Returns the current instance of the class with updated `result` and `msg` attributes. + Description: + This method aggregates status messages related to the creation, update, or deletion of fabric sites, fabric zones, + and authentication profile templates. + It checks various instance variables (`create_site`, `update_site`, `no_update_site`, `create_zone`, `update_zone`, + `no_update_zone`, `update_auth_profile`, `no_update_profile`, `delete_site`, `absent_site`, `delete_zone`, `absent_zone`) + to determine the status and generates corresponding messages. + The method also updates the `result["response"]` attribute with the concatenated status messages. 
+ """ + + self.result["changed"] = False + result_msg_list = [] + + if self.create_site: + create_site_msg = "Fabric site(s) '{0}' created successfully in Cisco Catalyst Center.".format(self.create_site) + result_msg_list.append(create_site_msg) + + if self.update_site: + update_site_msg = "Fabric site(s) '{0}' updated successfully in Cisco Catalyst Center.".format(self.update_site) + result_msg_list.append(update_site_msg) + + if self.no_update_site: + no_update_site_msg = "Fabric site(s) '{0}' need no update in Cisco Catalyst Center.".format(self.no_update_site) + result_msg_list.append(no_update_site_msg) + + if self.create_zone: + create_zone_msg = "Fabric zone(s) '{0}' created successfully in Cisco Catalyst Center.".format(self.create_zone) + result_msg_list.append(create_zone_msg) + + if self.update_zone: + update_zone_msg = "Fabric zone(s) '{0}' updated successfully in Cisco Catalyst Center.".format(self.update_zone) + result_msg_list.append(update_zone_msg) + + if self.no_update_zone: + no_update_zone_msg = "Fabric zone(s) '{0}' need no update in Cisco Catalyst Center.".format(self.no_update_zone) + result_msg_list.append(no_update_zone_msg) + + if self.update_auth_profile: + update_auth_msg = """Authentication profile template for site(s) '{0}' updated successfully in Cisco Catalyst + Center.""".format(self.update_auth_profile) + result_msg_list.append(update_auth_msg) + + if self.no_update_profile: + no_update_auth_msg = "Authentication profile template for site(s) '{0}' need no update in Cisco Catalyst Center.".format(self.no_update_profile) + result_msg_list.append(no_update_auth_msg) + + if self.delete_site: + delete_site_msg = "Fabric site(s) '{0}' deleted successfully from the Cisco Catalyst Center.".format(self.delete_site) + result_msg_list.append(delete_site_msg) + + if self.absent_site: + absent_site_msg = "Unable to delete fabric site(s) '{0}' as they are not present in Cisco Catalyst Center.".format(self.absent_site) + result_msg_list.append(absent_site_msg) + + if self.delete_zone: + delete_zone_msg = "Fabric zone(s) '{0}' deleted successfully from the Cisco Catalyst Center.".format(self.delete_zone) + result_msg_list.append(delete_zone_msg) + + if self.absent_zone: + absent_zone_msg = "Unable to delete fabric zone(s) '{0}' as they are not present in Cisco Catalyst Center.".format(self.absent_zone) + result_msg_list.append(absent_zone_msg) + + if self.create_site or self.update_site or self.create_zone or self.update_zone or self.delete_site or self.update_auth_profile: + self.result["changed"] = True + + self.msg = " ".join(result_msg_list) + self.log(self.msg, "INFO") + self.result["response"] = self.msg + + return self + + def get_diff_merged(self, config): + """ + Creates, updates, or deletes fabric sites and zones based on the provided configuration, and manages + authentication profile updates. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): A dictionary containing the configuration for fabric sites and zones and updating + authentication profile template. + Returns: + self (object): Returns the current instance of the class with updated attributes based on the operations performed. + Description: + This method processes the provided configuration to manage fabric sites and zones in Cisco Catalyst Center. + 1. Fabric Sites + - If 'fabric_sites' is present in the configuration, it iterates over the list of sites. 
+                - Checks if the site needs to be created or updated based on its type ("fabric_site" or "fabric_zone").
+                - Creates or updates the site as necessary. If the site does not need any updates, it logs this information.
+            2. Authentication Profile
+                - If an `update_authentication_profile` parameter is provided, it validates and updates the authentication
+                    profile template associated with the site.
+                - Ensures that the authentication profile is valid and performs updates if needed.
+                - If no update is necessary or if the profile is not present, it logs the appropriate messages.
+        """
+
+        # Create/Update Fabric sites/zones in Cisco Catalyst Center
+        fabric_sites = self.want.get('fabric_sites')
+
+        for site in fabric_sites:
+            site_name = site.get("site_name")
+            site_type = site.get("site_type", "fabric_site")
+            site_id = self.get_site_id(site_name)
+            auth_profile = site.get("authentication_profile")
+
+            if auth_profile and auth_profile not in ["Closed Authentication", "Low Impact", "No Authentication", "Open Authentication"]:
+                self.status = "failed"
+                self.msg = (
+                    "Invalid authentication_profile '{0}' given in the playbook for the creation of fabric site. "
+                    "Please provide one of the following authentication_profile ['Closed Authentication', 'Low Impact'"
+                    ", 'No Authentication', 'Open Authentication'] in the playbook."
+                ).format(auth_profile)
+                self.log(self.msg, "ERROR")
+                self.result["response"] = self.msg
+                return self
+
+            if site_type == "fabric_site":
+                # Check whether the site is already a fabric site or not.
+                if site_id not in self.have.get("fabric_sites_ids"):
+                    # Create the fabric site in Cisco Catalyst Center
+                    self.create_fabric_site(site).check_return_status()
+                else:
+                    # Check whether the fabric site needs any update or not
+                    site_in_ccc = self.get_fabric_site_detail(site_name, site_id)
+                    require_update = self.fabric_site_needs_update(site, site_in_ccc)
+                    if require_update:
+                        self.update_fabric_site(site, site_in_ccc).check_return_status()
+                    else:
+                        self.status = "success"
+                        self.no_update_site.append(site_name)
+                        self.log("Fabric site '{0}' already present and does not need any update in the Cisco Catalyst Center.".format(site_name), "INFO")
+            else:
+                # Check whether the site is already a fabric zone or not.
+                if site_id not in self.have.get("fabric_zone_ids"):
+                    # Create the fabric zone in Cisco Catalyst Center
+                    self.create_fabric_zone(site).check_return_status()
+                else:
+                    # Check whether the fabric zone needs any update or not
+                    zone_in_ccc = self.get_fabric_zone_detail(site_name, site_id)
+                    if auth_profile and auth_profile != zone_in_ccc.get("authenticationProfileName"):
+                        self.update_fabric_zone(site, zone_in_ccc).check_return_status()
+                    else:
+                        self.status = "success"
+                        self.no_update_zone.append(site_name)
+                        self.log("Fabric zone '{0}' already present and does not need any update in the Cisco Catalyst Center.".format(site_name), "INFO")
+
+            # Updating/customising the default parameters for authentication profile template
+            if site.get("update_authentication_profile"):
+                if not auth_profile:
+                    self.status = "failed"
+                    self.msg = (
+                        "Required parameter 'authentication_profile' is missing, which is needed to update the Authentication Profile template. "
+                        "Please provide one of the following authentication_profile ['Closed Authentication', 'Low Impact'"
+                        ", 'Open Authentication'] in the playbook."
+                    )
+                    self.log(self.msg, "ERROR")
+                    self.result["response"] = self.msg
+                    return self
+
+                if auth_profile == "No Authentication":
+                    self.status = "success"
+                    msg = (
+                        "Unable to update 'authentication_profile' for the site '{0}' as for the profile template 'No Authentication' updating "
+                        "authentication_profile is not supported. Please provide one of the following authentication_profile ['Closed Authentication'"
+                        ", 'Low Impact', 'Open Authentication'] in the playbook."
+                    ).format(site_name)
+                    self.log(msg, "INFO")
+                    self.no_update_profile.append(site_name)
+                    return self
+
+                # With the given site id collect the fabric site/zone id
+                if site_type == "fabric_site":
+                    site_detail = self.get_fabric_site_detail(site_name, site_id)
+                    fabric_id = site_detail.get("id")
+                else:
+                    zone_detail = self.get_fabric_zone_detail(site_name, site_id)
+                    fabric_id = zone_detail.get("id")
+
+                # Validate the playbook input parameter for updating the authentication profile
+                auth_profile_dict = site.get("update_authentication_profile")
+                self.validate_auth_profile_parameters(auth_profile_dict).check_return_status()
+                validate_msg = (
+                    "All the given parameter(s) '{0}' in the playbook for the updation of authentication "
+                    "profile in SDA fabric site/zone are validated successfully."
+                ).format(auth_profile_dict)
+                self.log(validate_msg, "INFO")
+                auth_profile_in_ccc = self.get_authentication_profile(fabric_id, auth_profile, site_name)
+
+                if not auth_profile_in_ccc:
+                    self.status = "success"
+                    msg = (
+                        "There is no authentication template profile associated with the site '{0}' "
+                        "in the Cisco Catalyst Center so unable to update the profile parameters."
+                    ).format(site_name)
+                    self.log(msg, "INFO")
+                    self.no_update_profile.append(site_name)
+                    return self
+
+                profile_needs_update = self.auth_profile_needs_update(auth_profile_dict, auth_profile_in_ccc)
+                if not profile_needs_update:
+                    self.status = "success"
+                    msg = (
+                        "Authentication profile for the site '{0}' does not need any update in the "
+                        "Cisco Catalyst Center."
+                    ).format(site_name)
+                    self.log(msg, "INFO")
+                    self.no_update_profile.append(site_name)
+                    return self
+
+                # Collect the authentication profile parameters for the update operation
+                profile_update_params = self.collect_authentication_params(auth_profile_dict, auth_profile_in_ccc)
+                self.update_authentication_profile_template(profile_update_params, site_name).check_return_status()
+
+        return self
+
+    def get_diff_deleted(self, config):
+        """
+        Deletes fabric sites and zones from the Cisco Catalyst Center based on the provided configuration.
+        Args:
+            self (object): An instance of a class used for interacting with Cisco Catalyst Center.
+            config (dict): A dictionary containing the configuration for fabric sites and zones. It may include:
+                - 'fabric_sites' - List of dictionaries, where each dictionary represents a fabric site or zone.
+                    - 'site_name' - The name of the site or zone to be deleted.
+                    - 'site_type' - Type of the site or zone, either "fabric_site" or "fabric_zone". Defaults to "fabric_site".
+        Returns:
+            self (object): Returns the current instance of the class with updated attributes based on the deletion operations performed.
+        Description:
+            This method processes the provided configuration to manage the deletion of fabric sites and zones in Cisco Catalyst Center.
+            - For Fabric Sites
+                - Verifies if the site exists in Cisco Catalyst Center.
+                - Deletes the site if it exists; otherwise, logs a message indicating the site is not present.
+ - For Fabric Zones + - Verifies if the zone exists in Cisco Catalyst Center. + - Deletes the zone if it exists; otherwise, logs a message indicating the zone is not present. + """ + + # Delete Fabric sites/zones from the Cisco Catalyst Center + if not config.get('fabric_sites'): + self.status = "failed" + self.msg = "Unable to delete any fabric site/zone or authentication profile template as input is not given in the playbook." + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + fabric_sites = self.want.get('fabric_sites') + + for site in fabric_sites: + site_name = site.get("site_name") + site_type = site.get("site_type", "fabric_site") + if not site_name: + self.status = "failed" + self.msg = "Unable to delete fabric site/zone as required parameter 'site_name' is not given in the playbook." + self.log(self.msg, "ERROR") + self.result["response"] = self.msg + return self + + site_id = self.get_site_id(site_name) + + if site_type == "fabric_site": + # Check whether fabric site is present in Cisco Catalyst Center. + if site_id in self.have.get("fabric_sites_ids"): + site_detail = self.get_fabric_site_detail(site_name, site_id) + fabric_id = site_detail.get("id") + # Delete the fabric site from the Cisco Catalyst Center + self.delete_fabric_site_zone(fabric_id, site_name, site_type).check_return_status() + else: + self.status = "success" + self.absent_site.append(site_name) + self.log("Unable to delete fabric site '{0}' as it is not present in the Cisco Catalyst Center.".format(site_name), "INFO") + else: + # Check whether fabric zone is present in Cisco Catalyst Center. + if site_id in self.have.get("fabric_zone_ids"): + site_detail = self.get_fabric_zone_detail(site_name, site_id, ) + fabric_id = site_detail.get("id") + # Delete the fabric zone from the Cisco Catalyst Center + self.delete_fabric_site_zone(fabric_id, site_name, site_type).check_return_status() + else: + self.status = "success" + self.absent_zone.append(site_name) + self.log("Unable to delete fabric zone '{0}' as it is not present in the Cisco Catalyst Center.".format(site_name), "INFO") + + return self + + def verify_diff_merged(self, config): + """ + Verify the addition/update status of fabric site/zones in Cisco Catalyst Center. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): The configuration details to be verified. + Returns: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Description: + This method verifies whether the specified configurations have been successfully added/updated + in Cisco Catalyst Center as desired. 
+ """ + + self.get_have(config) + self.log("Current State (have): {0}".format(str(self.have)), "INFO") + self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + + if config.get('fabric_sites'): + fabric_sites = self.want.get('fabric_sites') + verify_site_list, verify_auth_list = [], [] + site_name_list, auth_name_list = [], [] + auth_flag = False + + for site in fabric_sites: + site_name = site.get("site_name") + site_type = site.get("site_type", "fabric_site") + site_id = self.get_site_id(site_name) + + if site_type == "fabric_site": + if site_id not in self.have.get("fabric_sites_ids"): + verify_site_list.append(site_name) + else: + site_name_list.append(site_name) + else: + if site_id not in self.have.get("fabric_zone_ids"): + verify_site_list.append(site_name) + else: + site_name_list.append(site_name) + + # Verifying updating/customising the default parameters for authentication profile template + if site.get("update_authentication_profile"): + auth_flag = True + # With the given site id collect the fabric site/zone id + if site_type == "fabric_site": + site_detail = self.get_fabric_site_detail(site_name, site_id) + fabric_id = site_detail.get("id") + auth_name_list.append(site_name) + else: + zone_detail = self.get_fabric_zone_detail(site_name, site_id) + fabric_id = zone_detail.get("id") + auth_name_list.append(site_name) + + if not fabric_id: + verify_auth_list.append(site_name) + + if not verify_site_list: + self.status = "success" + msg = ( + "Requested fabric site(s)/zone(s) '{0}' have been successfully added/updated to the Cisco Catalyst Center " + "and their addition/updation has been verified." + ).format(site_name_list) + self.log(msg, "INFO") + else: + msg = ( + "Playbook's input does not match with Cisco Catalyst Center, indicating that the fabric site(s) '{0}' " + " addition/updation task may not have executed successfully." + ).format(verify_site_list) + self.log(msg, "INFO") + + if not auth_flag: + return self + + if not verify_auth_list: + self.status = "success" + msg = ( + "Authentication template profile for the site(s) '{0}' have been successfully updated to the Cisco Catalyst Center " + "and their updation has been verified." + ).format(auth_name_list) + self.log(msg, "INFO") + else: + msg = ( + "Playbook's input does not match with Cisco Catalyst Center, indicating that the Authentication template " + "profile for the site(s) '{0}' updation task may not have executed successfully." + ).format(verify_auth_list) + self.log(msg, "INFO") + + return self + + def verify_diff_deleted(self, config): + """ + Verify the deletion status of fabric sites/zones fromt the Cisco Catalyst Center. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): The configuration details to be verified. + Returns: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Description: + This method checks the deletion status of a configuration in Cisco Catalyst Center. + It validates whether the specified fabric site/zone deleted from Cisco Catalyst Center. 
+        """
+
+        self.get_have(config)
+        self.log("Current State (have): {0}".format(str(self.have)), "INFO")
+        self.log("Desired State (want): {0}".format(str(self.want)), "INFO")
+
+        fabric_sites = self.want.get('fabric_sites')
+        verify_site_list, site_name_list = [], []
+
+        for site in fabric_sites:
+            site_name = site.get("site_name")
+            site_type = site.get("site_type", "fabric_site")
+            site_id = self.get_site_id(site_name)
+
+            if site_type == "fabric_site":
+                # Check whether fabric site is present in Cisco Catalyst Center.
+                if site_id in self.have.get("fabric_sites_ids"):
+                    verify_site_list.append(site_name)
+                else:
+                    site_name_list.append(site_name)
+            else:
+                # Check whether fabric zone is present in Cisco Catalyst Center.
+                if site_id in self.have.get("fabric_zone_ids"):
+                    verify_site_list.append(site_name)
+                else:
+                    site_name_list.append(site_name)
+
+        if not verify_site_list:
+            self.status = "success"
+            msg = (
+                "Requested fabric site(s)/zone(s) '{0}' have been successfully deleted from the Cisco Catalyst "
+                "Center and their deletion has been verified."
+            ).format(site_name_list)
+            self.log(msg, "INFO")
+        else:
+            msg = (
+                "Playbook's input does not match with Cisco Catalyst Center, indicating that fabric site(s)/zone(s)"
+                " '{0}' deletion task may not have executed successfully."
+            ).format(verify_site_list)
+            self.log(msg, "INFO")
+
+        return self
+
+
+def main():
+    """ main entry point for module execution
+    """
+
+    element_spec = {'dnac_host': {'required': True, 'type': 'str'},
+                    'dnac_port': {'type': 'str', 'default': '443'},
+                    'dnac_username': {'type': 'str', 'default': 'admin', 'aliases': ['user']},
+                    'dnac_password': {'type': 'str', 'no_log': True},
+                    'dnac_verify': {'type': 'bool', 'default': 'True'},
+                    'dnac_version': {'type': 'str', 'default': '2.2.3.3'},
+                    'dnac_debug': {'type': 'bool', 'default': False},
+                    'dnac_log_level': {'type': 'str', 'default': 'WARNING'},
+                    "dnac_log_file_path": {"type": 'str', "default": 'dnac.log'},
+                    "dnac_log_append": {"type": 'bool', "default": True},
+                    'dnac_log': {'type': 'bool', 'default': False},
+                    'validate_response_schema': {'type': 'bool', 'default': True},
+                    'config_verify': {'type': 'bool', "default": False},
+                    'dnac_api_task_timeout': {'type': 'int', "default": 1200},
+                    'dnac_task_poll_interval': {'type': 'int', "default": 2},
+                    'config': {'required': True, 'type': 'list', 'elements': 'dict'},
+                    'state': {'default': 'merged', 'choices': ['merged', 'deleted']}
+                    }
+
+    module = AnsibleModule(argument_spec=element_spec,
+                           supports_check_mode=False)
+
+    ccc_fabric_sites = FabricSitesZones(module)
+    state = ccc_fabric_sites.params.get("state")
+
+    if state not in ccc_fabric_sites.supported_states:
+        ccc_fabric_sites.status = "invalid"
+        ccc_fabric_sites.msg = "State {0} is invalid".format(state)
+        ccc_fabric_sites.check_return_status()
+
+    ccc_fabric_sites.validate_input().check_return_status()
+    config_verify = ccc_fabric_sites.params.get("config_verify")
+
+    for config in ccc_fabric_sites.validated_config:
+        ccc_fabric_sites.reset_values()
+        ccc_fabric_sites.get_want(config).check_return_status()
+        ccc_fabric_sites.get_have(config).check_return_status()
+        ccc_fabric_sites.get_diff_state_apply[state](config).check_return_status()
+        if config_verify:
+            ccc_fabric_sites.verify_diff_state_apply[state](config).check_return_status()
+
+    # Invoke the API to check the status and log the output of each site/zone and authentication profile update on console.
+ ccc_fabric_sites.update_site_zones_profile_messages().check_return_status() + + module.exit_json(**ccc_fabric_sites.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/inventory_intent.py b/plugins/modules/inventory_intent.py index 4f312cb7d1..25c5c7a012 100644 --- a/plugins/modules/inventory_intent.py +++ b/plugins/modules/inventory_intent.py @@ -185,6 +185,17 @@ description: Make this as true needed for the resyncing of device. type: bool default: False + resync_device_count: + description: Specifies the maximum number of devices to be resynced in the inventory. Ensure this count does not exceed 200, + as attempting to resync more than 200 devices may cause the 'sync_devices_using_forcesync' API to enter an + infinite loop. + type: int + default: 200 + resync_max_timeout: + description: Sets the maximum timeout for the device resync process in the inventory, in seconds. The default is 600 seconds, + which helps prevent infinite loops. + type: int + default: 600 reboot_device: description: Make this as true needed for the Rebooting of Access Points. type: bool @@ -782,7 +793,9 @@ def validate_input(self): 'device_resync': {'type': 'bool'}, 'reboot_device': {'type': 'bool'}, 'credential_update': {'type': 'bool'}, - 'export_device_details_limit': {'default': 500, 'type': 'bool'}, + 'export_device_details_limit': {'default': 500, 'type': 'int'}, + 'resync_device_count': {'default': 200, 'type': 'int'}, + 'resync_max_timeout': {'default': 600, 'type': 'int'}, 'force_sync': {'type': 'bool'}, 'clean_config': {'type': 'bool'}, 'add_user_defined_field': { @@ -1335,43 +1348,84 @@ def resync_devices(self): device_ids = self.get_device_ids(input_device_ips) try: + # Resync the device in a batch of 200 devices at a time in inventory by default + start = 0 + resync_failed_for_all_device = False + resync_device_count = self.config[0].get("resync_device_count", 200) + resync_successful_devices, resync_failed_devices = [], [] force_sync = self.config[0].get("force_sync", False) - resync_param_dict = { - 'payload': device_ids, - 'force_sync': force_sync - } - response = self.dnac._exec( - family="devices", - function='sync_devices_using_forcesync', - op_modifies=True, - params=resync_param_dict, - ) - self.log("Received API response from 'sync_devices_using_forcesync': {0}".format(str(response)), "DEBUG") + resync_task_dict = {} + + while start < len(device_ids): + device_ids_list = device_ids[start:start + resync_device_count] + device_ips_list = input_device_ips[start:start + resync_device_count] + resync_param_dict = { + 'payload': device_ids_list, + 'force_sync': force_sync + } + self.log("Request payload for reysnc Device having the device ids: {0}".format(device_ids_list), "INFO") + response = self.dnac._exec( + family="devices", + function='sync_devices_using_forcesync', + op_modifies=True, + params=resync_param_dict, + ) + self.log("Received API response from 'sync_devices_using_forcesync': {0}".format(str(response)), "DEBUG") + + if not response or not isinstance(response, dict): + self.status = "failed" + self.msg = "Unable to resync the device(s) {0} in the inventory as response is empty.".format(device_ips_list) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + return self - if response and isinstance(response, dict): task_id = response.get('response').get('taskId') + resync_task_dict[task_id] = device_ips_list + start += resync_device_count + + for task_id, device_list in resync_task_dict.items(): + max_timeout = 
self.config[0].get("resync_max_timeout", 600) + start_time = time.time() + + while (True): + + if (time.time() - start_time) >= max_timeout: + self.log("""Max timeout of {0} has reached for the task id '{1}' for the device(s) '{2}' to be resynced and unexpected + task status so moving out to next task id""".format(max_timeout, task_id, device_list), "WARNING") + resync_failed_devices.extend(device_list) + break - while True: execution_details = self.get_task_details(task_id) if 'Synced' in execution_details.get("progress"): - self.status = "success" - self.result['changed'] = True - self.result['response'] = execution_details - self.msg = "Devices have been successfully resynced. Devices resynced: {0}".format(str(input_device_ips)) - self.log(self.msg, "INFO") + resync_successful_devices.extend(device_list) break elif execution_details.get("isError"): - self.status = "failed" - failure_reason = execution_details.get("failureReason") - if failure_reason: - self.msg = "Device resynced get failed because of {0}".format(failure_reason) - else: - self.msg = "Device resynced get failed." - self.log(self.msg, "ERROR") - self.result['response'] = self.msg + resync_failed_devices.extend(device_list) break + time.sleep(self.params.get('dnac_task_poll_interval')) + + if resync_failed_devices and resync_successful_devices: + self.msg = ( + "Device(s) '{0}' have been successfully resynced in the inventory in Cisco Catalyst Center. " + "Some device(s) '{1}' failed." + ).format(resync_successful_devices, resync_failed_devices) + elif resync_failed_devices: + resync_failed_for_all_device = True + self.msg = "Device resynced get failed for all given device(s) '{0}'.".format(resync_failed_devices) + else: + self.msg = ( + "Device(s) '{0}' have been successfully resynced in the inventory in Cisco Catalyst Center. " + ).format(resync_successful_devices) + if resync_failed_for_all_device: + self.status = "failed" + self.log(self.msg, "ERROR") + else: + self.status = "success" + self.log(self.msg, "INFO") + self.result['changed'] = True + self.result["response"] = self.msg except Exception as e: self.status = "failed" error_message = "Error while resyncing device in Cisco Catalyst Center: {0}".format(str(e)) diff --git a/plugins/modules/inventory_workflow_manager.py b/plugins/modules/inventory_workflow_manager.py index ac719c6205..61fbef9f0f 100644 --- a/plugins/modules/inventory_workflow_manager.py +++ b/plugins/modules/inventory_workflow_manager.py @@ -185,6 +185,17 @@ description: Make this as true needed for the resyncing of device. type: bool default: False + resync_device_count: + description: Specifies the maximum number of devices to be resynced in the inventory. Ensure this count does not exceed 200, + as attempting to resync more than 200 devices may cause the 'sync_devices_using_forcesync' API to enter an + infinite loop. + type: int + default: 200 + resync_max_timeout: + description: Sets the maximum timeout for the device resync process in the inventory, in seconds. The default is 600 seconds, + which helps prevent infinite loops. + type: int + default: 600 reboot_device: description: Make this as true needed for the Rebooting of Access Points. 
type: bool @@ -781,7 +792,9 @@ def validate_input(self): 'device_resync': {'type': 'bool'}, 'reboot_device': {'type': 'bool'}, 'credential_update': {'type': 'bool'}, - 'export_device_details_limit': {'default': 500, 'type': 'bool'}, + 'export_device_details_limit': {'default': 500, 'type': 'int'}, + 'resync_device_count': {'default': 200, 'type': 'int'}, + 'resync_max_timeout': {'default': 600, 'type': 'int'}, 'force_sync': {'type': 'bool'}, 'clean_config': {'type': 'bool'}, 'add_user_defined_field': { @@ -1334,43 +1347,86 @@ def resync_devices(self): return self device_ids = self.get_device_ids(input_device_ips) + try: + # Resync the device in a batch of 200 devices at a time in inventory by default + start = 0 + resync_failed_for_all_device = False + resync_device_count = self.config[0].get("resync_device_count", 200) + resync_successful_devices, resync_failed_devices = [], [] force_sync = self.config[0].get("force_sync", False) - resync_param_dict = { - 'payload': device_ids, - 'force_sync': force_sync - } - response = self.dnac._exec( - family="devices", - function='sync_devices_using_forcesync', - op_modifies=True, - params=resync_param_dict, - ) - self.log("Received API response from 'sync_devices_using_forcesync': {0}".format(str(response)), "DEBUG") + resync_task_dict = {} + + while start < len(device_ids): + device_ids_list = device_ids[start:start + resync_device_count] + device_ips_list = input_device_ips[start:start + resync_device_count] + resync_param_dict = { + 'payload': device_ids_list, + 'force_sync': force_sync + } + self.log("Request payload for reysnc Device having the device ids: {0}".format(device_ids_list), "INFO") + response = self.dnac._exec( + family="devices", + function='sync_devices_using_forcesync', + op_modifies=True, + params=resync_param_dict, + ) + self.log("Received API response from 'sync_devices_using_forcesync': {0}".format(str(response)), "DEBUG") + + if not response or not isinstance(response, dict): + self.status = "failed" + self.msg = "Unable to resync the device(s) {0} in the inventory as response is empty.".format(device_ips_list) + self.log(self.msg, "ERROR") + self.result['response'] = self.msg + return self - if response and isinstance(response, dict): task_id = response.get('response').get('taskId') + resync_task_dict[task_id] = device_ips_list + start += resync_device_count + + for task_id, device_list in resync_task_dict.items(): + max_timeout = self.config[0].get("resync_max_timeout", 600) + start_time = time.time() + + while (True): + + if (time.time() - start_time) >= max_timeout: + self.log("""Max timeout of {0} has reached for the task id '{1}' for the device(s) '{2}' to be resynced and unexpected + task status so moving out to next task id""".format(max_timeout, task_id, device_list), "WARNING") + resync_failed_devices.extend(device_list) + break - while True: execution_details = self.get_task_details(task_id) if 'Synced' in execution_details.get("progress"): - self.status = "success" - self.result['changed'] = True - self.result['response'] = execution_details - self.msg = "Devices have been successfully resynced. Devices resynced: {0}".format(str(input_device_ips)) - self.log(self.msg, "INFO") + resync_successful_devices.extend(device_list) break elif execution_details.get("isError"): - self.status = "failed" - failure_reason = execution_details.get("failureReason") - if failure_reason: - self.msg = "Device resynced get failed because of {0}".format(failure_reason) - else: - self.msg = "Device resynced get failed." 
- self.log(self.msg, "ERROR") - self.result['response'] = self.msg + resync_failed_devices.extend(device_list) break + time.sleep(self.params.get('dnac_task_poll_interval')) + + if resync_failed_devices and resync_successful_devices: + self.msg = ( + "Device(s) '{0}' have been successfully resynced in the inventory in Cisco Catalyst Center. " + "Some device(s) '{1}' failed." + ).format(resync_successful_devices, resync_failed_devices) + elif resync_failed_devices: + resync_failed_for_all_device = True + self.msg = "Device resynced get failed for all given device(s) '{0}'.".format(resync_failed_devices) + else: + self.msg = ( + "Device(s) '{0}' have been successfully resynced in the inventory in Cisco Catalyst Center. " + ).format(resync_successful_devices) + + if resync_failed_for_all_device: + self.status = "failed" + self.log(self.msg, "ERROR") + else: + self.status = "success" + self.log(self.msg, "INFO") + self.result['changed'] = True + self.result["response"] = self.msg except Exception as e: self.status = "failed" diff --git a/plugins/modules/network_settings_workflow_manager.py b/plugins/modules/network_settings_workflow_manager.py index f8f9642a5f..41784aed2c 100644 --- a/plugins/modules/network_settings_workflow_manager.py +++ b/plugins/modules/network_settings_workflow_manager.py @@ -638,6 +638,7 @@ import copy import re +import time from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( DnacBase, @@ -660,6 +661,7 @@ def __init__(self, module): self.global_pool_obj_params = self.get_obj_params("GlobalPool") self.reserve_pool_obj_params = self.get_obj_params("ReservePool") self.network_obj_params = self.get_obj_params("Network") + self.all_reserved_pool_details = {} def validate_input(self): """ @@ -1193,6 +1195,56 @@ def get_network_params(self, site_id): self.log("Formatted playbook network details: {0}".format(network_details), "DEBUG") return network_details + def get_reserved_ip_subpool(self, site_id): + """ + Retrieve all the reserved IP subpool details from the Cisco Catalyst Center. + + Parameters: + site_id (str) - The Site ID for which reserved pool details are requested. + self (object) - The current object details. + + Returns: + self (object) - The current object with updated desired Fabric Transits information. + """ + + value = 1 + self.all_reserved_pool_details.update({site_id: []}) + start_time = time.time() + while True: + response = self.dnac._exec( + family="network_settings", + function="get_reserve_ip_subpool", + op_modifies=True, + params={ + "site_id": site_id, + "offset": value + } + ) + if not isinstance(response, dict): + self.msg = "Error in getting reserve pool - Response is not a dictionary" + self.log(self.msg, "CRITICAL") + self.status = "exited" + return self.check_return_status() + + reserve_pool_details = response.get("response") + if not reserve_pool_details: + self.log("No subpools are reserved in the site with ID - '{0}'." + .format(site_id), "DEBUG") + return self + + self.all_reserved_pool_details.get(site_id).extend(reserve_pool_details) + value += 25 + end_time = time.time() + if (end_time - start_time) >= self.max_timeout: + self.msg = ( + "Max timeout of {0} sec has reached for the API 'get_reserved_ip_subpool' status." 
+ .format(self.max_timeout) + ) + self.status = "failed" + break + + return self + def global_pool_exists(self, name): """ Check if the Global Pool with the given name exists @@ -1273,43 +1325,21 @@ def reserve_pool_exists(self, name, site_name): self.status = "failed" return reserve_pool - value = 1 - while True: - self.log(str(value)) - response = self.dnac._exec( - family="network_settings", - function="get_reserve_ip_subpool", - op_modifies=True, - params={ - "site_id": site_id, - "offset": value - } - ) - if not isinstance(response, dict): - reserve_pool.update({"success": False}) - self.msg = "Error in getting reserve pool - Response is not a dictionary" - self.log(self.msg, "CRITICAL") - self.status = "exited" - return self.check_return_status() + if not self.all_reserved_pool_details.get(site_id): + self.get_reserved_ip_subpool(site_id) - all_reserve_pool_details = response.get("response") - self.log(str(all_reserve_pool_details)) - if not all_reserve_pool_details: - self.log("Reserved pool {0} does not exist in the site {1}" - .format(name, site_name), "DEBUG") - return reserve_pool - - reserve_pool_details = get_dict_result(all_reserve_pool_details, "groupName", name) - self.log(str(reserve_pool_details)) - if reserve_pool_details: - self.log("Reserve pool found with name '{0}' in the site '{1}': {2}" - .format(name, site_name, reserve_pool_details), "INFO") - reserve_pool.update({"exists": True}) - reserve_pool.update({"id": reserve_pool_details.get("id")}) - reserve_pool.update({"details": self.get_reserve_pool_params(reserve_pool_details)}) - break + if not self.all_reserved_pool_details.get(site_id): + self.log("Reserved pool {0} does not exist in the site {1}" + .format(name, site_name), "DEBUG") + return reserve_pool - value += 25 + reserve_pool_details = get_dict_result(self.all_reserved_pool_details.get(site_id), "groupName", name) + if reserve_pool_details: + self.log("Reserve pool found with name '{0}' in the site '{1}': {2}" + .format(name, site_name, reserve_pool_details), "INFO") + reserve_pool.update({"exists": True}) + reserve_pool.update({"id": reserve_pool_details.get("id")}) + reserve_pool.update({"details": self.get_reserve_pool_params(reserve_pool_details)}) self.log("Reserved pool details: {0}".format(reserve_pool.get("details")), "DEBUG") self.log("Reserved pool id: {0}".format(reserve_pool.get("id")), "DEBUG") @@ -2535,6 +2565,7 @@ def verify_diff_merged(self, config): self """ + self.all_reserved_pool_details = {} self.get_have(config) self.log("Current State (have): {0}".format(self.have), "INFO") self.log("Requested State (want): {0}".format(self.want), "INFO") @@ -2617,6 +2648,7 @@ def verify_diff_deleted(self, config): self """ + self.all_reserved_pool_details = {} self.get_have(config) self.log("Current State (have): {0}".format(self.have), "INFO") self.log("Desired State (want): {0}".format(self.want), "INFO") diff --git a/plugins/modules/provision_workflow_manager.py b/plugins/modules/provision_workflow_manager.py index fc67313716..398e473e12 100644 --- a/plugins/modules/provision_workflow_manager.py +++ b/plugins/modules/provision_workflow_manager.py @@ -182,6 +182,21 @@ managed_ap_locations: - Global/USA/RTP/BLD11/BLD11_FLOOR1 +- name: Unprovision a device from a site + cisco.dnac.provision_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: 
True + state: deleted + config_verify: True + config: + - management_ip_address: 204.1.2.2 + """ RETURN = r""" @@ -261,7 +276,7 @@ def validate_input(self, state=None): """ if not self.config: - self.msg = "config not available in playbook for validattion" + self.msg = "config not available in playbook for validation" self.status = "success" return self @@ -756,8 +771,8 @@ def get_wireless_params(self): ] if not (wireless_params[0].get("managedAPLocations") and isinstance(wireless_params[0].get("managedAPLocations"), list)): - msg = "Managed AP locations must be passed as a list of sites. For example, [Global/USA/RTP/BLD11/BLD11_FLOOR1,\ - Global/USA/RTP/BLD11/BLD11_FLOOR2]" + msg = "Missing Managed AP Locations: Please specify the intended location(s) for the wireless device \ + within the site hierarchy." self.log(msg, "CRITICAL") self.module.fail_json(msg=msg, response=[]) @@ -849,7 +864,7 @@ def perform_wireless_reprovision(self): ) self.log("Wireless provisioning response collected from 'provision_update' API is: {0}".format(str(response)), "DEBUG") execution_id = response.get("executionId") - provision_info = self.get_execution_status_wireless(execution_id=execution_id) + self.get_execution_status_wireless(execution_id=execution_id) self.result["changed"] = True self.result['msg'] = "Wireless device with IP address {0} got re-provisioned successfully".format(self.validated_config[0]["management_ip_address"]) self.result['diff'] = self.validated_config @@ -905,7 +920,7 @@ class instance for further use. ) self.log("Reprovisioning response collected from 're_provision_wired_device' API is: {0}".format(response), "DEBUG") task_id = response.get("taskId") - provision_info = self.get_task_status(task_id=task_id) + self.get_task_status(task_id=task_id) self.result["changed"] = True self.result['msg'] = "Re-Provision done Successfully" self.result['diff'] = self.validated_config @@ -979,7 +994,7 @@ class instance for further use. ) self.log("Wireless provisioning response collected from 'provision' API is: {0}".format(str(response)), "DEBUG") execution_id = response.get("executionId") - provision_info = self.get_execution_status_wireless(execution_id=execution_id) + self.get_execution_status_wireless(execution_id=execution_id) self.result["changed"] = True self.result['msg'] = "Wireless device with IP {0} got provisioned successfully".format(self.validated_config[0]["management_ip_address"]) self.result['diff'] = self.validated_config @@ -1000,7 +1015,7 @@ class instance for further use. 
return self task_id = response.get("taskId") - provision_info = self.get_task_status(task_id=task_id) + self.get_task_status(task_id=task_id) self.result["changed"] = True self.result['msg'] = "Provision done Successfully" self.result['diff'] = self.validated_config diff --git a/plugins/modules/rma_workflow_manager.py b/plugins/modules/rma_workflow_manager.py index de0d8c2439..d9a141ead9 100644 --- a/plugins/modules/rma_workflow_manager.py +++ b/plugins/modules/rma_workflow_manager.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -__author__ = ("Trupti A Shetty, Mohamed Rafeek, Madhan Sankaranarayanan") +__author__ = ("Trupti A Shetty, Mohamed Rafeek, Madhan Sankaranarayanan, Ajith Andrew J") DOCUMENTATION = r""" @@ -44,6 +44,7 @@ - Trupti A Shetty (@TruptiAShetty) - A Mohamed Rafeek (@mohamedrafeek) - Madhan Sankaranarayanan (@madhansansel) + - Ajith Andrew J (@ajithandrewj) options: config_verify: @@ -311,6 +312,7 @@ def __init__(self, module): self.supported_states = ["merged", "deleted", "replaced"] self.payload = module.params self.keymap = {} + self.faulty_device, self.replacement_device = [], [] def pprint(self, jsondata): return json.dumps(jsondata, indent=4, separators=(',', ': ')) @@ -445,8 +447,14 @@ def get_have(self): config = self.want["config"] identifier_keys = [ ("faulty_device_serial_number", "replacement_device_serial_number"), + ("faulty_device_serial_number", "replacement_device_name"), + ("faulty_device_serial_number", "replacement_device_ip_address"), + ("faulty_device_name", "replacement_device_serial_number"), ("faulty_device_name", "replacement_device_name"), - ("faulty_device_ip_address", "replacement_device_ip_address") + ("faulty_device_name", "replacement_device_ip_address"), + ("faulty_device_ip_address", "replacement_device_ip_address"), + ("faulty_device_ip_address", "replacement_device_name"), + ("faulty_device_ip_address", "replacement_device_serial_number") ] valid_identifier_found = False @@ -460,29 +468,41 @@ def get_have(self): valid_identifier_found = True # Check if faulty device exists - faulty_device_id, faulty_device_serial_number = self.device_exists(faulty_identifier, faulty_key) - if faulty_device_id is None or faulty_device_serial_number is None: + faulty_device_params = self.device_exists(faulty_identifier, faulty_key) + + if not faulty_device_params: self.msg = "Faulty device '{0}' not found in Cisco Catalyst Center".format(faulty_identifier) self.log(self.msg, "ERROR") self.status = "failed" return self - have["faulty_device_id"] = faulty_device_id - have["faulty_device_serial_number"] = faulty_device_serial_number + have["faulty_device_id"] = faulty_device_params.get("device_id") + have["faulty_device_serial_number"] = faulty_device_params.get("serial_number") + have["faulty_device_name"] = faulty_device_params.get("device_name") + have["faulty_device_family_name"] = faulty_device_params.get("family_name") + have["faulty_device_series_name"] = faulty_device_params.get("series_name") + have["faulty_device_reachability_status"] = faulty_device_params.get("reachability_status") + have["faulty_device_platform_id"] = faulty_device_params.get("platform_id") have[faulty_key] = faulty_identifier have["faulty_device_exists"] = True self.log("Faulty device '{0}' found in Cisco Catalyst Center".format(faulty_identifier), "INFO") # Check if replacement device exists - replacement_device_id, replacement_device_serial_number = self.device_exists(replacement_identifier, replacement_key) - if 
replacement_device_id is None or replacement_device_serial_number is None: + replacement_device_params = self.device_exists(replacement_identifier, replacement_key) + + if not replacement_device_params: self.msg = "Replacement device '{0}' not found in Cisco Catalyst Center".format(replacement_identifier) self.log(self.msg, "ERROR") self.status = "failed" return self - have["replacement_device_id"] = replacement_device_id - have["replacement_device_serial_number"] = replacement_device_serial_number + have["replacement_device_id"] = replacement_device_params.get("device_id") + have["replacement_device_serial_number"] = replacement_device_params.get("serial_number") + have["replacement_device_name"] = replacement_device_params.get("device_name") + have["replacement_device_family_name"] = replacement_device_params.get("family_name") + have["replacement_device_series_name"] = replacement_device_params.get("series_name") + have["replacement_device_reachability_status"] = replacement_device_params.get("reachability_status") + have["replacement_device_platform_id"] = replacement_device_params.get("platform_id") have[replacement_key] = replacement_identifier have["replacement_device_exists"] = True self.log("Replacement device '{0}' found in Cisco Catalyst Center".format(replacement_identifier), "INFO") @@ -514,6 +534,40 @@ def get_have(self): return self + def rma_device_replacement_pre_check(self): + """ + Performs a pre-check for RMA device replacement to ensure compatibility and reachability. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Description: + This method verifies that the faulty device and the replacement device belong to the same family and series, + ensuring they are compatible for replacement. It also checks the network reachability of the replacement device. + If both checks pass, the method logs a success message and proceeds. If either check fails, it logs an error, + updates the status to 'failed', and returns the instance for further handling in the RMA workflow. + """ + + if self.have["faulty_device_platform_id"] != self.have["replacement_device_platform_id"]: + self.msg = ( + "The faulty device and the replacement device do not belong to the same platform, family and series." + " These attributes must match for a valid replacement." + ) + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + self.log("The faulty device and the replacement device belong to the same platform, family and series.", "DEBUG") + + if self.have["replacement_device_reachability_status"] != "Reachable": + self.msg = "The replacement device is not reachable. Unable to proceed with the RMA device replacement." + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + self.log("The replacement device '{0}' is reachable.".format(self.have.get("replacement_device_name")), "DEBUG") + return self + def device_exists(self, identifier, identifier_type): """ Check if a device exists in Cisco Catalyst Center and return its ID and serial number. @@ -522,14 +576,15 @@ def device_exists(self, identifier, identifier_type): - identifier (str): The identifier of the device to check. - identifier_type (str): The type of identifier (name, ip_address, or serial_number). Returns: - - tuple: A tuple containing the device ID and serial number, or (None, None) if the device is not found or an error occurs. 
+ - dict: A dict containing the device ID, serial number, device_name, series_name, family_name, reachability_status, platform_id if the device + is found or empty dict if device not found. Description: This method queries Cisco Catalyst Center to check if a specified device exists based on the provided identifier. It constructs the appropriate query parameters based on the identifier type (hostname, IP address, or serial number). The method then sends a request to Cisco Catalyst Center using the 'get_device_list' function. If the device is found and both ID and serial number are available, it returns these as a tuple. If the device is not found, lacks necessary information, or if an error occurs during the process, - it logs an appropriate error message and returns (None, None). + it logs an appropriate error message and returns empty dict. This method is used to verify the existence of both faulty and replacement devices in the RMA workflow. """ params = {} @@ -541,7 +596,7 @@ def device_exists(self, identifier, identifier_type): params["serialNumber"] = identifier else: self.log("Invalid identifier type provided", "ERROR") - return None, None + return {} try: response = self.dnac._exec( @@ -551,14 +606,21 @@ def device_exists(self, identifier, identifier_type): params=params ) self.log("Received API response from 'get_device_list': {0}".format(self.pprint(response)), "DEBUG") + device_param_list = {} if response and response.get('response'): if len(response['response']) > 0: device = response['response'][0] - device_id = device.get('id') - serial_number = device.get('serialNumber') - if device_id and serial_number: - return device_id, serial_number + device_param_list["device_id"] = device.get('id') + device_param_list["serial_number"] = device.get('serialNumber') + device_param_list["device_name"] = device.get('hostname') + device_param_list["series_name"] = device.get('series') + device_param_list["family_name"] = device.get('family') + device_param_list["reachability_status"] = device.get('reachabilityStatus') + device_param_list["platform_id"] = device.get('platformId') + + if device_param_list: + return device_param_list self.log("Device found but ID or serial number missing", "ERROR") else: self.log("Device not found in Cisco Catalyst Center", "ERROR") @@ -567,7 +629,7 @@ def device_exists(self, identifier, identifier_type): except Exception as e: self.log("Exception occurred while querying device: {0}".format(str(e)), "ERROR") - return None, None + return {} def validate_device_replacement_params(self): """ @@ -620,6 +682,38 @@ def validate_device_replacement_params(self): self.status = "success" return self + def device_ready_for_replacement_check(self): + """ + Checks if the faulty device is ready for replacement. + Parameters: + - self (object): An instance of the class that interacts with Cisco Catalyst Center and contains device details. + Returns: + bool: + - True if the faulty device is found and is in the "READY-FOR-REPLACEMENT" state. + - False if the faulty device is not found or is not in the "READY-FOR-REPLACEMENT" state. + Description: + This method retrieves a list of devices marked for replacement from Cisco Catalyst Center + using the `device_replacement` API. It iterates through the returned devices to find + the specified faulty device based on its serial number, which is stored in the `self.have` attribute. 
+ If the faulty device is found and its status is "READY-FOR-REPLACEMENT", the method logs a debug message + indicating that the device is already marked for replacement and returns `True`. + If the device is not in the "READY-FOR-REPLACEMENT" state or is not found, it returns `False`. + """ + response = self.dnac._exec( + family="device_replacement", + function='return_replacement_devices_with_details' + ) + devices = response.get("response", []) + self.log("Received API response from 'return_replacement_devices_with_details': {0}".format(self.pprint(response)), "DEBUG") + + for device in devices: + if device.get("faultyDeviceSerialNumber") == self.have.get("faulty_device_serial_number"): + if device.get("replacementStatus") == "READY-FOR-REPLACEMENT": + self.have["device_replacement_id"] = device.get("id") + return True + + return False + def mark_faulty_device_for_replacement(self): """ Mark the faulty device for replacement in Cisco Catalyst Center. @@ -637,37 +731,41 @@ def mark_faulty_device_for_replacement(self): - Updates the status, msg, and result attributes based on the task result. - Handles any exceptions that occur during the process. """ - - import_params = dict( - payload=[{ - "faultyDeviceId": self.have.get("faulty_device_id"), - "replacementStatus": "MARKED-FOR-REPLACEMENT" - }], - ) - - try: - response = self.dnac._exec( - family="device_replacement", - function='mark_device_for_replacement', - params=import_params + is_ready_for_replacement = self.device_ready_for_replacement_check() + if not is_ready_for_replacement: + import_params = dict( + payload=[{ + "faultyDeviceId": self.have.get("faulty_device_id"), + "replacementStatus": "MARKED-FOR-REPLACEMENT" + }], ) - self.log("Received API response from 'mark_device_for_replacement': {0}".format(str(response)), "DEBUG") - task_id = response.get("response", {}).get("taskId") - task_result = self.check_rma_task_status( - task_id, - "Device marked for replacement successfully", - "Error while marking device for replacement" - ) - self.status = task_result["status"] - self.msg = task_result["msg"] - if self.status == "success": - self.result['changed'] = True - except Exception as e: - self.status = "failed" - self.msg = "Exception occurred while marking device for replacement: {0}".format(str(e)) - self.log(self.msg, "ERROR") + try: + response = self.dnac._exec( + family="device_replacement", + function='mark_device_for_replacement', + params=import_params + ) + self.log("Received API response from 'mark_device_for_replacement': {0}".format(str(response)), "DEBUG") + task_id = response.get("response", {}).get("taskId") + task_result = self.check_rma_task_status( + task_id, + "Device marked for replacement successfully", + "Error while marking device for replacement" + ) + self.status = task_result["status"] + self.msg = task_result["msg"] + if self.status == "success": + self.result['changed'] = True + self.device_ready_for_replacement_check() + return self + except Exception as e: + self.status = "failed" + self.msg = "Exception occurred while marking device for replacement: {0}".format(str(e)) + self.log(self.msg, "ERROR") + + self.log("The device '{0}' is already in the 'READY-FOR-REPLACEMENT' state.".format(self.have.get("faulty_device_name")), "DEBUG") return self def get_diff_replaced(self, config): @@ -746,12 +844,14 @@ def get_diff_replaced(self, config): self.result['msg'] = self.msg return self + self.faulty_device.append(self.have.get("faulty_device_name")) + 
self.replacement_device.append(self.have.get("replacement_device_name")) self.result['changed'] = True self.result['msg'] = self.msg except Exception as e: self.status = "failed" - error_msg = "Exception occurred during device replacement: {0}".format(str(e)) + error_msg = "Exception occurred during device replacement " self.log(error_msg, "ERROR") # Attempt to unmark the device self.log("Attempting to unmark the device after exception", "INFO") @@ -842,11 +942,13 @@ def unmark_device_for_replacement(self): - Updates the status, msg, and result attributes based on the task result. - Handles any exceptions that occur during the process. """ + self.log("Unmarking device for replacement...") + device_id = self.get_ready_for_replacement_device_id() import_params = dict( payload=[{ - "faultyDeviceId": self.have.get("faulty_device_id"), - "replacementStatus": "MARKED-FOR-REPLACEMENT" + "id": device_id, + "replacementStatus": "NON-FAULTY" }], ) @@ -865,16 +967,40 @@ def unmark_device_for_replacement(self): "Error while unmarking device for replacement" ) self.status = task_result["status"] - self.msg = task_result["msg"] - if self.status == "success": - self.result['changed'] = True + self.msg = "RMA failed to replace the device: {0}".format(task_result["msg"]) - except Exception as e: + except Exception: self.status = "failed" - self.msg = "Exception occurred while unmarking device for replacement: {0}".format(str(e)) + self.msg = "RMA failed to replace the device: No device found for unmarking replacement" self.log(self.msg, "ERROR") return self + def get_ready_for_replacement_device_id(self): + """ + Retrieves the ID of the first device marked as "READY-FOR-REPLACEMENT" in Cisco Catalyst Center. + Parameters: + - self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + - device_id (str or None): The ID of the first device ready for replacement, or None if no such device is found. + Description: + - This method fetches a list of devices with their replacement status from Cisco Catalyst Center. + - It then checks for the first device with a "READY-FOR-REPLACEMENT" status and returns its ID. + - The method exits early if such a device is found. + """ + response = self.dnac._exec( + family="device_replacement", + function='return_replacement_devices_with_details' + ) + devices = response.get("response", []) + for device in devices: + if device.get("replacementStatus") == "READY-FOR-REPLACEMENT": + device_id = device.get("id") + self.log("Found ready-for-replacement device with ID: {0}".format(device_id)) + return device_id + + self.log("No devices found with status 'READY-FOR-REPLACEMENT'.") + return None + def check_rma_task_status(self, task_id, success_message, error_prefix): """ Check the status of an RMA task in Cisco Catalyst Center. @@ -911,6 +1037,43 @@ def check_rma_task_status(self, task_id, success_message, error_prefix): time.sleep(ccc_poll_interval) timeout_interval -= ccc_poll_interval + def update_rma_profile_messages(self): + """ + Updates and logs messages based on the status of RMA device replacements. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + self (object): The current instance of the class with updated `result` and `msg` attributes. + Description: + This method generates and updates status messages regarding the RMA (Return Material Authorization) device replacement process. + It checks if there are any faulty and replacement devices specified for replacement. 
If both are present, it constructs a + success message detailing the completion of the replacement process for the faulty device(s) with the corresponding replacement device(s). + If no faulty or replacement devices are found, it sets a message indicating that no replacements were performed. + The method then updates the `result` attribute with the status of the operation (`changed` set to True if replacements occurred) + and logs the final message using the appropriate log level. The constructed message is also stored in `result["response"]` + for further reference. + """ + self.result["changed"] = False + result_msg_list = [] + + if self.faulty_device and self.replacement_device: + device_replacement_msg = ( + "Device replacement was successfully completed for the faulty device(s) '{0}'," + " with the replacement device(s) '{1}'.".format("', '".join(self.faulty_device), "', '".join(self.replacement_device)) + ) + result_msg_list.append(device_replacement_msg) + + if result_msg_list: + self.result["changed"] = True + self.msg = " ".join(result_msg_list) + else: + self.msg = "No changes were made. No RMA device replacement were performed in Cisco Catalyst Center." + + self.log(self.msg, "INFO") + self.result["response"] = self.msg + + return self + def verify_diff_replaced(self, config): """ Verify the device replacement status in Cisco Catalyst Center. @@ -934,11 +1097,7 @@ def verify_diff_replaced(self, config): self.log("Replacement device serial number is missing", "WARNING") return self - import_params = dict( - payload={ - "replacementDeviceSerialNumber": replacement_device_serial - } - ) + import_params = {"replacementDeviceSerialNumber": replacement_device_serial} try: response = self.dnac._exec( @@ -949,7 +1108,7 @@ def verify_diff_replaced(self, config): devices = response.get("response", []) replacement_status = None for device in devices: - if device.get("replacementDeviceSerialNumber") == replacement_device_serial: + if device.get("id") == self.have.get("device_replacement_id"): replacement_status = device self.log("Replacement status: {0}".format(self.pprint(replacement_status)), "INFO") except Exception as e: @@ -1000,11 +1159,14 @@ def main(): ccc_device_replacement.reset_values() ccc_device_replacement.get_want(config).check_return_status() ccc_device_replacement.get_have().check_return_status() + ccc_device_replacement.rma_device_replacement_pre_check().check_return_status() ccc_device_replacement.mark_faulty_device_for_replacement().check_return_status() ccc_device_replacement.get_diff_state_apply[state](config).check_return_status() if config_verify: ccc_device_replacement.verify_diff_state_apply[state](config).check_return_status() + ccc_device_replacement.update_rma_profile_messages().check_return_status() + module.exit_json(**ccc_device_replacement.result) diff --git a/plugins/modules/sda_extranet_policies_workflow_manager.py b/plugins/modules/sda_extranet_policies_workflow_manager.py new file mode 100644 index 0000000000..883836fa5a --- /dev/null +++ b/plugins/modules/sda_extranet_policies_workflow_manager.py @@ -0,0 +1,1363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2024, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Ansible module to perform Network Compliance Operations on devices in Cisco Catalyst Center.""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = ("Rugvedi Kapse, Madhan Sankaranarayanan") + +DOCUMENTATION = 
r""" +--- +module: sda_extranet_policies_workflow_manager +short_description: SDA Extranet Policies Module provides functionality for managing SDA Extranet Policy in Cisco Catalyst Center. +description: + - Manage extranet policy operations such as add/update/delete. + - API to create a new extranet policy. + - API to update an existing or edit an existing extranet policy. + - API for deletion of an existing extranet policy using the policy name. +version_added: "6.17.0" +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params +author: Rugvedi Kapse (@rukapse) + Madhan Sankaranarayanan (@madhansansel) +options: + config_verify: + description: Set to True to verify the Cisco Catalyst Center config after applying the playbook config. + type: bool + default: False + state: + description: State of Cisco Catalyst Center after module completion. + type: str + choices: [ merged, deleted ] + default: merged + config: + description: List of Extranet Policy Details for Creating, Updating, or Deleting Operations. + type: list + elements: dict + required: True + suboptions: + extranet_policy_name: + description: Name of the SDA Extranet Policy. + This parameter allows you to specify the desired name when creating a new extranet policy. + The same name can be used to update or delete the policy. + Note - This parameter is required when creating, updating or deleting extranet policy. + type: str + provider_virtual_network: + description: Specifies the Provider Virtual Network containing shared services resources that subscribers need to access. + If a virtual network is already defined as a Provider, it cannot be assigned as a provider again. + Ensure the default route is present in the Global Routing Table if INFRA_VN is defined as the Provider. + For Subscriber Virtual Networks with multiple Providers having overlapping routes, traffic will be + load-balanced across those Provider Virtual Networks. + This parameter is required when creating or updating extranet policy. + type: str + subscriber_virtual_networks: + description: Specifies a list of Subscriber Virtual Networks that require access to the Provider Virtual Network + containing shared services resources. + A Virtual Network previously defined as a Provider cannot be selected as a subscriber. + This parameter is required when creating or updating extranet policy. + type: list + elements: str + fabric_sites: + description: Specifies the Fabric Site(s) where this Extranet Policy will be applied. + The Provider Virtual Network must be added to a Fabric Site before applying the policy. + Fabric Site(s) connected to the same SD-Access Transit must have consistent Extranet Policies. + Selecting a Fabric Site connected to an SD-Access Transit will automatically select all other Sites connected to that Transit. 
+ type: list + elements: str + + +requirements: +- dnacentersdk == 2.7.0 +- python >= 3.9 +notes: + - SDK Methods used are + sites.Sites.get_site + sda.SDA.get_fabric_sites + sda.SDA.get_extranet_policies + sda.SDA.add_extranet_policy + sda.SDA.update_extranet_policy + sda.SDA.delete_extranet_policy_by_id + task.Task.get_task_by_id + + - Paths used are + get /dna/intent/api/v1/site + get /dna/intent/api/v1/sda/fabricSites + get /dna/intent/api/v1/sda/extranetPolicies + post /dna/intent/api/v1/sda/extranetPolicies + put /dna/intent/api/v1/sda/extranetPolicies + delete dna/intent/api/v1/sda/extranetPolicies/${id} + get /dna/intent/api/v1/task/{taskId} + +""" + +EXAMPLES = r""" +- name: Create Extranet Policy + cisco.dnac.sda_extranet_policies_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: True + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_3"] + +- name: Create Extranet Policy with Fabric Site(s) specified + cisco.dnac.sda_extranet_policies_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: True + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_3"] + fabric_sites: ["Global/Test_Extranet_Polcies/USA", "Global/Test_Extranet_Polcies/India"] + +- name: Update existing Extranet Policy + cisco.dnac.sda_extranet_policies_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: True + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_4"] + +- name: Update existing Extranet Policy with Fabric Site(s) specified + cisco.dnac.sda_extranet_policies_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: True + state: merged + config: + - extranet_policy_name: "test_extranet_policy_1" + fabric_sites: ["Global/Test_Extranet_Polcies/USA", "Global/Test_Extranet_Polcies/India"] + provider_virtual_network: "VN_1" + subscriber_virtual_networks: ["VN_2", "VN_4"] + +- name: Delete Extranet Policy + cisco.dnac.sda_extranet_policies_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: True + state: deleted + config: + - extranet_policy_name: "test_extranet_policy_1" +""" + +RETURN = r""" 
+#Case_1: Response when task is successful +sample_response_2: + description: A dictionary with the response returned by the Cisco Catalyst Center Python SDK + returned: always + type: dict + sample: > + { + "status": "string", + "changed": bool, + "msg": "string" + "response": { + "taskId": "string", + "url": "string" + }, + "version": "string" + } + +#Case_3: Response when Error Occurs +sample_response_3: + description: A dictionary with the response returned by the Cisco Catalyst Center Python SDK + returned: always + type: dict + sample: > + { + "changed": bool, + "msg": "string" + } +""" + +import time +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, + validate_list_of_dicts +) + + +class SDAExtranetPolicies(DnacBase): + """ + A class for managing Extranet Policies within the Cisco DNA Center using the SDA API. + """ + def __init__(self, module): + """ + Initialize an instance of the class. + Parameters: + - module: The module associated with the class instance. + Returns: + The method does not return a value. + """ + super().__init__(module) + + def validate_input(self): + """ + Validates the input configuration parameters for the playbook. + Returns: + object: An instance of the class with updated attributes: + - self.msg: A message describing the validation result. + - self.status: The status of the validation (either "success" or "failed"). + - self.validated_config: If successful, a validated version of the "config" parameter. + + Description: + This method validates the fields provided in the playbook against a predefined specification. + It checks if the required fields are present and if their data types match the expected types. + If any parameter is found to be invalid, it logs an error message and sets the validation status to "failed". + If the validation is successful, it logs a success message and returns an instance of the class + with the validated configuration. + """ + # Check if configuration is available + if not self.config: + self.status = "success" + self.msg = "Configuration is not available in the playbook for validation" + self.log(self.msg, "ERROR") + return self + + # Expected schema for configuration parameters + temp_spec = { + "extranet_policy_name": {"type": "str", "required": True}, + "fabric_sites": {"type": "list", "elements": "str", "required": False}, + "provider_virtual_network": {"type": "str", "required": False}, + "subscriber_virtual_networks": {"type": "list", "elements": "str", "required": False}, + } + + # Validate params + valid_temp, invalid_params = validate_list_of_dicts( + self.config, temp_spec + ) + + if invalid_params: + self.msg = "Invalid parameters in playbook: {0}".format(invalid_params) + self.update_result("failed", False, self.msg, "ERROR") + return self + + # Set the validated configuration and update the result with success status + self.validated_config = valid_temp + self.msg = "Successfully validated playbook configuration parameters using 'validated_input': {0}".format(str(valid_temp)) + self.update_result("success", False, self.msg, "INFO") + return self + + def update_result(self, status, changed, msg, log_level, data=None): + """ + Update the result of the operation with the provided status, message, and log level. + Parameters: + - status (str): The status of the operation ("success" or "failed"). + - changed (bool): Indicates whether the operation caused changes. + - msg (str): The message describing the result of the operation. 
+ - log_level (str): The log level at which the message should be logged ("INFO", "ERROR", "CRITICAL", etc.). + - data (dict, optional): Additional data related to the operation result. + Returns: + self (object): An instance of the class. + Note: + - If the status is "failed", the "failed" key in the result dictionary will be set to True. + - If data is provided, it will be included in the result dictionary. + """ + # Update the result attributes with the provided values + self.status = status + self.result["status"] = status + self.result["msg"] = msg + self.result["changed"] = changed + + # Log the message at the specified log level + self.log(msg, log_level) + + # If the status is "failed", set the "failed" key to True + if status == "failed": + self.result["failed"] = True + + # If additional data is provided, include it in the result dictionary + if data: + self.result["data"] = data + + return self + + def exit_while_loop(self, start_time, task_id, task_name, response): + """ + Check if the elapsed time exceeds the specified timeout period and exit the while loop if it does. + Parameters: + - start_time (float): The time when the while loop started. + - task_id (str): ID of the task being monitored. + - task_name (str): Name of the task being monitored. + - response (dict): Response received from the task status check. + Returns: + bool: True if the elapsed time exceeds the timeout period, False otherwise. + """ + # If the elapsed time exceeds the timeout period + if time.time() - start_time > self.params.get("dnac_api_task_timeout"): + if response.get("data"): + # If there is data in the response, include it in the error message + self.msg = "Task {0} with task id {1} has not completed within the timeout period. Task Status: {2} ".format( + task_name, task_id, response.get("data")) + else: + # If there is no data in the response, generate a generic error message + self.msg = "Task {0} with task id {1} has not completed within the timeout period.".format( + task_name, task_id) + + # Update the result with failure status and log the error message + self.update_result("failed", False, self.msg, "ERROR") + return True + + return False + + def get_fabric_ids_list(self, site_details): + """ + Extracts a list of fabric IDs from the provided site details. + Parameters: + - site_details (dict): A dictionary containing site information. Each key-value pair + represents a site, where the value is another dictionary that + includes a 'fabric_id'. + Returns: + list: A list of fabric IDs extracted from the site details. + Description: + This method iterates over the values in the provided site_details dictionary, extracts + the 'fabric_id' from each value, and appends it to a list. The resulting list of fabric IDs + is then returned. + """ + # Initialize an empty list to store fabric IDs + fabric_ids_list = [] + + # Iterate over each site's information in the site details + for site_info in site_details.values(): + fabric_ids_list.append(site_info['fabric_id']) + return fabric_ids_list + + def validate_merged_parameters(self, config): + """ + Validate that the required parameters are present in the configuration for performing + Add or Update Extranet Policy operations. + Parameters: + - config (dict): A dictionary containing the configuration parameters to be validated. + Returns: + None: This function does not return a value. It logs messages and raises exceptions + if required parameters are missing. 
+ Description: + This method checks the provided configuration for the presence of the required parameters: + 'provider_virtual_network' and 'subscriber_virtual_networks'. If any of these parameters + are missing, it logs an error message and raises an exception to halt execution. If all + required parameters are present, it logs a success message indicating successful validation. + """ + # Check for provider_virtual_network + provider_virtual_network = config.get("provider_virtual_network") + if provider_virtual_network is None: + msg = ("Missing required parameter: 'provider_virtual_network'. " + "(extranet_policy_name, provider_virtual_network, and subscriber_virtual_networks) - " + "are the required parameters for performing Add or Update Extranet Policy operations.") + self.log(msg, "ERROR") + self.module.fail_json(msg) + + # Check for subscriber_virtual_networks + subscriber_virtual_networks = config.get("subscriber_virtual_networks") + if subscriber_virtual_networks is None: + msg = ( + "Missing required parameter: 'subscriber_virtual_networks'. " + "(extranet_policy_name, provider_virtual_network, and subscriber_virtual_networks) - " + "are the required parameters for performing Add or Update Extranet Policy operations." + ) + self.log(msg, "ERROR") + self.module.fail_json(msg) + + self.log( + "Successfully validated that the required parameters — (extranet_policy_name, " + "provider_virtual_network, and subscriber_virtual_networks) are provided", + "INFO" + ) + + def get_add_extranet_policy_params(self, config, site_details=None): + """ + Generate parameters required for adding an Extranet Policy based on the provided configuration and site details. + Parameters: + - config (dict): A dictionary containing the configuration parameters. + - site_details (dict, optional): A dictionary containing site details. Default is None. + Returns: + dict: A dictionary containing the parameters for adding an Extranet Policy. + Description: + This method constructs a dictionary of parameters required for adding an Extranet Policy. + It includes the 'extranetPolicyName', 'providerVirtualNetworkName', and 'subscriberVirtualNetworkNames' + from the configuration. If 'fabric_sites' are provided in the configuration and site details are available, + it also includes the 'fabricIds' obtained from the site details. + """ + # Initialize the parameters dictionary with basic required parameters + add_extranet_policy_params = { + "extranetPolicyName": config.get("extranet_policy_name"), + "providerVirtualNetworkName": config.get("provider_virtual_network"), + "subscriberVirtualNetworkNames": config.get("subscriber_virtual_networks") + } + + # Check if 'fabric_sites' are provided and site details are available + if config.get("fabric_sites") and site_details: + add_extranet_policy_params["fabricIds"] = self.get_fabric_ids_list(site_details) + + return add_extranet_policy_params + + def get_update_extranet_policy_params(self, config, extranet_policy_id, site_details=None): + """ + Generate parameters required for updating an Extranet Policy based on the provided configuration, + policy ID, and site details. + Parameters: + config (dict): A dictionary containing the configuration parameters. + extranet_policy_id (str): The ID of the Extranet Policy to be updated. + site_details (dict, optional): A dictionary containing site details. Default is None. + Returns: + dict: A dictionary containing the parameters for updating an Extranet Policy. 
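For illustration, the returned dictionary has roughly this shape (values echo the playbook EXAMPLES above; the policy and fabric IDs are hypothetical):

    # Hypothetical IDs, for illustration only.
    update_extranet_policy_params = {
        "id": "3b1f9a7e-extranet-policy-id",
        "extranetPolicyName": "test_extranet_policy_1",
        "providerVirtualNetworkName": "VN_1",
        "subscriberVirtualNetworkNames": ["VN_2", "VN_4"],
        "fabricIds": ["f8c2-example-fabric-id"],  # present only when fabric_sites are supplied
    }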
+ Description: + This method constructs a dictionary of parameters required for updating an Extranet Policy. + It includes the 'id' of the policy, 'extranetPolicyName', 'providerVirtualNetworkName', and + 'subscriberVirtualNetworkNames' from the configuration. If 'fabric_sites' are provided in the + configuration and site details are available, it also includes the 'fabricIds' obtained from the + site details. + """ + # Initialize the parameters dictionary with basic required parameters + update_extranet_policy_params = { + "id": extranet_policy_id, + "extranetPolicyName": config.get("extranet_policy_name"), + "providerVirtualNetworkName": config.get("provider_virtual_network"), + "subscriberVirtualNetworkNames": config.get("subscriber_virtual_networks") + } + + # Check if 'fabric_sites' are provided and site details are available + if config.get("fabric_sites") and site_details: + update_extranet_policy_params["fabricIds"] = self.get_fabric_ids_list(site_details) + + return update_extranet_policy_params + + def get_delete_extranet_policy_params(self, extranet_policy_id): + """ + Generate parameters required for deleting an Extranet Policy based on the provided policy ID. + Parameters: + extranet_policy_id (str): The unique identifier of the Extranet Policy to be deleted. + Returns: + dict: A dictionary containing the parameters for deleting an Extranet Policy. + Description: + This method constructs a dictionary of parameters required for deleting an Extranet Policy. + It includes the 'id' of the policy, which is necessary for identifying the specific policy + to be deleted. + """ + # Create a dictionary with the extranet policy ID + delete_extranet_policy_params = { + "id": extranet_policy_id + } + + return delete_extranet_policy_params + + def validate_site_exists(self, site_name): + """ + Checks the existence of a site in Cisco Catalyst Center. + Parameters: + site_name (str): The name of the site to be checked. + Returns: + tuple: A tuple containing two values: + - site_exists (bool): Indicates whether the site exists (True) or not (False). + - site_id (str or None): The ID of the site if it exists, or None if the site is not found. + Description: + This method queries Cisco Catalyst Center to determine if a site with the provided name exists. + If the site is found, it sets "site_exists" to True and retrieves the site"s ID. + If the site does not exist, "site_exists" is set to False, and "site_id" is None. + If an exception occurs during the site lookup, an error message is logged, and the module fails. + """ + site_exists = False + site_id = None + response = None + + # Attempt to retrieve site information from Catalyst Center + try: + response = self.dnac._exec( + family="sites", + function="get_site", + op_modifies=True, + params={"name": site_name}, + ) + self.log("Response received post 'get_site' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response["response"]: + site = response.get("response") + site_id = site[0].get("id") + site_exists = True + else: + self.log("No response received from the 'get_site' API call.", "WARNING") + + except Exception as e: + # Log an error message and fail if an exception occurs + self.log("An error occurred while retrieving site details for Site '{0}' using 'get_site' API call: {1}".format(site_name, str(e)), "ERROR") + + if not site_exists: + self.msg = "An error occurred while retrieving site details for Site '{0}'. 
Please verify that the site exists.".format(site_name) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + return (site_exists, site_id) + + def get_site_details(self, fabric_sites): + """ + Retrieve details for each site in the provided fabric sites list. + Parameters: + - fabric_sites (list): A list of site names to be validated and detailed. + Returns: + dict: A dictionary containing the details for each site, including existence and site ID. + Description: + This method takes a list of fabric sites and checks if each site exists using the validate_site_exists method. + It constructs a dictionary where each key is a site name and the value is another dictionary containing + 'site_exists' (a boolean indicating if the site exists) and 'site_id' (the unique identifier of the site). + """ + # Initialize an empty dictionary to store site details + site_details = {} + + # Iterate over each site in the provided fabric sites list + for site in fabric_sites: + # Validate if the site exists and retrieve its ID + (site_exists, site_id) = self.validate_site_exists(site) + site_details[site] = { + "site_exists": site_exists, + "site_id": site_id, + } + + return site_details + + def get_fabric_sites(self, site_name, site_id): + """ + Retrieve the fabric ID for a given site using the SDA 'get_fabric_sites' API call. + Parameters: + - site_name (str): The name of the site. + - site_id (str): The unique identifier of the site. + Returns: + str: The fabric ID if found, otherwise None. + Description: + This method calls the SDA 'get_fabric_sites' API to retrieve the fabric ID for a specified site. It logs the response, + processes the response to extract the fabric ID, and handles any exceptions that occur during the API call. + """ + try: + # Call the SDA 'get_fabric_sites' API with the provided site ID + response = self.dnac._exec( + family="sda", + function="get_fabric_sites", + op_modifies=True, + params={"siteId": site_id}, + ) + self.log("Response received post SDA - 'get_fabric_sites' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response["response"]: + fabric_id = response.get("response")[0]["id"] + return fabric_id + else: + self.log("No response received from the SDA - 'get_fabric_sites' API call.", "WARNING") + return None + except Exception as e: + # Log an error message and fail if an exception occurs + self.msg = ( + "An error occurred while retrieving fabric Site 'Id' for Site '{0}' using SDA - " + "'get_fabric_sites' API call: {1}".format(site_name, str(e)) + ) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def get_fabric_sites_ids(self, site_details): + """ + Retrieve and update fabric IDs for a list of sites. + Parameters: + - site_details (dict): A dictionary where each key is a site name and the value is another dictionary + containing site information, including "site_id". + Returns: + dict: The updated dictionary with fabric IDs added to each site's information. + Description: + This method iterates through the provided `site_details` dictionary, retrieves the fabric ID for each site + by calling the `get_fabric_sites` method, and logs the retrieved fabric IDs along with site details. + It updates the `site_details` dictionary to include the fabric ID for each site and logs the updated + information. 
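A sketch of the resulting structure for a single site (the site path is taken from the EXAMPLES above; the IDs are hypothetical):

    # Illustrative values only.
    site_details = {
        "Global/Test_Extranet_Polcies/USA": {
            "site_exists": True,
            "site_id": "d4e5-example-site-id",
            "fabric_id": "a1b2-example-fabric-id",
        }
    }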
+ """ + for site_name, site_info in site_details.items(): + site_id = site_info["site_id"] + # Get the fabric ID using the site name and site ID + fabric_id = self.get_fabric_sites(site_name, site_id) + self.log("Fabric Id: {0} collected for the fabric_site: {1} with siteId: {2}".format(fabric_id, site_name, site_id)) + site_info["fabric_id"] = fabric_id + self.log("Updated 'site_details' with the fabric_ids of each site. {0}".format(site_details)) + return site_details + + def get_extranet_policies(self, extranet_policy_name): + """ + Retrieve extranet policies for a given policy name using the SDA 'get_extranet_policies' API call. + Parameters: + - extranet_policy_name (str): The name of the extranet policy to retrieve. + Returns: + dict or None: The response dictionary containing policy details if found, otherwise None. + Description: + This method calls the SDA 'get_extranet_policies' API to retrieve details for the specified extranet + policy name. It logs the response received from the API call and processes it. If the API call is successful + and returns data, the first item in the response is returned. If no data is received or an exception occurs, + appropriate warnings or error messages are logged. + """ + try: + # Execute the API call to get extranet policie + response = self.dnac._exec( + family="sda", + function="get_extranet_policies", + op_modifies=True, + params={"extranetPolicyName": extranet_policy_name}, + ) + self.log("Response received post SDA - 'get_extranet_policies' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response.get("response"): + response = response.get("response")[0] + return response + else: + self.log("No response received from the SDA - 'get_extranet_policies' API call.", "WARNING") + return None + except Exception as e: + # Log an error message and fail if an exception occurs + self.msg = ( + "An error occurred while retrieving Extranet Policy Details: '{0}' using SDA - " + "'get_extranet_policies' API call: {1}".format(extranet_policy_name, str(e)) + ) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def validate_extranet_policy_exists(self, config): + """ + Check if an extranet policy exists and retrieve its details. + Parameters: + - config (dict): A dictionary containing configuration details, including the key "extranet_policy_name". + Returns: + tuple: A tuple containing: + - bool: `True` if the extranet policy exists, otherwise `False`. + - str or None: The ID of the extranet policy if it exists, otherwise `None`. + - dict or None: The details of the extranet policy if it exists, otherwise `None`. + Description: + This method verifies the existence of an extranet policy based on the name provided in the `config` dictionary. + It calls the `get_extranet_policies` method to retrieve policy details. If the policy is found, it sets + `extranet_policy_exists` to `True` and extracts the policy ID and details. The method returns a tuple containing + the existence status, policy ID, and policy details. 
+ """ + # Initialize variables to default values + extranet_policy_exists = False + extranet_policy_id = None + + extranet_policy_name = config.get("extranet_policy_name") + extranet_policy_details = self.get_extranet_policies(extranet_policy_name) + + # Check if the policy details were retrieved successfully + if extranet_policy_details: + extranet_policy_exists = True + extranet_policy_id = extranet_policy_details["id"] + + return (extranet_policy_exists, extranet_policy_id, extranet_policy_details) + + def compare_extranet_policies(self, extranet_policy_details, update_extranet_policy_params): + """ + Compare the details of two extranet policies to check if they are equivalent. + Parameters: + - extranet_policy_details (dict): A dictionary containing the current details of the extranet policy. + - update_extranet_policy_params (dict): A dictionary containing the updated policy parameters to compare against. + Returns: + bool: `True` if all values for the keys match between the two dictionaries, `False` otherwise. + Description: + This method compares the details of two extranet policies by iterating over each key in the `extranet_policy_details` + dictionary and checking if the corresponding values in the `update_extranet_policy_params` dictionary match. + Lists are compared regardless of order, while other values are compared directly. The method returns `True` if + all values are equivalent, and `False` if any values differ. + """ + # Iterate over each key in the extranet policy details and compare the details + for key in extranet_policy_details: + value1 = extranet_policy_details.get(key) + value2 = update_extranet_policy_params.get(key) + + if isinstance(value1, list) and isinstance(value2, list): + # Compare lists regardless of order + if sorted(value1) != sorted(value2): + return False + else: + # Compare values directly + if value1 != value2: + return False + + return True + + def get_task_status(self, task_id, task_name): + """ + Retrieve the status of a task by its ID. + Parameters: + - task_id (str): The ID of the task whose status is to be retrieved. + - task_name (str): The name of the task. + Returns: + response (dict): The response containing the status of the task. + Note: + This method makes an API call to retrieve the task status and logs the status information. + If an error occurs during the API call, it will be caught and logged. + """ + # Make an API call to retrieve the task status + try: + response = self.dnac_apply["exec"]( + family="task", + function="get_task_by_id", + params=dict(task_id=task_id), + op_modifies=True, + ) + self.log("Response received post 'get_task_by_id' API Call for the Task {0} with Task id {1} " + "is {2}".format(task_name, str(task_id), str(response)), "DEBUG") + + if response["response"]: + response = response["response"] + else: + self.log("No response received from the 'get_task_by_id' API call.", "CRITICAL") + return response + + # Log the error if an exception occurs during the API call + except Exception as e: + self.msg = "Error occurred while retrieving 'get_task_by_id' for Task {0} with Task id {1}. Error: {2}".format(task_name, task_id, str(e)) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def add_extranet_policy(self, add_extranet_policy_params): + """ + Add a new extranet policy using the SDA 'add_extranet_policy' API call. + Parameters: + - add_extranet_policy_params (dict): A dictionary containing the parameters for the new extranet policy to be added. 
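For illustration, such a dictionary might look like the following (values echo the EXAMPLES above); the API call below wraps it in a one-element payload list:

    # Hypothetical values, for illustration only.
    add_extranet_policy_params = {
        "extranetPolicyName": "test_extranet_policy_1",
        "providerVirtualNetworkName": "VN_1",
        "subscriberVirtualNetworkNames": ["VN_2", "VN_3"],
    }
    # sent as params={"payload": [add_extranet_policy_params]}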
+ Returns: + str or None: The task ID if the policy is added successfully, otherwise `None`. + Description: + This method sends a request to add a new extranet policy using the SDA 'add_extranet_policy' API. It logs the + response from the API call and processes it to extract the task ID. If the policy is added successfully, the + task ID is returned. If the API call does not return a response or an exception occurs, appropriate warnings or + error messages are logged. + """ + try: + # Execute the API call to add a new extranet policy + response = self.dnac._exec( + family="sda", + function="add_extranet_policy", + op_modifies=True, + params={"payload": [add_extranet_policy_params]}, + ) + self.log("Response received post SDA - 'add_extranet_policy' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response["response"]: + self.result.update(dict(response=response["response"])) + self.log("Task Id for the 'add_extranet_policy' task is {0}".format(response["response"].get("taskId")), "INFO") + # Return the task ID + return response["response"].get("taskId") + else: + self.log("No response received from the SDA - 'add_extranet_policy' API call.", "WARNING") + return None + except Exception as e: + # Log an error message and fail if an exception occurs + self.msg = ( + "An error occurred while Adding Extranet Policy to the Cisco Catalyst Center. " + "add_extranet_policy_params: {0} Error: {1}".format(add_extranet_policy_params, str(e)) + ) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def get_add_extranet_policy_status(self, task_id): + """ + Monitor the status of the 'Add Extranet Policy' task until completion or failure. + Parameters: + - task_id (str): The unique identifier of the task to monitor. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method continuously polls the status of an ongoing task identified by `task_id`. It retrieves task status + using `get_task_status` and handles various outcomes, including errors, timeouts, and successful completion. + If the task encounters errors or fails, it logs appropriate error messages and updates the result. If the task + completes successfully, it logs a success message and updates the result accordingly. The method will break + out of the loop either on successful completion, encountering an error, or when a timeout condition is met. + """ + task_name = "Add Extranet Policy" + start_time = time.time() + + while True: + response = self.get_task_status(task_id, task_name) + + # Check if response returned + if not response: + self.msg = "Error retrieving Task status for the task_name {0} task_id {1}".format(task_name, task_id) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if the elapsed time exceeds the timeout + if self.exit_while_loop(start_time, task_id, task_name, response): + break + + # Handle error if task execution encounters an error + if response.get("isError"): + if response.get("failureReason"): + failure_reason = response.get("failureReason") + self.msg = ( + "An error occurred while performing {0} task for add_extranet_policy_params: {1}. " + "The operation failed due to the following reason: {2}".format( + task_name, self.want.get("add_extranet_policy_params"), failure_reason + ) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + else: + self.msg = ( + "An error occurred while performing {0} task for add_extranet_policy_params: {1}. 
" + .format(task_name, self.want.get("add_extranet_policy_params")) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if task completed successfully + if not response.get("isError") and response.get("progress") == "TASK_PROVISION": + if "processcfs_complete=true" in response.get("data").lower(): + extranet_policy_name = self.want.get("add_extranet_policy_params").get("extranetPolicyName") + self.msg = "Extranet Policy - '{0}' has been successfully added to the Cisco Catalyst Center.".format(extranet_policy_name) + self.update_result("success", True, self.msg, "INFO") + break + return self + + def update_extranet_policy(self, update_extranet_policy_params): + """ + Update an existing extranet policy using the SDA 'update_extranet_policy' API call. + Parameters: + - update_extranet_policy_params (dict): A dictionary containing the parameters for updating the extranet policy. + Returns: + str or None: The task ID if the update request is processed successfully, otherwise `None`. + Description: + This method sends a request to update an existing extranet policy using the SDA 'update_extranet_policy' API. + It logs the response from the API call and processes it to extract the task ID. If the API call is successful and + returns a response, the method updates the result with the response details and returns the task ID. If no response + is received or if an exception occurs, appropriate warnings or error messages are logged. + """ + try: + # Execute the API call to update the extranet policy with the provided parameters + response = self.dnac._exec( + family="sda", + function="update_extranet_policy", + op_modifies=True, + params={"payload": [update_extranet_policy_params]}, + ) + self.log("Response received post SDA - 'update_extranet_policy' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response["response"]: + self.result.update(dict(response=response["response"])) + self.log("Task Id for the 'update_extranet_policy' task is {0}".format(response["response"].get("taskId")), "INFO") + # Return the task ID + return response["response"].get("taskId") + else: + self.log("No response received from the SDA - 'update_extranet_policy' API call.", "WARNING") + return None + except Exception as e: + # Log an error message and fail if an exception occurs + self.msg = ( + "An error occurred while Updating Extranet Policy. " + "update_extranet_policy_params: {0}. Error - {1} ".format(update_extranet_policy_params, str(e)) + ) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def get_update_extranet_policy_status(self, task_id): + """ + Monitor the status of the 'Update Extranet Policy' task until completion or failure. + Parameters: + - task_id (str): The unique identifier of the task to monitor. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method continuously polls the status of an ongoing update task identified by `task_id`. It retrieves the + task status using `get_task_status` and handles different outcomes such as errors, timeouts, and successful + completion. The method logs appropriate messages based on the status of the task and updates the result with + success or failure information. The method exits the loop upon encountering an error, exceeding the timeout, + or successful completion of the task. 
+ """ + task_name = "Update Extranet Policy" + start_time = time.time() + + while True: + response = self.get_task_status(task_id, task_name) + + # Check if response returned + if not response: + self.msg = "Error retrieving Task status for the task_name {0} task_id {1}".format(task_name, task_id) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if the elapsed time exceeds the timeout + if self.exit_while_loop(start_time, task_id, task_name, response): + break + + # Handle error if task execution encounters an error + if response.get("isError"): + if response.get("failureReason"): + failure_reason = response.get("failureReason") + self.msg = ( + "An error occurred while performing {0} task for update_extranet_policy_params: {1}. " + "The operation failed due to the following reason: {2}".format( + task_name, self.want.get("update_extranet_policy_params"), failure_reason + ) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + else: + self.msg = ( + "An error occurred while performing {0} task for update_extranet_policy_params: {1}. " + .format(task_name, self.want.get("update_extranet_policy_params")) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if task completed successfully + if not response.get("isError") and response.get("progress") == "TASK_MODIFY_PUT": + if "processcfs_complete=true" in response.get("data").lower(): + extranet_policy_name = self.want.get("update_extranet_policy_params").get("extranetPolicyName") + self.msg = "Extranet Policy - '{0}' has been successfully updated!".format(extranet_policy_name) + self.update_result("success", True, self.msg, "INFO") + break + + return self + + def delete_extranet_policy(self, delete_extranet_policy_params): + """ + Delete an extranet policy using the SDA 'delete_extranet_policy_by_id' API call. + Parameters: + - delete_extranet_policy_params (dict): A dictionary containing the parameters for deleting the extranet policy, + including the policy ID or other identifying details. + Returns: + str or None: The task ID if the delete request is processed successfully, otherwise `None`. + Description: + This method sends a request to delete an extranet policy using the SDA 'delete_extranet_policy_by_id' API. + It logs the response from the API call and processes it to extract the task ID. If the API call is successful and + returns a response, the method updates the result with the response details and returns the task ID. If no response + is received or if an exception occurs, appropriate warnings or error messages are logged. 
+ """ + try: + # Execute the API call to delete the extranet policy with the provided parameters + response = self.dnac._exec( + family="sda", + function="delete_extranet_policy_by_id", + op_modifies=True, + params=delete_extranet_policy_params, + ) + self.log("Response received post SDA - 'delete_extranet_policy_by_id' API call: {0}".format(str(response)), "DEBUG") + + # Process the response if available + if response["response"]: + self.result.update(dict(response=response["response"])) + self.log("Task Id for the 'delete_extranet_policy_by_id' task is {0}".format(response["response"].get("taskId")), "INFO") + # Return the task ID + return response["response"].get("taskId") + else: + self.log("No response received from the SDA - 'delete_extranet_policy_by_id' API call.", "WARNING") + return None + except Exception as e: + # Log an error message and fail if an exception occurs + self.msg = ( + "An error occurred while Deleting Extranet Policy. " + "delete_extranet_policy_params: {0}. Error - {1} ".format(delete_extranet_policy_params, str(e)) + ) + self.update_result("failed", False, self.msg, "ERROR") + self.check_return_status() + + def get_delete_extranet_policy_status(self, task_id): + """ + Monitor the status of the 'Delete Extranet Policy' task until completion or failure. + Parameters: + - task_id (str): The unique identifier of the task to monitor. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method continuously polls the status of an ongoing delete task identified by `task_id`. It uses the + `get_task_status` method to check the task status and handles various outcomes, such as errors, timeouts, + and successful completion. The method logs appropriate messages based on the task's status and updates the + result with success or failure information. The monitoring loop exits upon encountering an error, exceeding + the timeout, or successfully completing the task. + """ + task_name = "Delete Extranet Policy" + start_time = time.time() + + while True: + response = self.get_task_status(task_id, task_name) + + # Check if response returned + if not response: + self.msg = "Error retrieving Task status for the task_name {0} task_id {1}".format(task_name, task_id) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if the elapsed time exceeds the timeout + if self.exit_while_loop(start_time, task_id, task_name, response): + break + + # Handle error if task execution encounters an error + if response.get("isError"): + if response.get("failureReason"): + failure_reason = response.get("failureReason") + self.msg = ( + "An error occurred while performing {0} task for delete_extranet_policy_params: {1}. " + "The operation failed due to the following reason: {2}".format( + task_name, self.want.get("delete_extranet_policy_params"), failure_reason + ) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + else: + self.msg = ( + "An error occurred while performing {0} task for " + "delete_extranet_policy_params: {1}. 
".format( + task_name, self.want.get("delete_extranet_policy_params") + ) + ) + self.update_result("failed", False, self.msg, "ERROR") + break + + # Check if task completed successfully + if not response.get("isError") and response.get("progress") == "TASK_TERMINATE": + if "processcfs_complete=true" in response.get("data").lower(): + extranet_policy_name = self.want.get("extranet_policy_name") + self.msg = "Extranet Policy - '{0}' has been successfully deleted!".format(extranet_policy_name) + self.update_result("success", True, self.msg, "INFO") + break + + return self + + def get_have(self, config): + """ + Retrieve the current state of the extranet policy based on the provided configuration. + Parameters: + - config (dict): Configuration dictionary containing site details. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method checks if the extranet policy specified in the `config` exists. It uses the + `validate_extranet_policy_exists` method to determine if the policy exists and to retrieve its details. + The method logs the current state of the extranet policy and updates the instance attribute `have` with + information about the existence, ID, and details of the extranet policy. It returns the instance for + method chaining. + """ + have = {} + + # check if given site exits, if exists store current site info + (extranet_policy_exists, extranet_policy_id, extranet_policy_details) = self.validate_extranet_policy_exists(config) + + self.log("Current Extranet Policy details (have): {0}".format(str(extranet_policy_details)), "DEBUG") + + have["extranet_policy_exists"] = extranet_policy_exists + have["extranet_policy_id"] = extranet_policy_id + have["current_extranet_policy"] = extranet_policy_details + + self.have = have + self.log("Current State (have): {0}".format(str(self.have)), "INFO") + + return self + + def get_want(self, config, state): + """ + Generate the desired state parameters for API calls based on the provided configuration and state. + Parameters: + - config (dict): Configuration dictionary containing site and policy details. + - state (str): Desired state, which can be 'merged' or 'delete'. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method determines the parameters required for API calls based on the desired state and configuration. + It checks if the extranet policy exists and sets the appropriate parameters for creating, updating, or deleting + the policy. For the 'merged' state, it prepares parameters for updating the policy if it exists or creating + it if it does not. For the 'delete' state, it prepares parameters for deleting the policy if it exists. The + method logs the created parameters and updates the instance attribute `want` with these parameters. It returns + the instance for method chaining. 
+ """ + # Initialize want + want = {} + site_details = {} + + self.log("Creating Parameters for API Calls with state: {0}".format(state)) + + # Identify if policy already exists or needs to be created + extranet_policy_name = config.get("extranet_policy_name") + extranet_policy_exists = self.have.get("extranet_policy_exists") + extranet_policy_id = self.have.get("extranet_policy_id") + extranet_policy_details = self.have.get("current_extranet_policy") + + if state == "merged": + self.validate_merged_parameters(config) + fabric_sites = config.get("fabric_sites") + if fabric_sites: + self.log("Attempting to get the 'SiteId' for the provided fabric sites: {0}".format(fabric_sites), "DEBUG") + site_details = self.get_site_details(fabric_sites) + self.log("Attempting to get the fabric 'Id' for the provided fabric sites: {0}".format(fabric_sites), "DEBUG") + site_details = self.get_fabric_sites_ids(site_details) + + if extranet_policy_exists: + self.log( + "Extranet Policy - '{0}' exists in the Cisco Catalyst Center, " + "therefore setting 'update_extranet_policy_params'.".format(extranet_policy_name), + "DEBUG" + ) + want = dict(update_extranet_policy_params=self.get_update_extranet_policy_params(config, extranet_policy_id, site_details)) + if self.compare_extranet_policies(extranet_policy_details, want["update_extranet_policy_params"]): + self.msg = ( + "Extranet Policy - '{0}' is already same as the update requested, " + "and hence an update operation is not required.".format(extranet_policy_name) + ) + self.update_result("ok", False, self.msg, "INFO") + self.check_return_status() + return self + else: + self.log( + "Extranet Policy - '{0}' does not exist in the Cisco Catalyst Center, " + "therefore setting 'add_extranet_policy_params'.".format(extranet_policy_name), + "DEBUG" + ) + want = dict(add_extranet_policy_params=self.get_add_extranet_policy_params(config, site_details)) + else: + if extranet_policy_exists: + self.log( + "State is delete and Extranet Policy - '{0}' exists in the Cisco Catalyst Center, " + "therefore setting 'delete_extranet_policy_params'.".format(extranet_policy_name), + "DEBUG" + ) + want = dict(extranet_policy_name=extranet_policy_name, + delete_extranet_policy_params=self.get_delete_extranet_policy_params(extranet_policy_id)) + else: + self.msg = ( + "Extranet Policy - '{0}' does not exist in the Cisco Catalyst Center and " + "hence delete operation not required.".format(extranet_policy_name) + ) + self.update_result("ok", False, self.msg, "INFO") + self.check_return_status() + return self + + self.want = want + self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + return self + + def get_diff_merged(self): + """ + Executes actions based on the desired state parameters and checks their status. + Parameters: + - None + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method iterates through a map of action parameters to their corresponding functions for execution and status + checking. For each action parameter present in the desired state (`want`), the associated action function is called + to perform the action, and the corresponding status function is used to check the result. It ensures that all actions + specified in the desired state are executed and their statuses are verified. The method returns the instance for method + chaining. 
+ """ + action_map = { + "add_extranet_policy_params": (self.add_extranet_policy, self.get_add_extranet_policy_status), + "update_extranet_policy_params": (self.update_extranet_policy, self.get_update_extranet_policy_status), + } + + for action_param, (action_func, status_func) in action_map.items(): + # Execute the action and check its status + if self.want.get(action_param): + result_task_id = action_func(self.want.get(action_param)) + status_func(result_task_id).check_return_status() + return self + + def get_diff_deleted(self): + """ + Executes deletion actions based on the desired state parameters and checks their status. + Parameters: + - None + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method iterates through a map of deletion action parameters to their corresponding functions for execution and + status checking. For each deletion action parameter present in the desired state (`want`), the associated action + function is called to perform the deletion, and the corresponding status function is used to check the result. + It ensures that all deletion actions specified in the desired state are executed and their statuses are verified. + The method returns the instance for method chaining. + """ + action_map = { + "delete_extranet_policy_params": (self.delete_extranet_policy, self.get_delete_extranet_policy_status) + + } + for action_param, (action_func, status_func) in action_map.items(): + # Execute the action and check its status + if self.want.get(action_param): + result_task_id = action_func(self.want.get(action_param)) + status_func(result_task_id).check_return_status() + return self + + def verify_diff_merged(self, config): + """ + Verifies the results of the merged state operations by comparing the state before and after the operations. + Parameters: + - config (dict): Configuration dictionary containing site and policy details. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method performs verification of operations related to the 'merged' state. It first retrieves the state before + performing any operations and then compares it with the state after the operations. For add and update operations, + it logs the states before and after the operations and verifies the success based on the presence or absence of + the extranet policy and whether any changes were detected. It ensures that the operations have been performed as + expected and logs appropriate messages based on the results. 
+ """ + pre_operation_state = self.have.copy() + desired_state = self.want + self.get_have(config) + post_operation_state = self.have.copy() + extranet_policy_name = config.get("extranet_policy_name") + + if desired_state.get("add_extranet_policy_params"): + self.log("State before performing ADD Extranet Policy operation: {0}".format(str(pre_operation_state)), "INFO") + self.log("Desired State: {}".format(str(desired_state)), "INFO") + self.log("State after performing ADD Extranet Policy operation: {0}".format(str(post_operation_state)), "INFO") + + if post_operation_state["extranet_policy_exists"]: + self.log("Verified the success of ADD Extranet Policy - '{0}' operation.".format(extranet_policy_name), "INFO") + else: + self.log( + "The ADD Extranet Policy - '{0}' operation may not have been successful " + "since the Extranet Policy does not exist in the Cisco Catalyst Center.".format(extranet_policy_name), + "WARNING" + ) + + if self.want.get("update_extranet_policy_params"): + self.log("State before performing UPDATE Extranet Policy operation: {0}".format(str(pre_operation_state)), "INFO") + self.log("Desired State: {}".format(str(desired_state)), "INFO") + self.log("State after performing UPDATE Extranet Policy operation - '{0}'".format(str(post_operation_state)), "INFO") + + if not self.compare_extranet_policies(pre_operation_state["current_extranet_policy"], post_operation_state["current_extranet_policy"]): + self.log("Verified the success of UPDATE Extranet Policy - '{0}' operation.".format(extranet_policy_name), "INFO") + else: + self.log( + "The UPDATE Extranet Policy - '{0}' operation may not have been performed or " + "may not have been successful because no change was detected in the Extranet Policy " + "in the Cisco Catalyst Center".format(extranet_policy_name), + "WARNING" + ) + return self + + def verify_diff_deleted(self, config): + """ + Verifies the results of the delete state operation by comparing the state before and after the delete operation. + Parameters: + - config (dict): Configuration dictionary containing site and policy details. + Returns: + self: The instance of the class, allowing for method chaining. + Description: + This method performs verification of the delete operation by comparing the state before and after the operation. + It introduces a delay to allow the deletion to process and then retrieves the state. It checks if the extranet policy + no longer exists and logs the result of the delete operation. It ensures that the delete operation was successful + by verifying the absence of the extranet policy and logs appropriate messages based on the outcome. 
+ """ + pre_operation_state = self.have.copy() + desired_state = self.want + time.sleep(10) + self.get_have(config) + post_operation_state = self.have.copy() + extranet_policy_name = config.get("extranet_policy_name") + + self.log("State before performing DELETE Extranet Policy operation: {0}".format(str(pre_operation_state)), "INFO") + self.log("Desired State: {}".format(str(desired_state)), "INFO") + self.log("State after performing DELETE Extranet Policy operation: {0}".format(str(post_operation_state)), "INFO") + + if not post_operation_state["extranet_policy_exists"]: + self.log("Verified the success of DELETE Extranet Policy - '{0}' operation".format(extranet_policy_name), "INFO") + else: + self.log( + "The DELETE Extranet Policy - '{0}' operation may not have been successful since " + "the policy still exists in the Cisco Catalyst Center.".format(extranet_policy_name), + "WARNING" + ) + return self + + +def main(): + """ main entry point for module execution + """ + # Define the specification for the module"s arguments + element_spec = {"dnac_host": {"required": True, "type": "str"}, + "dnac_port": {"type": "str", "default": "443"}, + "dnac_username": {"type": "str", "default": "admin", "aliases": ["user"]}, + "dnac_password": {"type": "str", "no_log": True}, + "dnac_verify": {"type": "bool", "default": "True"}, + "dnac_version": {"type": "str", "default": "2.2.3.3"}, + "dnac_debug": {"type": "bool", "default": False}, + "dnac_log_level": {"type": "str", "default": "WARNING"}, + "dnac_log_file_path": {"type": "str", "default": "dnac.log"}, + "dnac_log_append": {"type": "bool", "default": True}, + "dnac_log": {"type": "bool", "default": False}, + "validate_response_schema": {"type": "bool", "default": True}, + "config_verify": {"type": "bool", "default": False}, + "dnac_api_task_timeout": {"type": "int", "default": 1200}, + "dnac_task_poll_interval": {"type": "int", "default": 2}, + "config": {"required": True, "type": "list", "elements": "dict"}, + 'state': {'default': 'merged', 'choices': ['merged', 'deleted']} + } + + # Initialize the Ansible module with the provided argument specifications + module = AnsibleModule(argument_spec=element_spec, + supports_check_mode=False) + + # Initialize the NetworkCompliance object with the module + ccc_sda_extranet_policies = SDAExtranetPolicies(module) + + # Get the state parameter from the provided parameters + state = ccc_sda_extranet_policies.params.get("state") + + # Check if the state is valid + if state not in ccc_sda_extranet_policies.supported_states: + ccc_sda_extranet_policies.status = "invalid" + ccc_sda_extranet_policies.msg = "State {0} is invalid".format(state) + ccc_sda_extranet_policies.check_return_status() + + # Validate the input parameters and check the return status + ccc_sda_extranet_policies.validate_input().check_return_status() + + # Get the config_verify parameter from the provided parameters + config_verify = ccc_sda_extranet_policies.params.get("config_verify") + + # Iterate over the validated configuration parameters + for config in ccc_sda_extranet_policies.validated_config: + ccc_sda_extranet_policies.reset_values() + ccc_sda_extranet_policies.get_have(config).check_return_status() + ccc_sda_extranet_policies.get_want(config, state).check_return_status() + ccc_sda_extranet_policies.get_diff_state_apply[state]().check_return_status() + if config_verify: + ccc_sda_extranet_policies.verify_diff_state_apply[state](config).check_return_status() + + module.exit_json(**ccc_sda_extranet_policies.result) + + +if __name__ 
== '__main__': + main() diff --git a/plugins/modules/swim_intent.py b/plugins/modules/swim_intent.py index 549efa9823..6243afb254 100644 --- a/plugins/modules/swim_intent.py +++ b/plugins/modules/swim_intent.py @@ -928,7 +928,7 @@ def get_have(self): # check if given site exists, store siteid # if not then use global site site_name = tagging_details.get("site_name") - if site_name: + if site_name and site_name != "Global": site_exists = False (site_exists, site_id) = self.site_exists(site_name) if site_exists: diff --git a/plugins/modules/swim_workflow_manager.py b/plugins/modules/swim_workflow_manager.py index df1e816aa3..5f6c53cb78 100644 --- a/plugins/modules/swim_workflow_manager.py +++ b/plugins/modules/swim_workflow_manager.py @@ -914,7 +914,7 @@ def get_have(self): # check if given site exists, store siteid # if not then use global site site_name = tagging_details.get("site_name") - if site_name: + if site_name and site_name != "Global": site_exists = False (site_exists, site_id) = self.site_exists(site_name) if site_exists: diff --git a/plugins/modules/user_role_workflow_manager.py b/plugins/modules/user_role_workflow_manager.py index 8575b78f50..8256da0627 100644 --- a/plugins/modules/user_role_workflow_manager.py +++ b/plugins/modules/user_role_workflow_manager.py @@ -43,7 +43,9 @@ elements: dict suboptions: username: - description: The 'username' associated with the user account. + description: + - The 'username' associated with the user account. + - Required for user create, update and delete operations. type: str first_name: description: The first name of the user. @@ -61,7 +63,7 @@ description: - The password for the user account, which must adhere to specified complexity requirements. - Must contain at least one special character, one capital letter, one lowercase letter, - and a minimum length of 15 characters. + and a minimum length of 8 characters. - Required for creating a new user account. type: str role_list: @@ -319,7 +321,7 @@ overall: description: Provides the same choice for all sub-parameters. choices: ["deny", "read", "write"] - default: "read" + default: "deny" type: str apis: description: Access Cisco Catalyst Center through REST APIs to drive value. @@ -424,13 +426,13 @@ remote_device_support: description: Allow Cisco support team to remotely troubleshoot any network devices managed by Cisco DNA Center. choices: ["deny", "read", "write"] - default: "read" + default: "deny" type: str scheduler: description: Run, schedule, and monitor network tasks and activities such as deploying policies, provisioning, or upgrading the network, integrated with other back-end services. 
choices: ["deny", "read", "write"] - default: "read" + default: "write" type: str search: description: Search for various objects in Cisco Catalyst Center, including sites, @@ -537,7 +539,7 @@ config_verify: True dnac_api_task_timeout: 1000 dnac_task_poll_interval: 1 - state: merged + state: deleted config: user_details: username: "ajithandrewj" @@ -563,7 +565,7 @@ monitoring_settings: "read" troubleshooting_tools: "deny" network_analytics: - data_access: "write" + - data_access: "write" network_design: - advanced_network_settings: "deny" image_repository: "deny" @@ -573,6 +575,7 @@ virtual_network: "read" network_provision: - compliance: "deny" + eox: "read" image_update: "write" inventory_management: - device_configuration: "write" @@ -605,6 +608,7 @@ - audit_log: "read" event_viewer: "deny" network_reasoner: "write" + remote_device_support: "read" scheduler: "read" search: "write" @@ -692,7 +696,7 @@ config_verify: True dnac_api_task_timeout: 1000 dnac_task_poll_interval: 1 - state: merged + state: deleted config: role_details: - rolename: "role_name" @@ -861,6 +865,9 @@ def __init__(self, module): self.supported_states = ["merged", "deleted"] self.payload = module.params self.keymap = {} + self.created_user, self.updated_user, self.no_update_user = [], [], [] + self.created_role, self.updated_role, self.no_update_role = [], [], [] + self.deleted_user, self.deleted_role = [], [] def validate_input_yml(self, user_role_details): """ @@ -882,14 +889,22 @@ def validate_input_yml(self, user_role_details): - If the validation succeeds, this will allow to go next step, unless this will stop execution based on the fields. """ self.log("Validating the Playbook Yaml File..", "INFO") + config = self.payload.get("config") + self.key = self.generate_key() - if user_role_details is None or isinstance(user_role_details, dict): + if self.key and "error_message" in self.key: + self.msg = self.key.get("error_message") + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + if user_role_details is None or not isinstance(user_role_details, list): self.msg = "Configuration is not available in the playbook for validation or user/role details are not type list" self.log(self.msg, "ERROR") self.status = "failed" return self - if "role_name" in user_role_details[0] and user_role_details[0].get("role_name") is not None: + if "role_details" in config and "role_name" in user_role_details[0] and user_role_details[0].get("role_name") is not None: role_details = { "role_name": {"required": True, "type": "str"}, "description": {"required": False, "type": "str"}, @@ -917,7 +932,19 @@ def validate_input_yml(self, user_role_details): self.status = "success" return self - if "username" in user_role_details[0] or "email" in user_role_details[0]: + if "user_details" in config and "username" in user_role_details[0] or "email" in user_role_details[0]: + for user in user_role_details: + if 'password' in user: + encrypt_password_response = self.encrypt_password(user['password'], self.key.get("generate_key")) + + if encrypt_password_response and "error_message" in encrypt_password_response: + self.msg = encrypt_password_response.get("error_message") + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + user["password"] = encrypt_password_response.get("encrypt_password") + if user_role_details[0].get("username") is not None or user_role_details[0].get("email") is not None: user_details = { "first_name": {"required": False, "type": "str"}, @@ -941,7 +968,11 @@ def validate_input_yml(self, 
user_role_details): self.status = "success" return self - self.msg = "Configuration params like 'username' or 'email' or 'role_name' is not available in the playbook" + self.msg = ( + "'Configuration parameters such as 'username', 'email', or 'role_name' are missing from the playbook' or " + "'The 'user_details' key is invalid for role creation, updation, or deletion' or " + "'The 'role_details' key is invalid for user creation, updation, or deletion'" + ) self.log(self.msg, "ERROR") self.status = "failed" return self @@ -961,6 +992,43 @@ def validate_string_field(self, field_value, regex, error_message, error_message if field_value and not regex.match(field_value): error_messages.append(error_message) + def validate_password(self, password, error_messages): + """ + Validate the provided password and append an error message if it does not meet the criteria. + Args: + - password (str): The password to be validated. Must be a string. + - error_messages (list): A list where error messages are appended if the password does not meet the criteria. + Returns: + None: This function does not return a value, but it may append an error message to `error_messages` if the password is invalid. + Criteria: + - The password must be 8 to 20 characters long. + - The password must include characters from at least three of the following classes: + lowercase letters, uppercase letters, digits, and special characters. + """ + is_valid_password = False + password_criteria_message = ( + "Password must be 8 to 20 characters long and include characters from at least three of " + "the following classes: lowercase letters, uppercase letters, digits, and special characters." + ) + + self.log(password_criteria_message, "DEBUG") + password_regexs = [ + re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?!.*[\W_]).{8,20}$'), + re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[\W_])(?!.*\d).{8,20}$'), + re.compile(r'^(?=.*[a-z])(?=.*\d)(?=.*[\W_])(?!.*[A-Z]).{8,20}$'), + re.compile(r'^(?=.*[A-Z])(?=.*\d)(?=.*[\W_])(?!.*[a-z]).{8,20}$'), + re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[\W_]).{8,20}$') + ] + + for password_regex in password_regexs: + if password_regex.match(password): + is_valid_password = True + break + + if not is_valid_password: + self.log("Password validation failed: {0}".format(password_criteria_message), "DEBUG") + error_messages.append(password_criteria_message) + def validate_role_parameters(self, role_key, params_list, role_config, role_param_map, error_messages): """ Helper function to validate role parameters. @@ -985,6 +1053,48 @@ def validate_role_parameters(self, role_key, params_list, role_config, role_para "DEBUG") self.validate_string_parameter(param, inventory_management[param], error_messages) + def identify_invalid_params(self, params, mismatches): + """ + Identify and collect invalid parameters from a dictionary or list based on allowed parameters. + Args: + - params (dict | list): The dictionary or list of parameters to be checked. Nested dictionaries or lists are supported. + - mismatches (list): A list where invalid parameter names are appended. This list is used to collect all + parameters that are not in 'allowed_params'. + Returns: + - mismatches (list): This function returns the 'mismatches' list containing the names of any parameters that are not in the 'allowed_params' set. + Criteria: + - Parameters in 'params' must be checked recursively if they are dictionaries or lists. + - Only parameters that are not in the 'allowed_params' set are appended to the 'mismatches' list. 
+ """ + allowed_params = [ + "monitoring_and_troubleshooting", "monitoring_settings", "troubleshooting_tools", "data_access", "advanced_network_settings", + "image_repository", "network_hierarchy", "network_profiles", "network_settings", "virtual_network", "compliance", + "eox", "image_update", "inventory_management", "license", "network_telemetry", "pnp", "provision", "device_configuration", + "discovery", "network_device", "port_management", "topology", "app_hosting", "bonjour", "stealthwatch", "umbrella", + "apis", "bundles", "events", "reports", "group_based_policy", "ip_based_access_control", "security_advisories", + "machine_reasoning", "system_management", "audit_log", "event_viewer", "network_reasoner", "remote_device_support", + "scheduler", "search", 'role_name', 'description', 'assurance', 'network_analytics', 'network_design', 'network_provision', + 'network_services', 'platform', 'security', 'system', 'utilities', 'overall' + ] + self.log("Starting to iterate through params to identify unknown parameters.", "DEBUG") + + if isinstance(params, dict): + for key, value in params.items(): + if key not in allowed_params: + self.log("Invalid parameter detected: {0}".format(key), "ERROR") + mismatches.append(key) + + if isinstance(value, dict) or isinstance(value, list): + self.identify_invalid_params(value, mismatches) + elif isinstance(params, list): + for item in params: + self.identify_invalid_params(item, mismatches) + + if not mismatches: + self.log("No invalid parameters found.", "INFO") + + return mismatches + def valid_role_config_parameters(self, role_config): """ Additional validation for the create role configuration payload. @@ -1001,15 +1111,33 @@ def valid_role_config_parameters(self, role_config): - If it fails, "self.status" will be "failed", and "self.msg" will describe the validation issues. """ self.log("Validating role configuration parameters...", "INFO") + + invalid_params = [] + self.identify_invalid_params(role_config, invalid_params) + + if invalid_params: + self.msg = "Invalid parameters in playbook config: Mismatched parameter(s) '{0}' in role '{1}'".format( + "', '".join(invalid_params), role_config.get("role_name")) + self.log(self.msg, "ERROR") + self.status = "failed" + return self + error_messages = [] + role_name = role_config.get("role_name") + role_name_regex = re.compile(r"^[a-zA-Z0-9._-]{1,25}$") + role_name_regex_msg = "Role names must be 1 to 25 characters long and should contain only letters, numbers, periods, underscores, and hyphens." 
- role_name_regex = re.compile(r"^[A-Za-z0-9_-]+$") - self.validate_string_field(role_config.get("role_name"), role_name_regex, - "Role name: 'role_name' must only contain letters, numbers, underscores,\ - and hyphens and should not contain spaces or other special characters.", error_messages) + if role_name: + self.validate_string_field(role_name, role_name_regex, "role_name: '{0}' {1}".format(role_name, role_name_regex_msg), error_messages) + else: + error_messages.append(role_name_regex_msg) - if role_config.get("description"): - self.validate_string_parameter("description", role_config["description"], error_messages) + description = role_config["description"] + if description: + if len(description) > 1000: + error_messages.append("Role description exceeds the maximum length of 1000 characters.") + else: + self.validate_string_parameter("description", description, error_messages) role_param_map = { "assurance": ["overall", "monitoring_and_troubleshooting", "monitoring_settings", "troubleshooting_tools"], @@ -1056,28 +1184,47 @@ def valid_user_config_parameters(self, user_config): """ self.log("Validating user configuration parameters...", "INFO") error_messages = [] + name_regex = re.compile(r"^[A-Za-z0-9@._-]{2,50}$") + name_regex_msg = "can have alphanumeric characters only and must be 2 to 50 characters long." + + first_name = user_config.get("first_name") + self.validate_string_field(first_name, name_regex, + "first_name: First name '{0}' {1}".format(first_name, name_regex_msg), error_messages) + + last_name = user_config.get("last_name") + self.validate_string_field(last_name, name_regex, + "last_name: Last name '{0}' {1}".format(last_name, name_regex_msg), error_messages) + + password = user_config.get("password") + + if password: + decrypt_password_response = self.decrypt_password(password, self.key.get("generate_key")) - first_name_regex = re.compile(r"^[A-Za-z0-9_-]+$") - self.validate_string_field(user_config.get("first_name"), first_name_regex, "first_name: 'first_name' must only contain letters, numbers, \ - underscores, and hyphens and should not contain spaces or other special characters.", error_messages) + if decrypt_password_response and "error_message" in decrypt_password_response: + self.msg = decrypt_password_response.get("error_message") + self.log(self.msg, "ERROR") + self.status = "failed" + return self - last_name_regex = re.compile(r"^[A-Za-z0-9_-]+$") - self.validate_string_field(user_config.get("last_name"), last_name_regex, "last_name: 'last_name' must only contain letters, numbers, \ - underscores, and hyphens and should not contain spaces or other special characters.", error_messages) + user_config['password'] = decrypt_password_response.get("decrypt_password") + plain_password = user_config.get("password") + self.validate_password(plain_password, error_messages) + encrypt_password_response = self.encrypt_password(plain_password, self.key.get("generate_key")) - email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+") - if user_config.get("email"): - self.validate_string_field(user_config.get("email"), email_regex, - "email: Invalid email format for 'email': {0}".format(user_config.get("email")), error_messages) + if encrypt_password_response and "error_message" in encrypt_password_response: + self.msg = encrypt_password_response.get("error_message") + self.log(self.msg, "ERROR") + self.status = "failed" + return self - password_regex = re.compile(r"^(?=.*[A-Z])(?=.*[a-z])(?=.*\d)(?=.*[@$!%*?&])[A-Za-z\d@$!%*?&]{8,}$") - if user_config.get("password"): - 
self.validate_string_field(user_config.get("password"), password_regex, "password: 'Password' does not meet complexity requirements\ - for password: {0}".format(user_config.get("email")), error_messages) + user_config['password'] = encrypt_password_response.get("encrypt_password").decode() + self.log("Password decrypted, validated, and re-encrypted successfully.", "DEBUG") - username_regex = re.compile(r"^[A-Za-z0-9_-]+$") - self.validate_string_field(user_config.get("username"), username_regex, "username: 'Username' must only contain letters, numbers, \ - underscores, and hyphens and should not contain spaces or other special characters.", error_messages) + username_regex = re.compile(r"^[A-Za-z0-9@._-]{3,50}$") + username_regex_msg = "The username must not contain any special characters and must be 3 to 50 characters long." + username = user_config.get("username") + self.validate_string_field(username, username_regex, + "username: '{0}' {1}".format(username, username_regex_msg), error_messages) if user_config.get("role_list"): param_spec = dict(type="list", elements="str") @@ -1193,11 +1340,12 @@ def get_diff_merged(self, config): desired_role = self.generate_role_payload(self.want, "update") self.log("desired role with config {0}".format(str(desired_role)), "DEBUG") - if "error" not in desired_role: + if "error_message" not in desired_role: consolidated_data, update_required_param = self.role_requires_update(self.have["current_role_config"], desired_role) if not consolidated_data: - self.msg = "Role does not need any update" + self.msg = "Role with role_name '{0}' already exists and does not require an update.".format(self.have.get("role_name")) + self.no_update_role.append(self.have.get("role_name")) self.log(self.msg, "INFO") responses["role_operation"] = {"response": config} self.result["response"] = self.msg @@ -1213,7 +1361,7 @@ def get_diff_merged(self, config): self.log("Creating role with config {0}".format(str(config)), "DEBUG") role_info_params = self.generate_role_payload(self.want, "create") - if "error" not in role_info_params: + if "error_message" not in role_info_params: filtered_data, overall_update_required = self.get_permissions(self.want, role_info_params, "create") denied_permissions = self.find_denied_permissions(self.want) denied_required, create_role_params = self.remove_denied_operations(filtered_data, denied_permissions) @@ -1232,7 +1380,8 @@ def get_diff_merged(self, config): (consolidated_data, update_required_param) = self.user_requires_update(self.have["current_user_config"], self.have["current_role_id_config"]) if not consolidated_data: - self.msg = "User does not need any update" + self.msg = "User with username '{0}' already exists and does not require an update.".format(self.have.get("username")) + self.no_update_user.append(self.have.get("username")) self.log(self.msg, "INFO") responses["role_operation"] = {"response": config} self.result["response"] = self.msg @@ -1240,14 +1389,16 @@ def get_diff_merged(self, config): return self if update_required_param.get("role_list"): - user_in_have = self.have["current_user_config"] - update_param = update_required_param - update_param["username"] = user_in_have.get("username") - update_param["user_id"] = user_in_have.get("user_id") - user_info_params = self.snake_to_camel_case(update_param) - task_response = self.update_user(user_info_params) + if self.want["username"] not in self.have["current_user_config"]["username"]: + task_response = {"error_message": "Username for an existing user cannot be updated."} + 
else: + user_in_have = self.have["current_user_config"] + update_param = update_required_param + update_param["user_id"] = user_in_have.get("user_id") + user_info_params = self.snake_to_camel_case(update_param) + task_response = self.update_user(user_info_params) else: - task_response = {"error": "The role name in the user details role_list is not present in the Cisco Catalyst Center," + task_response = {"error_message": "The role name in the user details role_list is not present in the Cisco Catalyst Center," " Please provide a valid role name"} else: # Create the user @@ -1279,10 +1430,10 @@ def get_diff_merged(self, config): user_info_params = self.snake_to_camel_case(user_details) task_response = self.create_user(user_info_params) else: - task_response = {"error": "The role name in the user details role_list is not present in the Cisco Catalyst Center," + task_response = {"error_message": "The role name in the user details role_list is not present in the Cisco Catalyst Center," " Please provide a valid role name"} - if task_response and "error" not in task_response: + if task_response and "error_message" not in task_response: self.log("Task respoonse {0}".format(str(task_response)), "INFO") responses["operation"] = {"response": task_response} self.msg = responses @@ -1292,7 +1443,7 @@ def get_diff_merged(self, config): self.log(self.msg, "INFO") return self - self.msg = task_response.get("error") + self.msg = task_response.get("error_message") self.log(self.msg, "ERROR") self.status = "failed" return self @@ -1328,6 +1479,8 @@ def get_current_config(self, input_config): current_role_id = {} if "role_name" in input_config and input_config["role_name"] is not None: + self.log("Retrieving role details for role_name: {0}".format(str(input_config["role_name"])), "DEBUG") + response_role = self.get_role() response_role = self.camel_to_snake_case(response_role) roles = response_role.get("response", {}).get("roles", []) @@ -1337,9 +1490,13 @@ def get_current_config(self, input_config): current_role_configuration = role role_exists = True + self.log("Role retrieval result - role_exists: {0}, current_role_configuration: {1}".format( + str(role_exists), str(current_role_configuration)), "DEBUG") return role_exists, current_role_configuration if "username" in input_config or "email" in input_config: + self.log("Retrieving user details for username: {0}, email: {1}".format( + str(input_config.get("username")), str(input_config.get("email"))), "DEBUG") response_user = self.get_user() response_role = self.get_role() response_user = self.camel_to_snake_case(response_user) @@ -1356,6 +1513,9 @@ def get_current_config(self, input_config): current_user_configuration = user user_exists = True + self.log("User retrieval result - user_exists: {0}, current_user_configuration: {1}".format( + str(user_exists), str(current_user_configuration)), "DEBUG") + if input_config.get("role_list"): for role_name in input_config["role_list"]: for role in roles: @@ -1366,6 +1526,7 @@ def get_current_config(self, input_config): if role.get("name").lower() == "observer-role": current_role_id[role.get("name").lower()] = role.get("role_id") + self.log("Role ID retrieval result - current_role_id: {0}".format(str(current_role_id)), "DEBUG") return user_exists, current_user_configuration, current_role_id def create_user(self, user_params): @@ -1382,8 +1543,29 @@ def create_user(self, user_params): - Logs the provided user parameters and the received API response. - Returns the API response from the "create_user" function. 
""" + self.log("Create user with 'user_params' argument...", "DEBUG") + decrypt_password_response = self.decrypt_password(user_params['password'], self.key.get("generate_key")) + + if decrypt_password_response and "error_message" in decrypt_password_response: + self.msg = decrypt_password_response.get("error_message") + self.log(self.msg, "ERROR") + self.status = "failed" + return self + + user_params['password'] = decrypt_password_response.get("decrypt_password") + required_keys = ['username', 'password'] + missing_keys = [] + + self.log("Check if each required key is present in the user_params dictionary...", "DEBUG") + for key in required_keys: + if key not in user_params: + missing_keys.append(key) + + if missing_keys: + error_message = "Mandatory parameter(s) '{0}' not present in the user details.".format(", ".join(missing_keys)) + return {"error_message": error_message} + try: - self.log("Create user with user_info_params: {0}".format(str(user_params)), "DEBUG") response = self.dnac._exec( family="user_and_roles", function="add_user_api", @@ -1391,11 +1573,20 @@ def create_user(self, user_params): params=user_params, ) self.log("Received API response from create_user: {0}".format(str(response)), "DEBUG") + self.created_user.append(user_params.get("username")) return response - except Exception: - error_message = "Mandatory field not present: An error occurred while creating the user" - return {"error": error_message} + except Exception as e: + self.log("Unexpected error occurred: {0}".format(str(e)), "ERROR") + if "[403]" in str(e): + error_message = ( + "The Catalyst Center user '{0}' does not have the necessary permissions to 'create or update' a user through the API.".format( + self.payload.get("dnac_username")) + ) + else: + error_message = "Invalid email format for email '{0}' under username '{1}'".format(user_params.get("email"), user_params.get("username")) + + return {"error_message": error_message} def create_role(self, role_params): """ @@ -1420,11 +1611,14 @@ def create_role(self, role_params): params=role_params, ) self.log("Received API response from create_role: {0}".format(str(response)), "DEBUG") + self.created_role.append(role_params.get("role")) return response - except Exception: - error_message = "An error occurred while creating the role without access-level parameters and permissions" - return {"error": error_message} + except Exception as e: + self.log("Unexpected error occurred: {0}".format(str(e)), "ERROR") + error_message = "The Catalyst Center user '{0}' does not have the necessary permissions to 'create a role' through the API.".format( + self.payload.get("dnac_username")) + return {"error_message": error_message} def get_user(self): """ @@ -1513,9 +1707,10 @@ def process_assurance_rules(self, role_config, role_operation, unique_types): permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for assurance resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for assurance resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1577,9 +1772,10 @@ def process_network_analytics_rules(self, role_config, role_operation, unique_ty permission = permission.lower() if permission not in 
["read", "write", "deny"]: - error_message = "Invalid permission {0} for network analytics resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for network analytics resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1638,9 +1834,10 @@ def process_network_design_rules(self, role_config, role_operation, unique_types permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for network design resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for network design resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1699,7 +1896,7 @@ def process_network_provision_rules(self, role_config, role_operation, unique_ty if not isinstance(role_config["network_provision"], list): error_message = "The given network_provision is not in type: list" self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} for provision in role_config["network_provision"]: for resource_name, permission in provision.items(): @@ -1713,9 +1910,10 @@ def process_network_provision_rules(self, role_config, role_operation, unique_ty sub_permission = sub_permission.lower() if sub_permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for network provision for sub-resource {1}".format(sub_permission, sub_resource_name) + error_message = "Invalid permission '{0}' for network provision for sub-resource '{1}' under the role '{2}'".format( + sub_permission, sub_resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if sub_permission == "deny": self.log("Skipping sub-resource {0} because permission is 'deny'".format(sub_resource_name), "DEBUG") @@ -1749,9 +1947,10 @@ def process_network_provision_rules(self, role_config, role_operation, unique_ty permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for network provision resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for network provision resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1822,9 +2021,10 @@ def process_network_services_rules(self, role_config, role_operation, unique_typ permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for network services resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for network services resource '{1}' under the role '{2}'".format( + permission, resource_name, 
self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1867,9 +2067,10 @@ def process_platform_rules(self, role_config, unique_types): permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for platform resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for platform resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -1938,9 +2139,10 @@ def process_security_rules(self, role_config, role_operation, unique_types): permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for security resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for security resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -2009,9 +2211,10 @@ def process_system_rules(self, role_config, role_operation, unique_types): permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for system resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for system resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -2072,9 +2275,10 @@ def process_utilities_rules(self, role_config, role_operation, unique_types): permission = permission.lower() if permission not in ["read", "write", "deny"]: - error_message = "Invalid permission {0} for utilities resource {1}".format(permission, resource_name) + error_message = "Invalid permission '{0}' for utilities resource '{1}' under the role '{2}'".format( + permission, resource_name, self.have.get("role_name")) self.log(error_message, "DEBUG") - return {"error": error_message} + return {"error_message": error_message} if permission == "deny": self.log("Skipping resource {0} because permission is 'deny'".format(resource_name), "DEBUG") @@ -2228,7 +2432,7 @@ def role_requires_update(self, current_role, desired_role): if current_description != desired_description: self.log("Updating description from {0} to {1}.".format(current_description, desired_description), "DEBUG") update_role_params["description"] = desired_description - update_needed = True + update_required = True elif "description" not in update_role_params: update_role_params["description"] = current_description else: @@ -2300,6 +2504,19 @@ def user_requires_update(self, current_user, current_role): else: update_user_params["last_name"] = current_last_name + # Compare and update username + 
desired_username = self.want.get("username") + current_username = current_user.get("username") + if desired_username is not None: + if current_username != desired_username: + self.log("Username for an existing User cannot be updated from {0} to {1}.".format(current_username, desired_username), "DEBUG") + update_user_params["username"] = desired_username + update_needed = True + elif "username" not in update_user_params: + update_user_params["username"] = current_username + else: + update_user_params["username"] = current_username + # Compare and update email desired_email = self.want.get("email") current_email = current_user.get("email") @@ -2349,15 +2566,22 @@ def update_user(self, user_params): - This method sends a request to update a user in Cisco Catalyst Center using the provided - user parameters. It logs the response and returns it. """ - self.log("Updating user with parameters: {0}".format(user_params), "DEBUG") - response = self.dnac._exec( - family="user_and_roles", - function="update_user_api", - op_modifies=True, - params=user_params, - ) - self.log("Received API response from update_user: {0}".format(str(response)), "DEBUG") - return response + try: + self.log("Updating user with parameters: {0}".format(user_params), "DEBUG") + response = self.dnac._exec( + family="user_and_roles", + function="update_user_api", + op_modifies=True, + params=user_params, + ) + self.log("Received API response from update_user: {0}".format(str(response)), "DEBUG") + self.updated_user.append(user_params.get("username")) + return response + + except Exception as e: + self.log("Unexpected error occurred: {0}".format(str(e)), "ERROR") + error_message = "Invalid email format for email '{0}' under username '{1}'".format(user_params.get("email"), user_params.get("username")) + return {"error_message": error_message} def update_role(self, role_params): """ @@ -2374,16 +2598,23 @@ def update_role(self, role_params): and the "update_role_api" function. The method logs the received API response at the "DEBUG" level and finally returns the response. 
""" - self.log("Update role with role_info_params: {0}".format(str(role_params)), "DEBUG") - response = self.dnac._exec( - family="user_and_roles", - function="update_role_api", - op_modifies=True, - params=role_params, - ) - self.log("Received API response from update_role: {0}".format(str(response)), "DEBUG") + try: + self.log("Updating role with role_info_params: {0}".format(str(role_params)), "DEBUG") + response = self.dnac._exec( + family="user_and_roles", + function="update_role_api", + op_modifies=True, + params=role_params, + ) + self.log("Received API response from update_role: {0}".format(str(response)), "DEBUG") + self.updated_role.append(self.have.get("role_name")) + return response - return response + except Exception as e: + self.log("Unexpected error occurred: {0}".format(str(e)), "ERROR") + error_message = "The catalyst center user '{0}' does not have the necessary permissions to update role through the API.".format( + self.payload.get("dnac_username")) + return {"error_message": error_message} def find_denied_permissions(self, config, parent_key=""): """ @@ -2676,7 +2907,7 @@ def get_diff_deleted(self, config): task_response = self.delete_role(role_id_to_delete) self.log("Task response {0}".format(str(task_response)), "INFO") - if task_response and "error" not in task_response: + if task_response and "error_message" not in task_response: responses = {"role_operation": {"response": task_response}} self.msg = responses self.result["response"] = self.msg @@ -2714,7 +2945,11 @@ def get_diff_deleted(self, config): self.log(self.msg, "INFO") return self - self.msg = "Please provide a valid 'username' or 'email' for user deletion" + self.msg = ( + "Please provide a valid 'username' or 'email' for user deletion, or " + "The Catalyst Center user '{0}' does not have the necessary permissions " + "to delete a user through the API.".format(self.payload.get("dnac_username")) + ) self.log(self.msg, "ERROR") self.status = "failed" return self @@ -2740,6 +2975,7 @@ def delete_user(self, user_params): params=user_params, ) self.log("Received API response from delete_user: {0}".format(str(response)), "DEBUG") + self.deleted_user.append(self.have.get("username")) return response def delete_role(self, role_params): @@ -2764,13 +3000,21 @@ def delete_role(self, role_params): params=role_params, ) self.log("Received API response from delete_role: {0}".format(str(response)), "DEBUG") - except Exception: - error_message = "An error occurred while deleting the role. Check whether user(s) are assigned to this role \ - {0}".format(str(self.have.get("role_name"))) + self.deleted_role.append(self.have.get("role_name")) + return response - return {"error": error_message} + except Exception as e: + self.log("Unexpected error occurred: {0}".format(str(e)), "ERROR") + if "[403]" in str(e): + error_message = ( + "The Catalyst Center user '{0}' does not have the necessary permissions to delete the role through the API.".format( + self.payload.get("dnac_username")) + ) + else: + error_message = "An error occurred while deleting the role. Check whether user(s) are assigned to the role '{0}'.".format( + self.have.get("role_name")) - return response + return {"error_message": error_message} def verify_diff_merged(self, config): """ @@ -2891,6 +3135,66 @@ def verify_diff_deleted(self, config): return self + def update_user_role_profile_messages(self): + """ + Updates and logs messages based on the status of users and roles. 
+ Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + self (object): Returns the current instance of the class with updated `result` and `msg` attributes. + Description: + This method aggregates status messages related to the creation, update, or deletion of users and roles. + It checks various instance variables (`create_user`, `update_user`, `no_update_user`, `delete_user`, + `create_role`, `update_role`, `no_update_role`, `delete_role`) to determine the status and generates + corresponding messages. The method also updates the `result["response"]` attribute with the concatenated status messages. + """ + + self.result["changed"] = False + result_msg_list = [] + + if self.created_user: + create_user_msg = "User(s) '{0}' created successfully in Cisco Catalyst Center.".format("', '".join(self.created_user)) + result_msg_list.append(create_user_msg) + + if self.updated_user: + update_user_msg = "User(s) '{0}' updated successfully in Cisco Catalyst Center.".format("', '".join(self.updated_user)) + result_msg_list.append(update_user_msg) + + if self.no_update_user: + no_update_user_msg = "User(s) '{0}' need no update in Cisco Catalyst Center.".format("', '".join(self.no_update_user)) + result_msg_list.append(no_update_user_msg) + + if self.deleted_user: + delete_user_msg = "User(s) '{0}' deleted successfully from the Cisco Catalyst Center.".format("', '".join(self.deleted_user)) + result_msg_list.append(delete_user_msg) + + if self.created_role: + create_role_msg = "Role(s) '{0}' created successfully in Cisco Catalyst Center.".format("', '".join(self.created_role)) + result_msg_list.append(create_role_msg) + + if self.updated_role: + update_role_msg = "Role(s) '{0}' updated successfully in Cisco Catalyst Center.".format("', '".join(self.updated_role)) + result_msg_list.append(update_role_msg) + + if self.no_update_role: + no_update_role_msg = "Role(s) '{0}' need no update in Cisco Catalyst Center.".format("', '".join(self.no_update_role)) + result_msg_list.append(no_update_role_msg) + + if self.deleted_role: + delete_role_msg = "Role(s) '{0}' deleted successfully from the Cisco Catalyst Center.".format("', '".join(self.deleted_role)) + result_msg_list.append(delete_role_msg) + + if result_msg_list: + self.result["changed"] = True + self.msg = " ".join(result_msg_list) + else: + self.msg = "No changes were made. No user or role actions were performed in Cisco Catalyst Center." + + self.log(self.msg, "INFO") + self.result["response"] = self.msg + + return self + def snake_to_camel_case(self, data): """ Convert keys from snake_case to camelCase in a given dictionary or list of dictionaries recursively. 
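The update_user_role_profile_messages() helper added in the hunk above folds the per-run tracking lists (created_user, updated_user, no_update_user, and their role counterparts) into a single result message and drives the module's changed flag. The following standalone sketch illustrates only that aggregation pattern; the list contents are hypothetical sample values ('ops_admin' is an invented username), not output taken from the playbook examples in this patch.

# Illustrative sketch of the aggregation done by update_user_role_profile_messages().
# In the module these lists are populated by create_user(), update_user(), delete_role(), etc.
created_user = ["ajithandrewj"]     # hypothetical: one user created in this run
no_update_user = ["ops_admin"]      # hypothetical: one user that required no change
created_role = ["Test_Role_1"]      # hypothetical: one role created in this run

result_msg_list = []
if created_user:
    result_msg_list.append("User(s) '{0}' created successfully in Cisco Catalyst Center.".format("', '".join(created_user)))
if no_update_user:
    result_msg_list.append("User(s) '{0}' need no update in Cisco Catalyst Center.".format("', '".join(no_update_user)))
if created_role:
    result_msg_list.append("Role(s) '{0}' created successfully in Cisco Catalyst Center.".format("', '".join(created_role)))

changed = bool(result_msg_list)
msg = " ".join(result_msg_list) if result_msg_list else \
    "No changes were made. No user or role actions were performed in Cisco Catalyst Center."
# changed -> True
# msg -> "User(s) 'ajithandrewj' created successfully in Cisco Catalyst Center. User(s) 'ops_admin' need no update ..."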
@@ -2987,6 +3291,8 @@ def main(): if config_verify: ccc_user_role.verify_diff_state_apply[state](config).check_return_status() + ccc_user_role.update_user_role_profile_messages().check_return_status() + module.exit_json(**ccc_user_role.result) diff --git a/run_tests.sh b/run_tests.sh index 644a6292f1..6e4fb689c2 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -21,4 +21,4 @@ do echo " - $role" >> ccc_test_roles.yml done -ansible-playbook -i hosts ccc_test_roles.yml +ansible-playbook -i hosts ccc_test_roles.yml > "sanity_tests_logs_$CIRCLE_NODE_INDEX" diff --git a/tests/integration/ccc_accesspoint_workflow_management/defaults/main.yml b/tests/integration/ccc_accesspoint_workflow_management/defaults/main.yml new file mode 100644 index 0000000000..5f709c5aac --- /dev/null +++ b/tests/integration/ccc_accesspoint_workflow_management/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/tests/integration/ccc_accesspoint_workflow_management/meta/main.yml b/tests/integration/ccc_accesspoint_workflow_management/meta/main.yml new file mode 100644 index 0000000000..32cf5dda7e --- /dev/null +++ b/tests/integration/ccc_accesspoint_workflow_management/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/ccc_accesspoint_workflow_management/tasks/main.yml b/tests/integration/ccc_accesspoint_workflow_management/tasks/main.yml new file mode 100644 index 0000000000..d842bc1747 --- /dev/null +++ b/tests/integration/ccc_accesspoint_workflow_management/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity diff --git a/tests/integration/ccc_accesspoint_workflow_management/tests/test_accesspoint_workflow_manager.yml b/tests/integration/ccc_accesspoint_workflow_management/tests/test_accesspoint_workflow_manager.yml new file mode 100644 index 0000000000..377414b7fc --- /dev/null +++ b/tests/integration/ccc_accesspoint_workflow_management/tests/test_accesspoint_workflow_manager.yml @@ -0,0 +1,109 @@ +--- +- debug: msg="Starting accesspoint workflow manager test" +- debug: msg="accesspoint Path {{ role_path }}" + +- block: + - name: accesspoint workflow manager + include_vars: + file: "{{ role_path }}/vars/vars_accesspoint_workflow_manager.yml" + name: vars_map + vars: + dnac_login: &dnac_login + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. 
}}" + +############################################# +# Clean Up # +############################################# + + # - name: Delete wlc + # cisco.dnac.accesspoint_workflow_manager: + # <<: *dnac_login + # state: deleted + # config: + # - "{{ item }}" + # loop: "{{ vars_map.delete_wlc }}" + # register: result_deleted_wlc + +########################################### + # PROVISION ACCESSPOINT # +########################################### + + - name: Provision accesspoint + cisco.dnac.accesspoint_workflow_manager: + <<: *dnac_login + state: merged + config: + - "{{ item }}" + loop: "{{ vars_map.provision_accesspoint }}" + register: result_provision_accesspoint + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_provision_accesspoint.results }}" + # when: result_provision_accesspoint is defined + + - name: Assert Provision accesspoint + assert: + that: + - item.changed == false + - "'AP - AP34B8.8315.7C6C does not need any update' in item.ap_update_msg" + loop: "{{ result_provision_accesspoint.results }}" + when: result_provision_accesspoint is defined + + +############################################# +# UPDATE ACCESSPOINT CONFIGURATION # +############################################# + + - name: Update accesspoint configuration + cisco.dnac.accesspoint_workflow_manager: + <<: *dnac_login + state: merged + config: + - "{{ item }}" + loop: "{{ vars_map.update_accesspoint_config }}" + register: result_update_accesspoint_config + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_update_accesspoint_config.results }}" + # when: result_update_accesspoint_config is defined + + - name: Assert Update accesspoint configuration + assert: + that: + - item.changed == true + - "'AP Configuration - LTTS-test1 updated Successfully' in item.ap_update_msg" + loop: "{{ result_update_accesspoint_config.results }}" + when: result_update_accesspoint_config is defined + +############################################# +# POST Clean Up # +############################################# + + # - name: Delete wlc + # cisco.dnac.accesspoint_workflow_manager: + # <<: *dnac_login + # state: deleted + # config: + # - "{{ item }}" + # loop: "{{ vars_map.delete_wlc }}" + # register: result_deleted_wlc \ No newline at end of file diff --git a/tests/integration/ccc_accesspoint_workflow_management/vars/vars_accesspoint_workflow_manager.yml b/tests/integration/ccc_accesspoint_workflow_management/vars/vars_accesspoint_workflow_manager.yml new file mode 100644 index 0000000000..b5a33cf96e --- /dev/null +++ b/tests/integration/ccc_accesspoint_workflow_management/vars/vars_accesspoint_workflow_manager.yml @@ -0,0 +1,30 @@ +--- +provision_accesspoint: + - mac_address: "34:5d:a8:3b:d8:e0" + rf_profile: "HIGH" + site: + floor: + name: "FLOOR1" + parent_name: "Global/USA/New York/BLDNYC" + +update_accesspoint_config: + - mac_address: "90:e9:5e:03:f3:40" + ap_name: "LTTS-test1" + led_status: "Enabled" + led_brightness_level: 3 + ap_mode: "Local" + location: "LTTS/Cisco/Chennai" + failover_priority: "Low" + 2.4ghz_radio: + admin_status: "Enabled" + antenna_name: "C-ANT9104-2.4GHz" + radio_role_assignment: "Client-Serving" + channel_number: 3 + powerlevel: 3 + 5ghz_radio: + admin_status: "Enabled" + antenna_name: "AIR-ANT2513P4M-N-5GHz" + radio_role_assignment: "Client-Serving" + channel_number: 48 + powerlevel: 3 + channel_width: "20 MHz" \ No newline at end of file diff --git a/tests/integration/ccc_site_management/vars/vars_site_management.yml 
b/tests/integration/ccc_site_management/vars/vars_site_management.yml index 8ad35c6177..96eb82e445 100644 --- a/tests/integration/ccc_site_management/vars/vars_site_management.yml +++ b/tests/integration/ccc_site_management/vars/vars_site_management.yml @@ -38,7 +38,6 @@ design_sites: site_type: area - - site: building: name: BLD10 @@ -48,6 +47,7 @@ design_sites: longitude: -78.8829258991226 country: United States site_type: building + - site: building: name: BLD11 @@ -57,6 +57,7 @@ design_sites: longitude: -78.88105620286412 country: United States site_type: building + - site: building: name: BLD12 @@ -66,6 +67,7 @@ design_sites: longitude: -78.88217248318003 country: United States site_type: building + - site: building: name: BLD23 @@ -75,6 +77,7 @@ design_sites: longitude: -121.912974 country: United States site_type: building + - site: building: name: BLD20 diff --git a/tests/unit/modules/dnac/fixtures/user_role_workflow_manager.json b/tests/unit/modules/dnac/fixtures/user_role_workflow_manager.json index baf89c9e5c..0a63f3d646 100644 --- a/tests/unit/modules/dnac/fixtures/user_role_workflow_manager.json +++ b/tests/unit/modules/dnac/fixtures/user_role_workflow_manager.json @@ -460,7 +460,7 @@ } }, "user_invalid_param_not_correct_formate_responce":{ - "message": "Invalid parameters in playbook config: first_name: 'first_name' must only contain letters, numbers, underscores, and hyphens and should not contain spaces or other special characters., last_name: 'last_name' must only contain letters, numbers, underscores, and hyphens and should not contain spaces or other special characters., email: Invalid email format for 'email': ajith.andrewexample.com, password: 'Password' does not meet complexity requirements for password: ajith.andrewexample.com, username: 'Username' must only contain letters, numbers, underscores, and hyphens and should not contain spaces or other special characters." + "message": "Invalid parameters in playbook config: first_name: 'ajith ' must only contain letters, numbers, underscores and hyphens and should not contain spaces or other special characters., last_name: 'andrew ' must only contain letters, numbers, underscores and hyphens and should not contain spaces or other special characters., email: Invalid email format for 'email': ajith.andrewexample.com, password: 'Password' does not meet complexity requirements for password: Ajith123, username: 'ajithandrewj ' must only contain letters, numbers, underscores and hyphens and should not contain spaces or other special characters." }, "user_invalid_param_not_type_list_response":{ "message": "Invalid parameter(s) found in playbook: Super-Admin-Role : is not a valid list" @@ -1372,7 +1372,7 @@ } }, "role_invalid_param_rolename_not_correct_formate_responce":{ - "message": "Invalid parameters in playbook config: Role name: 'role_name' must only contain letters, numbers, underscores, and hyphens and should not contain spaces or other special characters." + "message": "Invalid parameters in playbook config: role_name: 'Test_Role_1 ' must only contain letters, numbers, underscores and hyphens and should not contain spaces or other special characters." 
}, "invalid_param_type_list_missing_response":{ "message": "Configuration is not available in the playbook for validation or user/role details are not type list" diff --git a/tests/unit/modules/dnac/test_user_role_workflow_manager.py b/tests/unit/modules/dnac/test_user_role_workflow_manager.py index ef7e4f9430..82dcd48e33 100644 --- a/tests/unit/modules/dnac/test_user_role_workflow_manager.py +++ b/tests/unit/modules/dnac/test_user_role_workflow_manager.py @@ -85,7 +85,7 @@ def load_fixtures(self, response=None, device=""): self.test_data.get("update_user_needed_get_role_response"), self.test_data.get("update_needed_user_response") ] - if "user_update_not_needed" in self._testMethodName: + elif "user_update_not_needed" in self._testMethodName: self.run_dnac_exec.side_effect = [ self.test_data.get("update_not_needed_get_user_response"), self.test_data.get("update_user_not_needed_get_role_response"), @@ -390,12 +390,12 @@ def test_user_role_workflow_manager_user_invalid_param_not_correct_formate(self) print(result) self.assertEqual( result.get("msg"), - "Invalid parameters in playbook config: first_name: 'first_name' must only contain letters, \ -numbers, underscores, and hyphens and should not contain spaces or other \ -special characters., last_name: 'last_name' must only contain letters, numbers, underscores, \ + "Invalid parameters in playbook config: first_name: 'ajith ' must only contain letters, \ +numbers, underscores and hyphens and should not contain spaces or other \ +special characters., last_name: 'andrew ' must only contain letters, numbers, underscores \ and hyphens and should not contain spaces or other special characters., email: Invalid email format for 'email': ajith.andrewexample.com, \ -password: 'Password' does not meet complexity requirements for password: \ -ajith.andrewexample.com, username: 'Username' must only contain letters, numbers, underscores, \ +password: 'Password' does not meet complexity requirements for password: \ +Ajith123, username: 'ajithandrewj ' must only contain letters, numbers, underscores \ and hyphens and should not contain spaces or other special characters." ) @@ -695,8 +695,8 @@ def test_user_role_workflow_manager_role_invalid_param_rolename_not_correct_form print(result) self.assertEqual( result.get("msg"), - "Invalid parameters in playbook config: Role name: 'role_name' must only contain letters, numbers, underscores,\ - and hyphens and should not contain spaces or other special characters." + "Invalid parameters in playbook config: role_name: 'Test_Role_1 ' must only contain letters, numbers, underscores \ +and hyphens and should not contain spaces or other special characters." ) def test_user_role_workflow_manager_invalid_param_type_list_missing(self):