From 7610f0e9381380f37744d9fca55c1806f5058a9b Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Fri, 13 Oct 2023 15:12:50 -0400 Subject: [PATCH 01/56] Remove Astra Control functionality --- netapp_dataops_k8s/README.md | 17 -- .../docs/inference_server_management.md | 20 +- netapp_dataops_k8s/docs/volume_management.md | 40 +-- .../docs/workspace_management.md | 261 ++-------------- .../netapp_dataops/k8s/__init__.py | 278 ++---------------- .../netapp_dataops/netapp_dataops_k8s_cli.py | 170 +---------- netapp_dataops_k8s/setup.cfg | 14 +- 7 files changed, 86 insertions(+), 714 deletions(-) diff --git a/netapp_dataops_k8s/README.md b/netapp_dataops_k8s/README.md index a30f75f..baed048 100644 --- a/netapp_dataops_k8s/README.md +++ b/netapp_dataops_k8s/README.md @@ -67,23 +67,6 @@ In the [Examples](Examples/) directory, you will find the following examples per Refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/) for more information on accessing the Kubernetes API from within a pod. -## Extended Functionality with Astra Control - -The NetApp DataOps Toolkit provides several extended capabilities that require [Astra Control](https://cloud.netapp.com/astra). Any operation that requires Astra Control is specifically noted within the documentation as requiring Astra Control. The prerequisites outlined in this section are required in order to perform any operation that requires Astra Control. - -The toolkit uses the Astra Control Python SDK to interface with the Astra Control API. The Astra Control Python SDK is installed automatically when you install the NetApp DataOps Toolkit using pip. - -In order for the Astra Control Python SDK to be able to communicate with the Astra Control API, you must create a 'config.yaml' file containing your Astra Control API connection details. Refer to the [Astra Control Python SDK README](https://github.com/NetApp/netapp-astra-toolkits/tree/v2.1.3) for formatting details. 
Note that you do not need to follow the installation instructions outlined in the Astra Control Python SDK README; you only need to create the 'config.yaml' file. Once you have created the 'config.yaml' file, you must store it in one of the following locations: -- ~/.config/astra-toolkits/ -- /etc/astra-toolkits/ -- The directory pointed to by the shell environment variable 'ASTRATOOLKITS_CONF' - -Additionally, you must set the shell environment variable 'ASTRA_K8S_CLUSTER_NAME' to the name of your specific Kubernetes cluster in Astra Control. - -```sh -export ASTRA_K8S_CLUSTER_NAME=" - -#### Clone a JupyterLab Workspace to a Brand New Namespace - -The NetApp DataOps Toolkit can be used to rapidly provision a new JupyterLab workspace (within a brand new Kubernetes namespace) that is an exact copy of an existing JupyterLab workspace. In other words, the NetApp DataOps Toolkit can be used to rapidly clone a JupyterLab workspace to a brand new namespace. The command for cloning a JupyterLab workspace to a brand new namespace is `netapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -j, --source-workspace-name= Name of JupyterLab workspace to use as source for clone. - -n, --new-namespace= Kubernetes namespace to create new workspace in. This namespace must not exist; it will be created during this operation. -``` - -The following options/arguments are optional: - -``` - -c, --clone-to-cluster-name= Name of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. - -h, --help Print help text. - -s, --source-namespace= Kubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. 
-``` - -##### Example Usage - -Clone the JupyterLab workspace 'ws1' in namespace 'default' to a brand new namespace named 'team2'. - -```sh -netapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab --source-workspace-name=ws1 --new-namespace=team2 -Creating new JupyterLab workspace 'ws1' in namespace 'team2' within your cluster using Astra Control... -New workspace is being cloned from source workspace 'ws1' in namespace 'default' within your cluster... - -Astra SDK output: -{'type': 'application/astra-managedApp', 'version': '1.1', 'id': '9949c21c-8c36-44e8-b3cf-317eb393177f', 'name': 'ntap-dsutil-jupyterlab-ws1', 'state': 'provisioning', 'stateUnready': [], 'managedState': 'managed', 'managedStateUnready': [], 'managedTimestamp': '2021-10-11T20:36:09Z', 'protectionState': '', 'protectionStateUnready': [], 'appDefnSource': 'other', 'appLabels': [], 'system': 'false', 'namespace': 'team2', 'clusterName': 'ocp1', 'clusterID': '50b1e635-075f-42bb-bf81-3a6fd5518d2b', 'clusterType': 'openshift', 'sourceAppID': 'e6ac2e92-6abf-43c9-ac94-0437dc543149', 'sourceClusterID': '50b1e635-075f-42bb-bf81-3a6fd5518d2b', 'backupID': 'ee24afec-93c7-4226-9da3-006b2a870458', 'metadata': {'labels': [{'name': 'astra.netapp.io/labels/read-only/appType', 'value': 'clone'}], 'creationTimestamp': '2021-10-11T20:36:09Z', 'modificationTimestamp': '2021-10-11T20:36:09Z', 'createdBy': '946d8bb0-0d88-4469-baf4-8cfef52a7a90'}} - -Clone operation has been initiated. The operation may take several minutes to complete. -If the new workspace is being created within your cluster, run 'netapp_dataops_k8s_cli.py list jupyterlabs -n team2 -a' to check the status of the new workspace. -``` - #### Create a New JupyterLab Workspace @@ -182,7 +140,6 @@ The following options/arguments are optional: -n, --namespace= Kubernetes namespace to create new workspace in. If not specified, workspace will be created in namespace "default". -p, --cpu= Number of CPUs to reserve for JupyterLab workspace. 
Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. -b, --load-balancer Option to choose a LoadBalancer service instead of using NodePort service. If not specified, NodePort service will be utilized. - -a, --register-with-astra Register new workspace with Astra Control (requires Astra Control). -v, --mount-pvc Option to attach an additional existing PVC that can be mounted at a spefic path whithin the container. Format: -v/--mount-pvc=existing_pvc_name:mount_point. If not specified, no additional PVC will be attached. -r, --allocate-resource= Option to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. ``` @@ -284,7 +241,6 @@ The following options/arguments are optional: ``` -h, --help Print help text. -n, --namespace= Kubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. - -a, --include-astra-app-id Include Astra Control app IDs in the output (requires Astra Control API access). ``` ##### Example Usage @@ -450,74 +406,6 @@ Waiting for Deployment 'ntap-dsutil-jupyterlab-mike' to reach Ready state. JupyterLab workspace snapshot successfully restored. ``` - - -#### Register an Existing JupyterLab Workspace with Astra Control - -The NetApp DataOps Toolkit can be used to register an existing JupyterLab workspace with Astra Control. The command for registering an existing JupyterLab workspace with Astra Control is `netapp_dataops_k8s_cli.py register-with-astra jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -w, --workspace-name= Name of JupyterLab workspace to be registered. -``` - -The following options/arguments are optional: - -``` - -h, --help Print help text. - -n, --namespace= Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. 
-``` - -##### Example Usage - -Register the workspace 'mike' in namespace 'project1' with Astra Control. - -```sh -netapp_dataops_k8s_cli.py register-with-astra jupyterlab -n project1 -w mike -Registering JupyterLab workspace 'mike' in namespace 'project1' with Astra Control... -JupyterLab workspace is now managed by Astra Control. -``` - - - -#### Backup a JupyterLab Workspace Using Astra Control - -The NetApp DataOps Toolkit can be used to trigger a backup of an existing JupyterLab workspace using Astra Control. The command for triggering a backup of an existing JupyterLab workspace using Astra Control is `netapp_dataops_k8s_cli.py backup-with-astra jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -w, --workspace-name= Name of JupyterLab workspace to be backed up. - -b, --backup-name= Name to be applied to new backup. -``` - -The following options/arguments are optional: - -``` - -h, --help Print help text. - -n, --namespace= Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. -``` - -##### Example Usage - -Backup the workspace 'ws1' in namespace 'default' using Astra Control; name the backup 'backup1'. - -```sh -netapp_dataops_k8s_cli.py backup-with-astra jupyterlab --workspace-name=ws1 --backup-name=backup1 -Trigerring backup of workspace 'ws1' in namespace 'default' using Astra Control... - -Astra SDK output: -{'type': 'application/astra-appBackup', 'version': '1.1', 'id': 'bd4ee39e-a3f6-4cf4-a75e-2a09d71b2b03', 'name': 'backup1', 'bucketID': '1e547cee-fbb9-4097-9a64-f542a79d6e80', 'state': 'pending', 'stateUnready': [], 'metadata': {'labels': [], 'creationTimestamp': '2021-10-11T20:39:59Z', 'modificationTimestamp': '2021-10-11T20:39:59Z', 'createdBy': '946d8bb0-0d88-4469-baf4-8cfef52a7a90'}} - -Backup operation has been initiated. The operation may take several minutes to complete. 
-Access the Astra Control dashboard to check the status of the backup operation. -``` - ## Advanced: Set of Functions @@ -525,24 +413,21 @@ Access the Astra Control dashboard to check the status of the backup operation. The NetApp DataOps Toolkit for Kubernetes provides a set of functions that can be imported into any Python program or Jupyter Notebook. In this manner, data scientists and data engineers can easily incorporate Kubernetes-native data management tasks into their existing projects, programs, and workflows. This functionality is only recommended for advanced users who are proficient in Python. ```py -from netapp_dataops.k8s import clone_jupyter_lab, clone_jupyter_lab_to_new_namespace, create_jupyter_lab, delete_jupyter_lab, list_jupyter_labs, create_jupyter_lab_snapshot, list_jupyter_lab_snapshots, restore_jupyter_lab_snapshot, register_jupyter_lab_with_astra, backup_jupyter_lab_with_astra +from netapp_dataops.k8s import clone_jupyter_lab, create_jupyter_lab, delete_jupyter_lab, list_jupyter_labs, create_jupyter_lab_snapshot, list_jupyter_lab_snapshots, restore_jupyter_lab_snapshot ``` The following workspace management operations are available within the set of functions. 
-| JupyterLab workspace management operations | Supported by BeeGFS | Supported by Trident | Requires Astra Control | -| ------------------------------------------------------------------------------------ | ------------------- | -------------------- | ---------------------- | -| [Clone a JupyterLab workspace within the same namespace.](#lib-clone-jupyterlab) | No | Yes | No | -| [Clone a JupyterLab workspace to a brand new namespace.](#lib-clone-new-jupyterlab) | No | Yes | Yes | -| [Create a new JupyterLab workspace.](#lib-create-jupyterlab) | Yes | Yes | No | -| [Delete an existing JupyterLab workspace.](#lib-delete-jupyterlab) | Yes | Yes | No | -| [List all JupyterLab workspaces.](#lib-list-jupyterlabs) | Yes | Yes | No | -| [Create a new snapshot for a JupyterLab workspace.](#lib-create-jupyterlab-snapshot) | No | Yes | No | -| [Delete an existing snapshot.](#lib-delete-jupyterlab-snapshot) | No | Yes | No | -| [List all snapshots.](#lib-list-jupyterlab-snapshots) | No | Yes | No | -| [Restore a snapshot.](#lib-restore-jupyterlab-snapshot) | No | Yes | No | -| [Register a JupyterLab workspace with Astra Control.](#lib-register-jupyterlab) | No | Yes | Yes | -| [Backup a JupyterLab workspace using Astra Control.](#lib-backup-jupyterlab) | No | Yes | Yes | +| JupyterLab workspace management operations | Supported by BeeGFS | Supported by Trident | +| ------------------------------------------------------------------------------------ | ------------------- | -------------------- | +| [Clone a JupyterLab workspace within the same namespace.](#lib-clone-jupyterlab) | No | Yes | +| [Create a new JupyterLab workspace.](#lib-create-jupyterlab) | Yes | Yes | +| [Delete an existing JupyterLab workspace.](#lib-delete-jupyterlab) | Yes | Yes | +| [List all JupyterLab workspaces.](#lib-list-jupyterlabs) | Yes | Yes | +| [Create a new snapshot for a JupyterLab workspace.](#lib-create-jupyterlab-snapshot) | No | Yes | +| [Delete an existing 
snapshot.](#lib-delete-jupyterlab-snapshot) | No | Yes | +| [List all snapshots.](#lib-list-jupyterlab-snapshots) | No | Yes | +| [Restore a snapshot.](#lib-restore-jupyterlab-snapshot) | No | Yes | ### JupyterLab Workspace Management Operations @@ -587,41 +472,6 @@ APIConnectionError # The Kubernetes API returned an error. ServiceUnavailableError # A Kubernetes service is not available. ``` - - -#### Clone a JupyterLab Workspace to a Brand New Naamespace - -The NetApp DataOps Toolkit can be used to rapidly provision a new JupyterLab workspace (within a brand new Kubernetes namespace) that is an exact copy of an existing JupyterLab workspace, as part of any Python program or workflow. In other words, the NetApp DataOps Toolkit can be used to rapidly clone a JupyterLab workspace to a brand new namespace. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def clone_jupyter_lab_to_new_namespace( - source_workspace_name: str, # Name of JupyterLab workspace to use as source for clone (required). - new_namespace: str, # Kubernetes namespace to create new workspace in (required). This namespace must not exist; it will be created during this operation. - source_workspace_namespace: str = "default", # Kubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. - clone_to_cluster_name: str = None, # Name of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. 
- -```py -InvalidConfigError # kubeconfig file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra API returned an error. -AstraAppNotManagedError # The source JupyterLab workspace has not been registered with Astra Control. -AstraClusterDoesNotExistError # The destination cluster does not exist in Astra Control. -``` - #### Create a New JupyterLab Workspace @@ -645,7 +495,6 @@ def create_jupyter_lab( request_cpu: str = None, # Number of CPUs to reserve for JupyterLab workspace. Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. request_memory: str = None, # Amount of memory to reserve for JupyterLab workspace. Format: '1024Mi', '100Gi', '10Ti', etc. If not specified, no memory will be reserved. request_nvidia_gpu: str = None, # Number of NVIDIA GPUs to allocate to JupyterLab workspace. Format: '1', '4', etc. If not specified, no GPUs will be allocated. - register_with_astra: bool = False, # Register new workspace with Astra Control (requires Astra Control). allocate_resource: str = None, # Option to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) -> str : @@ -706,14 +555,13 @@ The NetApp DataOps Toolkit can be used to retrieve a list of all existing Jupyte ```py def list_jupyter_labs( namespace: str = "default", # Kubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. - include_astra_app_id: bool = False, # Include Astra Control app IDs in the output (requires Astra Control API access). print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) -> list : ``` ##### Return Value -The function returns a list of all existing JupyterLab workspaces. Each item in the list will be a dictionary containing details regarding a specific workspace. 
The keys for the values in this dictionary are "Workspace Name", "Status", "Size", "StorageClass", "Access URL", "Clone" (Yes/No), "Source Workspace", and "Source VolumeSnapshot". If `include_astra_app_id` is set to `True`, then "Astra Control App ID" will also be included as a key in the dictionary. +The function returns a list of all existing JupyterLab workspaces. Each item in the list will be a dictionary containing details regarding a specific workspace. The keys for the values in this dictionary are "Workspace Name", "Status", "Size", "StorageClass", "Access URL", "Clone" (Yes/No), "Source Workspace", and "Source VolumeSnapshot". Note: The value of the "Clone" field will be "Yes" only if the workspace was cloned, using the DataOps Toolkit, from a source workspace within the same namespace. @@ -828,68 +676,3 @@ If an error is encountered, the function will raise an exception of one of the f InvalidConfigError # kubeconfig file is missing or is invalid. APIConnectionError # The Kubernetes API returned an error. ``` - - - -#### Register an Existing JupyterLab Workspace with Astra Control - -The NetApp DataOps Toolkit can be used to register an existing JupyterLab workspace with Astra Control as part of any Python program or workflow. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def register_jupyter_lab_with_astra( - workspace_name: str, # Name of JupyterLab workspace to be registered (required). - namespace: str = "default", # Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. 
- -```py -InvalidConfigError # kubeconfig or AstraSDK config file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra Control API returned an error. -AstraAppDoesNotExistError # App does not exist in Astra. Are you sure that the workspace name is correct? -``` - - - -#### Backup a JupyterLab Workspace Using Astra Control - -The NetApp DataOps Toolkit can be used to trigger a backup of an existing JupyterLab workspace using Astra Control as part of any Python program or workflow. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def backup_jupyter_lab_with_astra( - workspace_name: str, # Name of JupyterLab workspace to be backed up (required). - backup_name: str, # Name to be applied to new backup (required) - namespace: str = "default", # Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. - -```py -InvalidConfigError # kubeconfig or AstraSDK config file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra Control API returned an error. -AstraAppNotManagedError # JupyterLab workspace has not been registered with Astra Control. 
-``` diff --git a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py index 7ec6412..148a1f1 100644 --- a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py +++ b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py @@ -24,7 +24,6 @@ from kubernetes.client.rest import ApiException from tabulate import tabulate import pandas as pd -import astraSDK # Using this decorator in lieu of using a dependency to manage deprecation @@ -53,21 +52,6 @@ class InvalidConfigError(Exception): pass -class AstraAppNotManagedError(Exception): - '''Error that will be raised when an application hasn't been registered with Astra''' - pass - - -class AstraClusterDoesNotExistError(Exception): - '''Error that will be raised when a cluster doesn't exist within Astra Control''' - pass - - -class AstraAppDoesNotExistError(Exception): - '''Error that will be raised when an app doesn't exist within Astra Control''' - pass - - class ServiceUnavailableError(Exception): '''Error that will be raised when a service is not available''' pass @@ -142,20 +126,20 @@ def _load_kube_config2(print_output: bool = False): if print_output: _print_invalid_config_error() raise InvalidConfigError() + + +def _astra_not_supported_message(print_output: bool = False) : + error_text = "Error: Astra Control functionality within the DataOps Toolkit is no longer supported. Please use the Astra SDK and/or toolkit. For details, visit https://github.com/NetApp/netapp-astra-toolkits." + if print_output : + print(error_text) + raise APIConnectionError(error_text) -def _get_astra_k8s_cluster_name() -> str : - return os.environ['ASTRA_K8S_CLUSTER_NAME'] - def _print_invalid_config_error(): print( "Error: Missing or invalid kubeconfig file. 
The NetApp DataOps Toolkit for Kubernetes requires that a valid kubeconfig file be present on the host, located at $HOME/.kube or at another path specified by the KUBECONFIG environment variable.") -def _print_astra_k8s_cluster_name_error() : - print("Error: ASTRA_K8S_CLUSTER_NAME environment variable is not set. This environment variable should be set to the name of your Kubernetes cluster within Astra Control.") - - def _retrieve_image_for_jupyter_lab_deployment(workspaceName: str, namespace: str = "default", printOutput: bool = False) -> str: # Retrieve kubeconfig @@ -516,36 +500,6 @@ def _wait_for_triton_dev_deployment(server_name: str, namespace: str = "default" sleep(5) -def _retrieve_astra_app_id_for_jupyter_lab(astra_apps: dict, workspace_name: str, include_full_app_details: bool = False) -> str : - # Get Astra K8s cluster name - try : - astra_k8s_cluster_name = _get_astra_k8s_cluster_name() - except KeyError : - raise InvalidConfigError() - - # Parse Astra Apps - for app_details in astra_apps["items"] : - # Check cluster name - if app_details["clusterName"] != astra_k8s_cluster_name : - pass - - # Get app label for workspace - workspace_app_label = _get_jupyter_lab_labels(workspaceName=workspace_name)["app"] - - # See if app label matches - for app_labels in app_details["appLabels"] : - if (app_labels["name"] == "app") and (app_labels["value"] == workspace_app_label) : - if include_full_app_details : - return app_details["id"], app_details - else : - return app_details["id"] - - if include_full_app_details : - return "", None - else : - return "" - - # # Public classes # @@ -656,79 +610,6 @@ def clone_jupyter_lab(new_workspace_name: str, source_workspace_name: str, sourc return url -def clone_jupyter_lab_to_new_namespace(source_workspace_name: str, new_namespace: str, source_workspace_namespace: str = "default", clone_to_cluster_name: str = None, print_output: bool = False) : - # Retrieve list of Astra apps - try : - astra_apps = 
astraSDK.getApps().main(namespace=source_workspace_namespace) - except Exception as err : - if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for source workspace - try : - source_astra_app_id, source_astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=source_workspace_name, include_full_app_details=True) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Handle situation where workspace has not been registered with Astra. - if not source_astra_app_id : - error_message = "Source JupyterLab workspace has not been registered with Astra Control." - if print_output : - print("Error:", error_message) - print("Hint: use the 'netapp_dataops_k8s_cli.py register-with-astra jupyterlab' command to register a JupyterLab workspace with Astra Control.") - raise AstraAppNotManagedError(error_message) - - # Determine Astra cluster ID for source workspace - source_astra_cluster_id = source_astra_app_details["clusterID"] - - # Determine Astra cluster ID for "clone-to" cluster - if clone_to_cluster_name : - clone_to_cluster_id = None - try : - astra_clusters = astraSDK.getClusters().main(hideUnmanaged=True) - except Exception as err : - if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - for cluster_info in astra_clusters["items"] : - if cluster_info["name"] == clone_to_cluster_name : - clone_to_cluster_id = cluster_info["id"] - - if not clone_to_cluster_id : - error_message = "Cluster '" + clone_to_cluster_name + "' does not exist in Astra Control." 
- if print_output : - print("Error:", error_message) - raise AstraClusterDoesNotExistError(error_message) - - print("Creating new JupyterLab workspace '" + source_workspace_name + "' in namespace '" + new_namespace + "' within cluster '" + clone_to_cluster_name + "' using Astra Control...") - else : - clone_to_cluster_id = source_astra_cluster_id - print("Creating new JupyterLab workspace '" + source_workspace_name + "' in namespace '" + new_namespace + "' within your cluster using Astra Control...") - - # Clone workspace to new namespace using Astra - print("New workspace is being cloned from source workspace '" + source_workspace_name + "' in namespace '" + source_workspace_namespace + "' within your cluster...") - print("\nAstra SDK output:") - try : - ret = astraSDK.cloneApp(quiet=False).main(cloneName=_get_jupyter_lab_deployment(source_workspace_name), clusterID=clone_to_cluster_id, sourceClusterID=source_astra_cluster_id, namespace=new_namespace, sourceAppID=source_astra_app_id) - except Exception as err : - if print_output : - print("\nError: Astra Control API Error: ", err) - raise APIConnectionError(err) - - if ret == False : - if print_output : - print("\nError: Astra Control API error. See Astra SDK output above for details") - raise APIConnectionError("Astra Control API error.") - - if print_output : - print("\nClone operation has been initiated. 
The operation may take several minutes to complete.") - print("If the new workspace is being created within your cluster, run 'netapp_dataops_k8s_cli.py list jupyterlabs -n " + new_namespace + " -a' to check the status of the new workspace.") - - def clone_volume(new_pvc_name: str, source_pvc_name: str, source_snapshot_name: str = None, volume_snapshot_class: str = "csi-snapclass", namespace: str = "default", print_output: bool = False, pvc_labels: dict = None): @@ -1020,9 +901,7 @@ def create_jupyter_lab(workspace_name: str, workspace_size: str, mount_pvc: str # (Optional) Step 5 - Register workspace with Astra Control if register_with_astra : - if print_output : - print() - register_jupyter_lab_with_astra(workspace_name=workspace_name, namespace=namespace, print_output=print_output) + _astra_not_supported_message(print_output=print_output) if print_output: print("\nWorkspace successfully created.") @@ -1524,6 +1403,7 @@ def delete_jupyter_lab(workspace_name: str, namespace: str = "default", preserve if print_output: print("Workspace successfully deleted.") + def delete_triton_server(server_name: str, namespace: str = "default", print_output: bool = False): # Retrieve kubeconfig @@ -1558,6 +1438,7 @@ def delete_triton_server(server_name: str, namespace: str = "default", if print_output: print("Triton Server instance successfully deleted.") + def delete_k8s_config_map(name: str, namespace: str, print_output: bool = False): """Delete a Kubernetes config map with the provided name from the provided namespace. 
@@ -1700,12 +1581,7 @@ def list_jupyter_labs(namespace: str = "default", include_astra_app_id: bool = F # Retrieve list of Astra apps if include_astra_app_id : - try : - astra_apps = astraSDK.getApps().main(namespace=namespace) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) + _astra_not_supported_message(print_output=print_output) # Construct list of workspaces workspacesList = list() @@ -1776,15 +1652,6 @@ def list_jupyter_labs(namespace: str = "default", include_astra_app_id: bool = F workspaceDict["Source Workspace"] = "" workspaceDict["Source VolumeSnapshot"] = "" - # Retrieve Astra App ID - if include_astra_app_id : - try : - workspaceDict["Astra Control App ID"] = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=workspaceName) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - # Append dict to list of workspaces workspacesList.append(workspaceDict) @@ -2026,115 +1893,6 @@ def list_volume_snapshots(pvc_name: str = None, namespace: str = "default", prin return snapshotsList -def register_jupyter_lab_with_astra(workspace_name: str, namespace: str = "default", print_output: bool = False) : - # Retrieve list of unmanaged Astra apps - try : - astra_apps_unmanaged = astraSDK.getApps().main(discovered=True, namespace=namespace) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for workspace - try : - astra_app_id, astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps_unmanaged, workspace_name=workspace_name, include_full_app_details=True) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Fail if app doesn't exist in Astra - if not astra_app_details : - if print_output : - print("Error: 
App does not exist in Astra. Are you sure that the workspace name is correct?") - raise AstraAppDoesNotExistError() - - # Wait until app has a status of "running" in Astra - while True : - try : - if astra_app_details["state"] == "running" : - break - except KeyError : - pass - - if print_output : - print("It appears that Astra Control is still discovering the JupyterLab workspace. If this persists, confirm that you typed the workspace name correctly and/or check your Astra Control setup. Sleeping for 60 seconds before checking again...") - sleep(60) - - # Retrieve list of unmanaged Astra apps again - try : - astra_apps_unmanaged = astraSDK.getApps().main(discovered=True, namespace=namespace) - astra_app_id, astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps_unmanaged, workspace_name=workspace_name, include_full_app_details=True) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Manage app (i.e. 
register app with Astra) - if print_output : - print("Registering JupyterLab workspace '" + workspace_name + "' in namespace '" + namespace + "' with Astra Control...") - try : - managed = astraSDK.manageApp().main(appID=astra_app_id) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine success or error - if managed : - if print_output : - print("JupyterLab workspace is now managed by Astra Control.") - else : - if print_output : - print("Error: Astra Control API Error.") - raise APIConnectionError() - - -def backup_jupyter_lab_with_astra(workspace_name: str, backup_name: str, namespace: str = "default", print_output: bool = False) : - # Retrieve list of Astra apps - try : - astra_apps = astraSDK.getApps().main(namespace=namespace) - except Exception as err : - if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for source workspace - try : - astra_app_id = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=workspace_name) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Handle situation where workspace has not been registered with Astra. - if not astra_app_id : - error_message = "JupyterLab workspace has not been registered with Astra Control." 
- if print_output : - print("Error:", error_message) - print("Hint: use the 'netapp_dataops_k8s_cli.py register-with-astra jupyterlab' command to register a JupyterLab workspace with Astra Control.") - raise AstraAppNotManagedError(error_message) - - # Trigger backup - print("Trigerring backup of workspace '" + workspace_name + "' in namespace '" + namespace + "' using Astra Control...") - print("\nAstra SDK output:") - try : - ret = astraSDK.takeBackup(quiet=False).main(appID=astra_app_id, backupName=backup_name) - except Exception as err : - if print_output : - print("\nError: Astra Control API Error: ", err) - raise APIConnectionError(err) - - if ret == False : - if print_output : - print("\nError: Astra Control API error. See Astra SDK output above for details") - raise APIConnectionError("Astra Control API error.") - - if print_output : - print("\nBackup operation has been initiated. The operation may take several minutes to complete.") - print("Access the Astra Control dashboard to check the status of the backup operation.") - - def restore_jupyter_lab_snapshot(snapshot_name: str = None, namespace: str = "default", print_output: bool = False): # Retrieve source PVC name sourcePvcName = _retrieve_source_volume_details_for_volume_snapshot(snapshotName=snapshot_name, namespace=namespace, @@ -2293,3 +2051,17 @@ def restoreJupyterLabSnapshot(snapshotName: str = None, namespace: str = "defaul def restoreVolumeSnapshot(snapshotName: str, namespace: str = "default", printOutput: bool = False, pvcLabels: dict = {"created-by": "ntap-dsutil", "created-by-operation": "restore-volume-snapshot"}) : restore_volume_snapshot(snapshot_name=snapshotName, namespace=namespace, print_output=printOutput, pvc_labels=pvcLabels) + +@deprecated +def clone_jupyter_lab_to_new_namespace(source_workspace_name: str, new_namespace: str, source_workspace_namespace: str = "default", clone_to_cluster_name: str = None, print_output: bool = False) : + 
_astra_not_supported_message(print_output=print_output) + + +@deprecated +def register_jupyter_lab_with_astra(workspace_name: str, namespace: str = "default", print_output: bool = False) : + _astra_not_supported_message(print_output=print_output) + + +@deprecated +def backup_jupyter_lab_with_astra(workspace_name: str, backup_name: str, namespace: str = "default", print_output: bool = False) : + _astra_not_supported_message(print_output=print_output) \ No newline at end of file diff --git a/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py b/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py index 7c60076..3b88b7f 100755 --- a/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py +++ b/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py @@ -2,12 +2,10 @@ """NetApp DataOps Toolkit for Kubernetes Script Interface.""" from netapp_dataops import k8s from netapp_dataops.k8s import ( - backup_jupyter_lab_with_astra, clone_volume, create_volume_snapshot, create_volume, clone_jupyter_lab, - clone_jupyter_lab_to_new_namespace, create_triton_server, create_jupyter_lab, create_jupyter_lab_snapshot, @@ -20,13 +18,9 @@ list_jupyter_lab_snapshots, list_volumes, list_triton_servers, - register_jupyter_lab_with_astra, restore_jupyter_lab_snapshot, restore_volume_snapshot, APIConnectionError, - AstraAppNotManagedError, - AstraClusterDoesNotExistError, - AstraAppDoesNotExistError, CAConfigMap, InvalidConfigError ) @@ -37,6 +31,7 @@ ) # Define contents of help text +astra_error_text = "Error: Astra Control functionality within the DataOps Toolkit is no longer supported. Please use the Astra SDK and/or toolkit. For details, visit https://github.com/NetApp/netapp-astra-toolkits." 
helpTextStandard = ''' The NetApp DataOps Toolkit for Kubernetes is a Python library that makes it simple for data scientists and data engineers to perform various data management tasks, such as provisioning a new data volume, near-instantaneously cloning a data volume, and near-instantaneously snapshotting a data volume for traceability/baselining. @@ -49,15 +44,12 @@ Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. \tclone jupyterlab\t\tClone a JupyterLab workspace within the same namespace. -\tclone-to-new-ns jupyterlab\tClone a JupyterLab workspace to a brand new namespace. \tcreate jupyterlab\t\tProvision a JupyterLab workspace. \tdelete jupyterlab\t\tDelete an existing JupyterLab workspace. \tlist jupyterlabs\t\tList all JupyterLab workspaces. \tcreate jupyterlab-snapshot\tCreate a new snapshot for a JupyterLab workspace. \tlist jupyterlab-snapshots\tList all snapshots. \trestore jupyterlab-snapshot\tRestore a snapshot. -\tregister-with-astra jupyterlab\tRegister an existing JupyterLab workspace with Astra Control. -\tbackup-with-astra jupyterlab\tBackup an existing JupyterLab workspace using Astra Control. NVIDIA Triton Inference Server Management Commands: Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. @@ -92,24 +84,7 @@ \tshow s3-job\t\t\tShow the status of the specifed Kubernetes job. \tdelete s3-job\t\t\tDelete a Kubernetes S3 job. ''' -helpTextBackupJupyterLab = ''' -Command: backup-with-astra jupyterlab - -Backup an existing JupyterLab workspace using Astra Control. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-w, --workspace-name=\tName of JupyterLab workspace to be backed up. -\t-b, --backup-name=\tName to be applied to new backup. - -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. 
-\t-n, --namespace=\tKubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - -Examples: -\tnetapp_dataops_k8s_cli.py backup-with-astra jupyterlab --workspace-name=mike --backup-name=backup1 -''' +helpTextBackupJupyterLab = astra_error_text helpTextCloneJupyterLab = ''' Command: clone jupyterlab @@ -136,26 +111,7 @@ \tnetapp_dataops_k8s_cli.py clone jupyterlab --new-workspace-name=project1-experiment1 --source-workspace-name=project1 --nvidia-gpu=1 \tnetapp_dataops_k8s_cli.py clone jupyterlab -w project2-mike -s project2-snap1 -n team1 -g 1 -p 0.5 -m 1Gi -b ''' -helpTextCloneToNewNsJupyterLab = ''' -Command: clone-to-new-ns jupyterlab - -Clone a JupyterLab workspace to a brand new namespace. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-j, --source-workspace-name=\tName of JupyterLab workspace to use as source for clone. -\t-n, --new-namespace=\t\tKubernetes namespace to create new workspace in. This namespace must not exist; it will be created during this operation. - -Optional Options/Arguments: -\t-c, --clone-to-cluster-name=\tName of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. -\t-h, --help\t\t\tPrint help text. -\t-s, --source-namespace=\t\tKubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. - -Examples: -\tnetapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab --source-workspace-name=ws1 --new-namespace=project1 -\tnetapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab -j ws1 -n team2 -s team1 -c ocp1 -''' +helpTextCloneToNewNsJupyterLab = astra_error_text helpTextCloneVolume = ''' Command: clone volume @@ -212,7 +168,6 @@ \t-n, --namespace=\t\tKubernetes namespace to create new workspace in. 
If not specified, workspace will be created in namespace "default". \t-p, --cpu=\t\t\tNumber of CPUs to reserve for JupyterLab workspace. Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. \t-b, --load-balancer\t\tOption to use a LoadBalancer instead of using NodePort service. If not specified, NodePort service will be utilized. -\t-a, --register-with-astra\tRegister new workspace with Astra Control (requires Astra Control). \t-v, --mount-pvc=\t\tOption to attach an additional existing PVC that can be mounted at a spefic path whithin the container. Format: -v/--mount-pvc=existing_pvc_name:mount_point. If not specified, no additional PVC will be attached. \t-r, --allocate-resource=\tOption to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. @@ -522,7 +477,6 @@ Optional Options/Arguments: \t-h, --help\t\t\tPrint help text. \t-n, --namespace=\t\tKubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. -\t-a, --include-astra-app-id\tInclude Astra Control app IDs in the output (requires Astra Control). Examples: \tnetapp_dataops_k8s_cli.py list jupyterlabs -n team1 @@ -653,24 +607,7 @@ \tnetapp_dataops_k8s_cli.py put-s3 object -c mycreds -o host.example.com -b one -p mypvc -u -v -k sample.txt \tnetapp_dataops_k8s_cli.py put-s3 object -c mycreds -o host.example.com -b one -p mypvc -u -v -f "dir5/" -k sample.txt ''' -helpTextRegisterJupyterLab = ''' -Command: register-with-astra jupyterlab - -Register an existing JupyterLab workspace with Astra Control. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-w, --workspace-name=\tName of JupyterLab workspace to be registered. - -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. -\t-n, --namespace=\tKubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. 
- -Examples: -\tnetapp_dataops_k8s_cli.py register-with-astra jupyterlab --workspace-name=mike -\tnetapp_dataops_k8s_cli.py register-with-astra jupyterlab -w dave -n dst-test -''' +helpTextRegisterJupyterLab = astra_error_text helpTextRestoreJupyterLabSnapshot = ''' Command: restore jupyterlab-snapshot @@ -761,38 +698,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - workspace_name = None - backup_name = None - namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hw:b:n:", - ["help", "workspace-name=", "backup-name=", "namespace="]) - except: - handleInvalidCommand(helpText=helpTextBackupJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextBackupJupyterLab) - sys.exit(0) - elif opt in ("-w", "--workspace-name"): - workspace_name = arg - elif opt in ("-b", "--backup-name"): - backup_name = arg - elif opt in ("-n", "--namespace"): - namespace = arg - - # Check for required options - if not workspace_name or not backup_name: - handleInvalidCommand(helpText=helpTextBackupJupyterLab, invalidOptArg=True) - - # Back up JupyterLab workspace - try: - backup_jupyter_lab_with_astra(workspace_name=workspace_name, backup_name=backup_name, namespace=namespace, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppNotManagedError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextBackupJupyterLab) else: handleInvalidCommand() @@ -921,41 +827,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - source_workspace_name = None - new_namespace = None - clone_to_cluster_name = None - source_namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hj:n:c:s:", - ["help", "source-workspace-name=", "new-namespace=", "clone-to-cluster-name=", 
"source-namespace="]) - except: - handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextCloneToNewNsJupyterLab) - sys.exit(0) - elif opt in ("-j", "--source-workspace-name"): - source_workspace_name = arg - elif opt in ("-n", "--new-namespace"): - new_namespace = arg - elif opt in ("-c", "--clone-to-cluster-name"): - clone_to_cluster_name = arg - elif opt in ("-s", "--source-namespace"): - source_namespace = arg - - # Check for required options - if not source_workspace_name or not new_namespace : - handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab, invalidOptArg=True) - - # Clone JupyterLab to new namespace - try: - clone_jupyter_lab_to_new_namespace(source_workspace_name=source_workspace_name, new_namespace=new_namespace, source_workspace_namespace=source_namespace, clone_to_cluster_name=clone_to_cluster_name, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppNotManagedError, AstraClusterDoesNotExistError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab) else: handleInvalidCommand() @@ -2199,35 +2071,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - workspaceName = None - namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hw:n:", - ["help", "workspace-name=", "namespace="]) - except: - handleInvalidCommand(helpText=helpTextRegisterJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextRegisterJupyterLab) - sys.exit(0) - elif opt in ("-w", "--workspace-name"): - workspaceName = arg - elif opt in ("-n", "--namespace"): - namespace = arg - - # Check for required options - if not workspaceName: - handleInvalidCommand(helpText=helpTextRegisterJupyterLab, 
invalidOptArg=True) - - # Register JupyterLab workspace - try: - register_jupyter_lab_with_astra(workspace_name=workspaceName, namespace=namespace, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppDoesNotExistError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextRegisterJupyterLab) else: handleInvalidCommand() diff --git a/netapp_dataops_k8s/setup.cfg b/netapp_dataops_k8s/setup.cfg index 260fe15..4a5a591 100644 --- a/netapp_dataops_k8s/setup.cfg +++ b/netapp_dataops_k8s/setup.cfg @@ -28,18 +28,8 @@ install_requires = notebook pandas numpy>=1.22.0 - actoolkit==2.1.3 - certifi==2020.12.5 - chardet==4.0.0 - dnspython==2.1.0 - idna==2.10 - PyYAML==5.4.1 - requests==2.25.1 - tabulate==0.8.9 - termcolor==1.1.0 - urllib3==1.26.5 - func_timeout==4.3.5 - kubernetes==23.6.0 + tabulate + kubernetes python_requires = >=3.8 [options.packages.find] From e254de5de1b11f00b32103fd9ee609a097feb261 Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Fri, 13 Oct 2023 15:35:32 -0400 Subject: [PATCH 02/56] Update snapshot API version --- netapp_dataops_k8s/README.md | 2 +- netapp_dataops_k8s/netapp_dataops/k8s/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_k8s/README.md b/netapp_dataops_k8s/README.md index baed048..33bdfbf 100644 --- a/netapp_dataops_k8s/README.md +++ b/netapp_dataops_k8s/README.md @@ -9,7 +9,7 @@ The NetApp DataOps Toolkit for Kubernetes supports Linux and macOS hosts. The toolkit must be used in conjunction with a Kubernetes cluster in order to be useful. Additionally, [Trident](https://netapp.io/persistent-storage-provisioner-for-kubernetes/), NetApp's dynamic storage orchestrator for Kubernetes, and/or the [BeeGFS CSI driver](https://github.com/NetApp/beegfs-csi-driver/) must be installed within the Kubernetes cluster. The toolkit simplifies performing of various data management tasks that are actually executed by a NetApp maintained CSI driver. 
In order to facilitate this, the toolkit communicates with the appropriate driver via the Kubernetes API. -The toolkit is currently compatible with Kubernetes versions 1.17 and above, and OpenShift versions 4.4 and above. +The toolkit is currently compatible with Kubernetes versions 1.20 and above, and OpenShift versions 4.7 and above. The toolkit is currently compatible with Trident versions 20.07 and above. Additionally, the toolkit is compatible with the following Trident backend types: diff --git a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py index 148a1f1..16c8a16 100644 --- a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py +++ b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py @@ -443,7 +443,7 @@ def _get_snapshot_api_group() -> str: def _get_snapshot_api_version() -> str: - return "v1beta1" + return "v1" def _wait_for_jupyter_lab_deployment_ready(workspaceName: str, namespace: str = "default", printOutput: bool = False): From 0c8d7c7a5556f5488d22917cc5b076066274ea7d Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Tue, 17 Oct 2023 14:02:02 -0400 Subject: [PATCH 03/56] update prereq and version number --- netapp_dataops_k8s/netapp_dataops/k8s/__init__.py | 4 ++-- netapp_dataops_k8s/setup.cfg | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py index 16c8a16..033432c 100644 --- a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py +++ b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py @@ -4,7 +4,7 @@ by applications using the import method of utilizing the toolkit. 
""" -__version__ = "2.4.0" +__version__ = "2.5.0b1" import base64 from datetime import datetime @@ -2064,4 +2064,4 @@ def register_jupyter_lab_with_astra(workspace_name: str, namespace: str = "defau @deprecated def backup_jupyter_lab_with_astra(workspace_name: str, backup_name: str, namespace: str = "default", print_output: bool = False) : - _astra_not_supported_message(print_output=print_output) \ No newline at end of file + _astra_not_supported_message(print_output=print_output) diff --git a/netapp_dataops_k8s/setup.cfg b/netapp_dataops_k8s/setup.cfg index 4a5a591..b5b127b 100644 --- a/netapp_dataops_k8s/setup.cfg +++ b/netapp_dataops_k8s/setup.cfg @@ -25,7 +25,7 @@ packages = find_namespace: scripts = netapp_dataops/netapp_dataops_k8s_cli.py install_requires = - notebook + notebook<7.0.0 pandas numpy>=1.22.0 tabulate From 1921a01aa32eed901b256f32ff7a91b04598e59d Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 5 May 2022 16:31:41 +0000 Subject: [PATCH 04/56] added options argument to the mount_volume function --- .../netapp_dataops/netapp_dataops_cli.py | 9 ++++-- .../netapp_dataops/traditional.py | 31 +++++++++++++++---- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 6b98f97..95d4f09 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -278,11 +278,13 @@ Optional Options/Arguments: \t-h, --help\t\tPrint help text. \t-x, --readonly\t\tMount volume locally as read-only. +\t-o, --options\t\tEnables users to specify custom mount options. 
Examples: \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 \tsudo -E netapp_dataops_cli.py mount volume -m ~/testvol -n testvol -x \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly +\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly --options=rsize=262144,wsize=262144,nconnect=16 ''' helpTextPullFromS3Bucket = ''' Command: pull-from-s3 bucket @@ -1019,10 +1021,11 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("volume", "vol"): volumeName = None mountpoint = None + mount_options = None readonly = False # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hn:m:x", ["help", "name=", "mountpoint=", "readonly"]) + opts, args = getopt.getopt(sys.argv[3:], "hn:m:o:x", ["help", "name=", "mountpoint=", "options=", "readonly"]) except: handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) @@ -1035,12 +1038,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = volumeName = arg elif opt in ("-m", "--mountpoint"): mountpoint = arg + elif opt in ("-o", "--options"): + mount_options = arg elif opt in ("-x", "--readonly"): readonly = True # Mount volume try: - mount_volume(volume_name=volumeName, mountpoint=mountpoint, readonly=readonly, print_output=True) + mount_volume(volume_name=volumeName, mountpoint=mountpoint, mount_options=mount_options, readonly=readonly, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 401f6c7..19c8429 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def 
list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None # Retrieve list of volumes @@ -1204,13 +1204,12 @@ def mount_volume(volume_name: str, mountpoint: str, readonly: bool = False, prin if volume_name == volume["Volume Name"]: # Retrieve NFS mount target nfsMountTarget = volume["NFS Mount Target"] - + nfsMountTarget = nfsMountTarget.strip() # Raise error if invalid volume name was entered if not nfsMountTarget: if print_output: print("Error: Invalid volume name specified.") raise InvalidVolumeParameterError("name") - # Print message describing action to be understaken if print_output: if readonly: @@ -1225,8 +1224,9 @@ def mount_volume(volume_name: str, mountpoint: str, readonly: bool = False, prin except FileExistsError: pass + # Mount volume - if readonly: + if readonly and not mount_options: try: subprocess.check_call(['mount', '-o', 'ro', nfsMountTarget, mountpoint]) if print_output: @@ -1235,9 +1235,10 @@ def mount_volume(volume_name: str, mountpoint: str, readonly: bool = False, prin if print_output: print("Error: Error running mount command: ", err) raise MountOperationError(err) - else: + + elif readonly and mount_options: try: - subprocess.check_call(['mount', nfsMountTarget, mountpoint]) + subprocess.check_call(['mount', '-o', 'ro,'+mount_options, nfsMountTarget, mountpoint]) if print_output: print("Volume mounted successfully.") except subprocess.CalledProcessError as err: @@ -1245,7 +1246,25 @@ def mount_volume(volume_name: str, mountpoint: str, readonly: bool = False, prin print("Error: Error running mount command: ", err) raise MountOperationError(err) + elif mount_options: + try: + subprocess.check_call(['mount', '-o', mount_options, 
nfsMountTarget, mountpoint]) + if print_output: + print("Volume mounted successfully.") + except subprocess.CalledProcessError as err: + if print_output: + print("Error: Error running mount command: ", err) + raise MountOperationError(err) + else: + try: + subprocess.check_call(['mount', nfsMountTarget, mountpoint]) + if print_output: + print("Volume mounted successfully.") + except subprocess.CalledProcessError as err: + if print_output: + print("Error: Error running mount command: ", err) + raise MountOperationError(err) # Function to unmount volume def unmount_volume(mountpoint: str, print_output: bool = False): From bc92a2e4edcb2b36d5d647d910a3c59aabc09249 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 5 May 2022 12:07:08 -0500 Subject: [PATCH 05/56] Added details for "option" argument for mount volume function. --- netapp_dataops_traditional/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index 1762b68..b61194a 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -339,6 +339,7 @@ The following options/arguments are optional: ``` -h, --help Print help text. -x, --readonly Mount volume locally as read-only. + -o, --options Enables users to specify custom mount options. ``` ##### Example Usage @@ -1059,7 +1060,7 @@ APIConnectionError # The storage system/service API returned an err #### Mount an Existing Data Volume Locally -The NetApp DataOps Toolkit can be used to mount an existing data volume as "read-only" or "read-write" on your local host as part of any Python program or workflow. On Linux hosts, mounting requires root privileges, so any Python program that invokes this function must be run as root. It is usually not necessary to invoke this function as root on macOS hosts. 
+The NetApp DataOps Toolkit can be used to mount an existing data volume with custom mount options as "read-only" or "read-write" on your local host as part of any Python program or workflow. On Linux hosts, mounting requires root privileges, so any Python program that invokes this function must be run as root. It is usually not necessary to invoke this function as root on macOS hosts. ##### Function Definition @@ -1067,6 +1068,7 @@ The NetApp DataOps Toolkit can be used to mount an existing data volume as "read def mount_volume( volume_name: str, # Name of volume (required). mountpoint: str, # Local mountpoint to mount volume at (required). + mount_options: str = None # Enables users to specify custom mount options. If not specified volume will be mounted without custom options as "read-write" or "read-only". readonly: bool = False, # Mount volume locally as "read-only." If not specified volume will be mounted as "read-write". On Linux hosts - if specified, calling program must be run as root. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) : From 052788f7edcfc6d943d74b46211fceffce06f3a0 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 09:32:33 -0500 Subject: [PATCH 06/56] Fixing merge conflicts. 
--- .../netapp_dataops/traditional.py | 37 ++++++++++++++++--- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 19c8429..a3cd675 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,12 +1182,29 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, mount_options: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None + + svm = None + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name # Retrieve list of volumes try: - volumes = list_volumes(check_local_mounts=True) + volumes = list_volumes(check_local_mounts=True, svm_name = svm) except (InvalidConfigError, APIConnectionError): if print_output: print("Error: Error retrieving NFS mount target for volume.") @@ -1204,18 +1221,27 @@ def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, r if volume_name == volume["Volume Name"]: # Retrieve NFS mount target nfsMountTarget = volume["NFS Mount Target"] - nfsMountTarget = nfsMountTarget.strip() + # Raise error if invalid volume name was entered if not nfsMountTarget: if print_output: print("Error: Invalid volume name specified.") raise InvalidVolumeParameterError("name") + + try: + if lif_name: + nfsMountTarget = 
lif_name+':'+nfsMountTarget.split(':')[1] + except: + if print_output: + print("Error: Error retrieving NFS mount target for volume.") + raise + # Print message describing action to be understaken if print_output: if readonly: - print("Mounting volume '" + volume_name + "' at '" + mountpoint + "' as read-only.") + print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "' as read-only.") else: - print("Mounting volume '" + volume_name + "' at '" + mountpoint + "'.") + print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "'.") # Create mountpoint if it doesn't already exist mountpoint = os.path.expanduser(mountpoint) @@ -1224,7 +1250,6 @@ def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, r except FileExistsError: pass - # Mount volume if readonly and not mount_options: try: From 121473c6a773f6db82c9c2a11bb9c167a559b15d Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 09:34:52 -0500 Subject: [PATCH 07/56] Fixing merge issues. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index a3cd675..de1227b 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, mount_options: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From a0d3993b22afb49c45be20f720f3c6bf4733b74b Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:08:01 -0500 Subject: [PATCH 08/56] Merge issues --- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index de1227b..058a3ea 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, mount_options: str = None, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, mount_options: str = None, svm_name: str = None, 
lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From 17074d27cf970cb7b6ff8915eaa349363a885051 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:09:17 -0500 Subject: [PATCH 09/56] merge conflicts. --- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 058a3ea..eddb227 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, mount_options: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From 0079ed3a0604fcee16f3192c36867da4fb9b3fd8 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:10:04 -0500 Subject: [PATCH 10/56] Merge conflicts. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index eddb227..a3cd675 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, mount_options: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From 19b2862dff811d4f0521b9facc3f62f58090cd7f Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:14:33 -0500 Subject: [PATCH 11/56] merge conflicts. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index a3cd675..e1ef778 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, mount_options: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False, mount_options: str = None): nfsMountTarget = None svm = None From 74f52bdc6c1acbb8bc68ba5a5eb4b20c128c3775 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:16:39 -0500 Subject: [PATCH 12/56] merge conflicts. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index e1ef778..932d4b3 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1182,7 +1182,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False, mount_options: str = None): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From 50a3a897b511dea4f2dd4f1d51bebfdedab0fc70 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:34:05 -0500 Subject: [PATCH 13/56] merge conflicts. 
--- netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 95d4f09..f3c99ee 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1025,7 +1025,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = readonly = False # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hn:m:o:x", ["help", "name=", "mountpoint=", "options=", "readonly"]) + opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) except: handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) From 8e8062a0b0834b97a13adffe58c098ca0b5678eb Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Mon, 9 May 2022 10:40:51 -0500 Subject: [PATCH 14/56] Merge conflicts. 
--- .../netapp_dataops/netapp_dataops_cli.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index f3c99ee..5fb24e0 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1026,9 +1026,10 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) - except: + except Exception as err: + print(err) handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) - + # Parse command line options for opt, arg in opts: if opt in ("-h", "--help"): From 1da04af067a811dfacdc45e8bf79ad6d1cb84607 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 12 May 2022 08:49:38 -0500 Subject: [PATCH 15/56] Pulled "mount_volume" from master and made some changes. 
--- .../netapp_dataops/traditional.py | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 932d4b3..5f40d8c 100755 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1037,7 +1037,7 @@ def list_snapshots(volume_name: str, print_output: bool = False) -> list(): raise ConnectionTypeError() -def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False) -> list(): +def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None) -> list(): # Retrieve config details from config file try: config = _retrieve_config(print_output=print_output) @@ -1049,6 +1049,8 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: if print_output : _print_invalid_config_error() raise InvalidConfigError() + if cluster_name: + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1058,8 +1060,12 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: raise try: + svmname=config["svm"] + if svm_name: + svmname = svm_name + # Retrieve all volumes for SVM - volumes = NetAppVolume.get_collection(svm=config["svm"]) + volumes = NetAppVolume.get_collection(svm=svmname) # Retrieve local mounts if desired if check_local_mounts : @@ -1096,12 +1102,18 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: nfsMountTarget = None else : nfsMountTarget = config["dataLif"]+":"+volume.nas.path + if svmname != config["svm"]: + nfsMountTarget = svmname+":"+volume.nas.path + # Construct clone source clone = "no" + cloneParentSvm = "" cloneParentVolume = "" cloneParentSnapshot = "" + 
try: + cloneParentSvm = volume.clone.parent_svm.name cloneParentVolume = volume.clone.parent_volume.name cloneParentSnapshot = volume.clone.parent_snapshot.name clone = "yes" @@ -1159,6 +1171,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: volumeDict["Local Mountpoint"] = localMountpoint volumeDict["FlexCache"] = flexcache volumeDict["Clone"] = clone + volumeDict["Source SVM"] = cloneParentSvm volumeDict["Source Volume"] = cloneParentVolume volumeDict["Source Snapshot"] = cloneParentSnapshot @@ -1181,10 +1194,9 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: else: raise ConnectionTypeError() - def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None - + svm = None try: config = _retrieve_config(print_output=print_output) @@ -1200,7 +1212,7 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name # Retrieve list of volumes try: @@ -1221,13 +1233,14 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv if volume_name == volume["Volume Name"]: # Retrieve NFS mount target nfsMountTarget = volume["NFS Mount Target"] + nfsMountTarget = nfsMountTarget.strip() # Raise error if invalid volume name was entered if not nfsMountTarget: if print_output: print("Error: Invalid volume name specified.") raise InvalidVolumeParameterError("name") - + try: if lif_name: nfsMountTarget = lif_name+':'+nfsMountTarget.split(':')[1] @@ -1808,3 +1821,4 @@ def syncCloudSyncRelationship(relationshipID: str, waitUntilComplete: bool = Fal @deprecated def syncSnapMirrorRelationship(uuid: str, waitUntilComplete: bool = False, printOutput: bool = False) : sync_snap_mirror_relationship(uuid=uuid, 
wait_until_complete=waitUntilComplete, print_output=printOutput) + From 2b2b50f0a4ad9ececfc8e363bbafe61cac2126a2 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 12 May 2022 08:50:26 -0500 Subject: [PATCH 16/56] Pulled master and made changes to mount_volume function. --- netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 1 + 1 file changed, 1 insertion(+) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 5fb24e0..ad4119c 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1410,3 +1410,4 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = else: handleInvalidCommand() + From 69e02ab0c389b998df82fe31446c42376572e562 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Wed, 25 May 2022 09:37:42 -0500 Subject: [PATCH 17/56] Fixing typos. --- netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 3d1d491..be77b5b 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -321,7 +321,7 @@ \t-l, --lif \t\tnon default lif (nfs server ip/name) \t-h, --help\t\tPrint help text. \t-x, --readonly\t\tMount volume locally as read-only. -\t-o, --options\t\tEnables users to specify custom mount options. +\t-o, --options\t\tEnables users to specify custom NFS mount options. 
Examples: \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 From abd19d1b357292d2f471e956fd3ae0fc35299ffc Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Wed, 25 May 2022 09:48:58 -0500 Subject: [PATCH 18/56] Changed the "mount_vol" function to fix repeated code. --- .../netapp_dataops/traditional.py | 74 +++++-------------- 1 file changed, 19 insertions(+), 55 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 0daece6..775c118 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1571,23 +1571,6 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None - svm = None - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name - # Retrieve list of volumes try: volumes = list_volumes(check_local_mounts=True, svm_name = svm) @@ -1638,45 +1621,26 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv pass # Mount volume - if readonly and not mount_options: - try: - subprocess.check_call(['mount', '-o', 'ro', nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) - - elif readonly and mount_options: - try: - subprocess.check_call(['mount', '-o', 
'ro,'+mount_options, nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) - + mount_cmd_opts = [] + + if readonly: + mount_cmd_opts.append('-o') + mount_cmd_opts.append('ro') + if mount_options: + mount_cmd_opts.append(','+ mount_options) elif mount_options: - try: - subprocess.check_call(['mount', '-o', mount_options, nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) - - else: - try: - subprocess.check_call(['mount', nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) + mount_cmd_opts.append('-o') + mount_cmd_opts.append(mount_options) + + mount_cmd = ['mount'] + mount_cmd_opts + [nfsMountTarget, mountpoint] + try: + subprocess.check_call(mount_cmd) + if print_output: + print("Volume mounted successfully.") + except subprocess.CalledProcessError as err: + if print_output: + print("Error: Error running mount command: ", err) + raise MountOperationError(err) # Function to unmount volume def unmount_volume(mountpoint: str, print_output: bool = False): From dd3876e761d061b4cefe04b8ec90ca4d5454dc95 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Wed, 25 May 2022 09:53:36 -0500 Subject: [PATCH 19/56] Edited descriptions. 
--- netapp_dataops_traditional/README.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index c618824..6ad56bb 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -364,7 +364,7 @@ The following options/arguments are optional: -l, --lif= non default lif (nfs server ip/name) -h, --help Print help text. -x, --readonly Mount volume locally as read-only. - -o, --options Enables users to specify custom mount options. + -o, --options Enables users to specify custom NFS mount options. ``` ##### Example Usage @@ -1164,17 +1164,16 @@ APIConnectionError # The storage system/service API returned an err #### Mount an Existing Data Volume Locally -The NetApp DataOps Toolkit can be used to mount an existing data volume with custom mount options as "read-only" or "read-write" on your local host as part of any Python program or workflow. On Linux hosts, mounting requires root privileges, so any Python program that invokes this function must be run as root. It is usually not necessary to invoke this function as root on macOS hosts. - +The NetApp DataOps Toolkit can be used to mount an existing data volume as "read-only" or "read-write" on your local host as part of any Python program or workflow. On Linux hosts, mounting requires root privileges, so any Python program that invokes this function must be run as root. It is usually not necessary to invoke this function as root on macOS hosts. ##### Function Definition ```py def mount_volume( volume_name: str, # Name of volume (required). 
- cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used - svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used + cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used + svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used mountpoint: str, # Local mountpoint to mount volume at (required). - mount_options: str = None # Enables users to specify custom mount options. If not specified volume will be mounted without custom options as "read-write" or "read-only". + mount_options: str = None # Enables users to specify custom NFS mount options. readonly: bool = False, # Mount volume locally as "read-only." If not specified volume will be mounted as "read-write". On Linux hosts - if specified, calling program must be run as root. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) : From 0f6691ac28482da95b0b52cbc3f75f085c2bb976 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 9 Jun 2022 12:39:08 -0500 Subject: [PATCH 20/56] unmount volume if delete function is engaged. 
--- .../netapp_dataops/traditional.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 775c118..39aba48 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1077,7 +1077,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise ConnectionTypeError() -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, delete_mirror: bool = False, +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, delete_mirror: bool = False, delete_non_clone: bool = False, print_output: bool = False): # Retrieve config details from config file try: @@ -1167,6 +1167,14 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) + + if mountpoint: + try: + unmount_volume(mountpoint=mountpoint, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + if print_output: + print("Error: Error unmounting volume.") + raise try: if print_output: From f85f3f8730103c75f7c188ce27b44c559cf7a622 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 9 Jun 2022 13:04:07 -0500 Subject: [PATCH 21/56] Added a comment for clarity. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 1 + 1 file changed, 1 insertion(+) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 39aba48..2ee3507 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1169,6 +1169,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No print("Error: ONTAP Rest API Error: ", err) if mountpoint: + #check if volume is mounted locally, and then unmount it. try: unmount_volume(mountpoint=mountpoint, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): From 1b1217207b2b0a86c128465fb6244d951cccd1b9 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Tue, 14 Jun 2022 19:37:36 -0500 Subject: [PATCH 22/56] Added unmounting option for deleted volume. --- .../netapp_dataops/netapp_dataops_cli.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index be77b5b..766b900 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -229,6 +229,7 @@ \t-u, --cluster-name=\tnon default hosting cluster \t-v, --svm \t\tnon default SVM name \t-f, --force\t\tDo not prompt user to confirm operation. +\t-p, --mountpoint\t\tMount point for the locally mounted volume. \t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion \t --delete-non-clone\tEnable deletion of volume not created as clone by this tool \t-h, --help\t\tPrint help text. 
@@ -1087,10 +1088,11 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = force = False deleteMirror = False deleteNonClone = False + mountpoint = None # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone","delete-mirror"]) + opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:mp:", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror", "mountpoint="]) except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) @@ -1106,12 +1108,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = clusterName = arg elif opt in ("-n", "--name"): volumeName = arg + elif opt in ("-p", "--mountpoint"): + mountpoint = arg elif opt in ("-f", "--force"): force = True elif opt in ("-m", "--delete-mirror"): deleteMirror = True elif opt in ("--delete-non-clone"): - deleteNonClone = True + deleteNonClone = True # Check for required options if not volumeName: From 7b4d4f38147e0bfaa4af97f3a3de027b48b35290 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Wed, 15 Jun 2022 13:47:45 -0500 Subject: [PATCH 23/56] Fixed mount options bug and added sudo check. 
--- .../netapp_dataops/traditional.py | 27 ++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 2ee3507..550b7df 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1580,6 +1580,23 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None + svm = None + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name + # Retrieve list of volumes try: volumes = list_volumes(check_local_mounts=True, svm_name = svm) @@ -1615,6 +1632,8 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv print("Error: Error retrieving NFS mount target for volume.") raise + if os.getuid() != 0: + exit("You need to have root privileges to run 'Mount' command.\nPlease try again, this time using 'sudo'. 
Exiting.") # Print message describing action to be understaken if print_output: if readonly: @@ -1631,17 +1650,18 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv # Mount volume mount_cmd_opts = [] - + if readonly: mount_cmd_opts.append('-o') mount_cmd_opts.append('ro') if mount_options: - mount_cmd_opts.append(','+ mount_options) + mount_cmd_opts.remove('ro') + mount_cmd_opts.append('ro'+','+mount_options) elif mount_options: mount_cmd_opts.append('-o') mount_cmd_opts.append(mount_options) - mount_cmd = ['mount'] + mount_cmd_opts + [nfsMountTarget, mountpoint] + try: subprocess.check_call(mount_cmd) if print_output: @@ -1651,6 +1671,7 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv print("Error: Error running mount command: ", err) raise MountOperationError(err) + # Function to unmount volume def unmount_volume(mountpoint: str, print_output: bool = False): # Print message describing action to be understaken From 749e1dd4f76e7e48d3f51344f3a962fb3160397d Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 16 Jun 2022 10:06:47 -0500 Subject: [PATCH 24/56] Added arguments for unmounting when deleting volumes. 
--- .../netapp_dataops/netapp_dataops_cli.py | 200 +++++++++--------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 766b900..5a9a55f 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -100,8 +100,8 @@ Optional Options/Arguments: \t-l, --cluster-name=\tnon default hosting cluster -\t-c, --source-svm=\tnon default source svm name -\t-t, --target-svm=\tnon default target svm name +\t-c, --source-svm=\tnon default source svm name +\t-t, --target-svm=\tnon default target svm name \t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (if not specified, gid of source volume will be retained) (Note: cannot apply gid of '0' when creating clone). \t-h, --help\t\tPrint help text. \t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. @@ -114,8 +114,8 @@ \t-e, --export-policy\texport policy name to attach to the volume, default policy will be used if export-hosts/export-policy not provided \t-d, --snapshot-policy\tsnapshot-policy to attach to the volume, default snapshot policy will be used if not provided \t-s, --split\t\tstart clone split after creation -\t-r, --refresh\t\tdelete existing clone if exists before creating a new one -\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists +\t-r, --refresh\t\tdelete existing clone if exists before creating a new one +\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists Examples (basic usage): \tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset @@ -147,9 +147,9 @@ \t-s, --svm=\t\tNon defaul svm name. \t-h, --help\t\tPrint help text. 
\t-n, --name=\t\tName of new snapshot. If not specified, will be set to 'netapp_dataops_'. -\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. +\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. \t \tCan be count of snapshots when int (ex. 10) or days when retention is suffixed by d (ex. 10d) -\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot +\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot Examples: \tnetapp_dataops_cli.py create snapshot --volume=project1 --name=snap1 @@ -169,7 +169,7 @@ Optional Options/Arguments: \t-l, --cluster-name=\tnon default hosting cluster -\t-v, --svm=\t\tnon default svm name +\t-v, --svm=\t\tnon default svm name \t-a, --aggregate=\tAggregate to use when creating new volume (flexvol) or optional comma seperated aggrlist when specific aggregates are required for FG. \t-d, --snapshot-policy=\tSnapshot policy to apply for new volume. \t-e, --export-policy=\tNFS export policy to use when exporting new volume. @@ -230,7 +230,7 @@ \t-v, --svm \t\tnon default SVM name \t-f, --force\t\tDo not prompt user to confirm operation. \t-p, --mountpoint\t\tMount point for the locally mounted volume. -\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion +\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion \t --delete-non-clone\tEnable deletion of volume not created as clone by this tool \t-h, --help\t\tPrint help text. 
@@ -494,7 +494,7 @@ helpTextCreateSnapMirrorRelationship = ''' Command: create snapmirror-relationship -create snapmirror relationship +create snapmirror relationship Required Options/Arguments: \t-n, --target-vol=\tName of target volume @@ -502,7 +502,7 @@ \t-v, --source-vol=\tSource volume name Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster +\t-u, --cluster-name=\tnon default hosting cluster \t-t, --target-svm=\tnon default target SVM \t-c, --schedule=\t\tnon default schedule (default is hourly) \t-p, --policy=\t\tnon default policy (default is MirrorAllSnapshots @@ -510,7 +510,7 @@ \t-h, --help\t\tPrint help text. Examples: -\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly +\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly \tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly -a resync ''' @@ -740,9 +740,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Invoke desired action based on target if target in ("volume", "vol"): newVolumeName = None - clusterName = None - sourceSVM = None - targetSVM = None + clusterName = None + sourceSVM = None + targetSVM = None sourceVolumeName = None sourceSnapshotName = None mountpoint = None @@ -760,7 +760,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hl:c:t:n:v:s:m:u:g:j:xe:p:i:srd", ["help", "cluster-name=", "source-svm=","target-svm=","name=", "source-volume=", "source-snapshot=", "mountpoint=", "uid=", "gid=", "junction=", "readonly","export-hosts=","export-policy=","snapshot-policy=","split","refresh","svm-dr-unprotect"]) - except Exception as err: + except Exception as err: print(err) 
handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) @@ -770,15 +770,15 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextCloneVolume) sys.exit(0) elif opt in ("-l", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): newVolumeName = arg elif opt in ("-c", "--source-svm"): sourceSVM = arg elif opt in ("-t", "--target-svm"): - targetSVM = arg + targetSVM = arg elif opt in ("-v", "--source-volume"): - sourceVolumeName = arg + sourceVolumeName = arg elif opt in ("-s", "--source-snapshot"): sourceSnapshotName = arg elif opt in ("-m", "--mountpoint"): @@ -792,17 +792,17 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-x", "--readonly"): readonly = True elif opt in ("-s", "--split"): - split = True + split = True elif opt in ("-r", "--refresh"): - refresh = True + refresh = True elif opt in ("-d", "--svm-dr-unprotect"): - svmDrUnprotect = True + svmDrUnprotect = True elif opt in ("-p", "--export-policy"): - exportPolicy = arg + exportPolicy = arg elif opt in ("-i", "--snapshot-policy"): - snapshotPolicy = arg + snapshotPolicy = arg elif opt in ("-e", "--export-hosts"): - exportHosts = arg + exportHosts = arg # Check for required options if not newVolumeName or not sourceVolumeName: @@ -816,9 +816,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Clone volume try: - clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, - cluster_name=clusterName, source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, - snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, + clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, + cluster_name=clusterName, 
source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, + snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, junction=junction, svm_dr_unprotect=svmDrUnprotect, readonly=readonly, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError, MountOperationError): @@ -849,8 +849,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None snapshotName = None - clusterName = None - svmName = None + clusterName = None + svmName = None retentionCount = 0 retentionDays = False snapmirrorLabel = None @@ -858,7 +858,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:r:u:l:", ["cluster-name=","help", "svm=", "name=", "volume=", "retention=", "snapmirror-label="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) @@ -870,20 +870,20 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-n", "--name"): snapshotName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-r", "--retention"): - retentionCount = arg + retentionCount = arg elif opt in ("-v", "--volume"): volumeName = arg elif opt in ("-l", "--snapmirror-label"): - snapmirrorLabel = arg + snapmirrorLabel = arg # Check for required options if not volumeName: handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) - + if retentionCount: if not retentionCount.isnumeric(): matchObj = re.match("^(\d+)d$",retentionCount) @@ -900,8 +900,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = 
sys.exit(1) elif target in ("volume", "vol"): - clusterName = None - svmName = None + clusterName = None + svmName = None volumeName = None volumeSize = None guaranteeSpace = False @@ -915,13 +915,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = aggregate = None junction = None readonly = False - tieringPolicy = None + tieringPolicy = None volDP = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:y", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) @@ -931,9 +931,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextCreateVolume) sys.exit(0) elif opt in ("-v", "--svm"): - svmName = arg + svmName = arg elif opt in ("-l", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): volumeName = arg elif opt in ("-s", "--size"): @@ -977,17 +977,17 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Create volume try: create_volume(svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, - unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, + unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP) except (InvalidConfigError, APIConnectionError, 
InvalidVolumeParameterError, MountOperationError): sys.exit(1) elif target in ("snapmirror-relationship", "sm","snapmirror"): - clusterName = None - sourceSvm = None - targetSvm = None - sourceVol = None - targetVol = None + clusterName = None + sourceSvm = None + targetSvm = None + sourceVol = None + targetVol = None policy = 'MirrorAllSnapshots' schedule = "hourly" volumeSize = None @@ -1008,13 +1008,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-t", "--target-svm"): targetSvm = arg elif opt in ("-n", "--target-vol"): - targetVol = arg + targetVol = arg elif opt in ("-s", "--source-svm"): - sourceSvm = arg + sourceSvm = arg elif opt in ("-v", "--source-vol"): - sourceVol = arg + sourceVol = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-c", "--schedule"): schedule = arg elif opt in ("-p", "--policy"): @@ -1029,9 +1029,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if action not in [None,'resync','initialize']: handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) - # Create snapmirror + # Create snapmirror try: - create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, + create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, cluster_name=clusterName, action=action, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) @@ -1047,13 +1047,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None snapshotName = None - svmName = None - clusterName = None + svmName = None + clusterName = None # Get command line options try: opts, args = 
getopt.getopt(sys.argv[3:], "hn:v:s:u:", ["cluster-name=","help", "svm=", "name=", "volume="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) @@ -1067,7 +1067,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-v", "--volume"): volumeName = arg @@ -1084,16 +1084,16 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("volume", "vol"): volumeName = None svmName = None - clusterName = None + clusterName = None force = False - deleteMirror = False + deleteMirror = False deleteNonClone = False mountpoint = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m:p:", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror", "mountpoint="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) @@ -1105,15 +1105,15 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): volumeName = arg - elif opt in ("-p", "--mount-point"): - mountpoint = arg + elif opt in ("-p", "--mountpoint"): + mountpoint = arg elif opt in ("-f", "--force"): force = True elif opt in ("-m", "--delete-mirror"): - deleteMirror = True + deleteMirror = True elif opt in ("--delete-non-clone"): deleteNonClone = True @@ -1167,14 +1167,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("snapmirror-relationship", "snapmirror", "snapmirror-relationships", "snapmirrors","sm"): svmName = None - clusterName = None + clusterName = None # Get 
command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:u:", ["cluster-name=","help", "svm="]) - except Exception as err: + except Exception as err: print(err) - handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) + handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) # Parse command line options for opt, arg in opts: @@ -1184,9 +1184,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg - # List snapmirror relationships + # List snapmirror relationships try: list_snap_mirror_relationships(print_output=True, cluster_name=clusterName) except (InvalidConfigError, APIConnectionError): @@ -1194,13 +1194,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("snapshot", "snap", "snapshots", "snaps"): volumeName = None - clusterName = None - svmName = None + clusterName = None + svmName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:s:u:", ["cluster-name=","help", "volume=","svm="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) @@ -1214,7 +1214,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg # Check for required options if not volumeName: @@ -1229,12 +1229,12 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("volume", "vol", "volumes", "vols"): includeSpaceUsageDetails = False svmName = None - clusterName = None + clusterName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hsv:u:", ["cluster-name=","help", 
"include-space-usage-details","svm="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextListVolumes, invalidOptArg=True) @@ -1248,7 +1248,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--include-space-usage-details"): includeSpaceUsageDetails = True elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg # List volumes try: @@ -1266,19 +1266,19 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Invoke desired action based on target if target in ("volume", "vol"): volumeName = None - svmName = None - clusterName = None - lifName = None + svmName = None + clusterName = None + lifName = None mountpoint = None mount_options = None readonly = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) - + # Parse command line options for opt, arg in opts: if opt in ("-h", "--help"): @@ -1287,9 +1287,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-l", "--lif"): - lifName = arg + lifName = arg elif opt in ("-n", "--name"): volumeName = arg elif opt in ("-m", "--mountpoint"): @@ -1318,7 +1318,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hm:", ["help", "mountpoint="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) @@ -1354,7 +1354,7 @@ def handleInvalidCommand(helpText: str = 
helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hn:p:", ["help", "name=", "paths="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) @@ -1397,7 +1397,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) @@ -1431,7 +1431,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) @@ -1474,7 +1474,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory=", "extra-args="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) @@ -1511,7 +1511,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:e:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) @@ -1550,14 +1550,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None 
snapshotName = None - svmName = None - clusterName = None + svmName = None + clusterName = None force = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hs:n:v:fu:", ["cluster-name=","help", "svm=", "name=", "volume=", "force"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) @@ -1569,9 +1569,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-n", "--name"): snapshotName = arg elif opt in ("-s", "--svm"): - svmName = arg + svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-v", "--volume"): volumeName = arg elif opt in ("-f", "--force"): @@ -1614,7 +1614,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hi:w", ["help", "id=", "wait"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) @@ -1641,14 +1641,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("snapmirror-relationship", "snapmirror"): uuid = None volumeName = None - svmName = None - clusterName = None + svmName = None + clusterName = None waitUntilComplete = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hi:wn:u:v:", ["help", "cluster-name=","svm=","name=","uuid=", "wait"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) @@ -1658,11 +1658,11 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextSyncSnapMirrorRelationship) sys.exit(0) elif opt in ("-v", "--svm"): - svmName = arg + svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg 
+ clusterName = arg elif opt in ("-n", "--name"): - volumeName = arg + volumeName = arg elif opt in ("-i", "--uuid"): uuid = arg elif opt in ("-w", "--wait"): From a745748f4485ec1665f03cddd763e48e2293e6ae Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 16 Jun 2022 10:23:05 -0500 Subject: [PATCH 25/56] Added info for unmount volume for delete function. --- netapp_dataops_traditional/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index 6ad56bb..b31f518 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -262,6 +262,7 @@ The following options/arguments are optional: -v, --svm= Non default SVM name -f, --force Do not prompt user to confirm operation. -m, --delete-mirror Delete/release snapmirror relationship prior to volume deletion + -p, --mountpoint Mount point where volume is locally mounted. If specified volume will be unmounted (optional). --delete-non-clone Enable deletion of volume not created as clone by this tool -h, --help Print help text. @@ -1090,7 +1091,8 @@ def delete_volume( volume_name: str, # Name of volume (required). print_output: bool = False # Denotes whether or not to print messages to the console during execution. cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used - svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used + svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used + mountpoint: str = None, # Mount point where volume is locally mounted. If specified volume will be unmounted (optional). 
delete_mirror: bool = False, # release snapmirror on source volume/delete snapmirror relation on destination volume delete_non_clone: bool = False, # Enable deletion of non clone volume (extra step not to incedently delete important volume) print_output: bool = False # Denotes whether or not to print messages to the console during execution. From 02f2cc1f9de6dd4e10c23e4682e6c46ff765491c Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 16 Jun 2022 10:32:11 -0500 Subject: [PATCH 26/56] Fixed some errors and added mount sudo check. --- .../netapp_dataops/traditional.py | 3734 +++++++---------- 1 file changed, 1541 insertions(+), 2193 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 550b7df..5a9a55f 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1,2347 +1,1695 @@ -"""NetApp DataOps Toolkit for Traditional Environments import module. - -This module provides the public functions available to be imported directly -by applications using the import method of utilizing the toolkit. 
-""" +#!/usr/bin/env python3 import base64 -import functools import json import os import re -import subprocess -import sys -import time -import warnings -import datetime -from concurrent.futures import ThreadPoolExecutor -import boto3 -from botocore.client import Config as BotoConfig -from netapp_ontap import config as netappConfig -from netapp_ontap.error import NetAppRestError -from netapp_ontap.host_connection import HostConnection as NetAppHostConnection -from netapp_ontap.resources import Flexcache as NetAppFlexCache -from netapp_ontap.resources import SnapmirrorRelationship as NetAppSnapmirrorRelationship -from netapp_ontap.resources import SnapmirrorTransfer as NetAppSnapmirrorTransfer -from netapp_ontap.resources import Snapshot as NetAppSnapshot -from netapp_ontap.resources import Volume as NetAppVolume -from netapp_ontap.resources import ExportPolicy as NetAppExportPolicy -from netapp_ontap.resources import SnapshotPolicy as NetAppSnapshotPolicy -from netapp_ontap.resources import CLI as NetAppCLI -import pandas as pd -import requests -from tabulate import tabulate -import yaml - - -__version__ = "2.3.0" - +from getpass import getpass -# Using this decorator in lieu of using a dependency to manage deprecation -def deprecated(func): - @functools.wraps(func) - def warned_func(*args, **kwargs): - warnings.warn("Function {} is deprecated.".format(func.__name__), - category=DeprecationWarning, - stacklevel=2) - return func(*args, **kwargs) - return warned_func - - -class CloudSyncSyncOperationError(Exception) : - """Error that will be raised when a Cloud Sync sync operation fails""" - pass +import sys +sys.path.insert(0, "/root/netapp-dataops-toolkit/netapp_dataops_traditional/netapp_dataops") + +from netapp_dataops import traditional +from netapp_dataops.traditional import ( + clone_volume, + InvalidConfigError, + InvalidVolumeParameterError, + InvalidSnapMirrorParameterError, + InvalidSnapshotParameterError, + APIConnectionError, + mount_volume, + 
unmount_volume, + MountOperationError, + ConnectionTypeError, + list_volumes, + create_snapshot, + create_volume, + delete_snapshot, + delete_volume, + list_cloud_sync_relationships, + list_snap_mirror_relationships, + create_snap_mirror_relationship, + list_snapshots, + prepopulate_flex_cache, + pull_bucket_from_s3, + pull_object_from_s3, + push_directory_to_s3, + push_file_to_s3, + restore_snapshot, + CloudSyncSyncOperationError, + sync_cloud_sync_relationship, + sync_snap_mirror_relationship, + SnapMirrorSyncOperationError +) + + +## Define contents of help text +helpTextStandard = ''' +The NetApp DataOps Toolkit is a Python library that makes it simple for data scientists and data engineers to perform various data management tasks, such as provisioning a new data volume, near-instantaneously cloning a data volume, and near-instantaneously snapshotting a data volume for traceability/baselining. + +Basic Commands: + +\tconfig\t\t\t\tCreate a new config file (a config file is required to perform other commands). +\thelp\t\t\t\tPrint help text. +\tversion\t\t\t\tPrint version details. + +Data Volume Management Commands: +Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. + +\tclone volume\t\t\tCreate a new data volume that is an exact copy of an existing volume. +\tcreate volume\t\t\tCreate a new data volume. +\tdelete volume\t\t\tDelete an existing data volume. +\tlist volumes\t\t\tList all data volumes. +\tmount volume\t\t\tMount an existing data volume locally. Note: on Linux hosts - must be run as root. +\tunmount volume\t\t\tUnmount an existing data volume. Note: on Linux hosts - must be run as root. + +Snapshot Management Commands: +Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. + +\tcreate snapshot\t\t\tCreate a new snapshot for a data volume. 
+\tdelete snapshot\t\t\tDelete an existing snapshot for a data volume. +\tlist snapshots\t\t\tList all snapshots for a data volume. +\trestore snapshot\t\tRestore a snapshot for a data volume (restore the volume to its exact state at the time that the snapshot was created). + +Data Fabric Commands: +Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. + +\tlist cloud-sync-relationships\tList all existing Cloud Sync relationships. +\tsync cloud-sync-relationship\tTrigger a sync operation for an existing Cloud Sync relationship. +\tpull-from-s3 bucket\t\tPull the contents of a bucket from S3. +\tpull-from-s3 object\t\tPull an object from S3. +\tpush-to-s3 directory\t\tPush the contents of a directory to S3 (multithreaded). +\tpush-to-s3 file\t\t\tPush a file to S3. + +Advanced Data Fabric Commands: +Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. + +\tprepopulate flexcache\t\tPrepopulate specific files/directories on a FlexCache volume (ONTAP 9.8 and above ONLY). +\tlist snapmirror-relationships\tList all existing SnapMirror relationships. +\tsync snapmirror-relationship\tTrigger a sync operation for an existing SnapMirror relationship. +\tcreate snapmirror-relationship\tCreate new SnapMirror relationship. +''' +helpTextCloneVolume = ''' +Command: clone volume + +Create a new data volume that is an exact copy of an existing volume. + +Required Options/Arguments: +\t-n, --name=\t\tName of new volume.. +\t-v, --source-volume=\tName of volume to be cloned. + +Optional Options/Arguments: +\t-l, --cluster-name=\tnon default hosting cluster +\t-c, --source-svm=\tnon default source svm name +\t-t, --target-svm=\tnon default target svm name +\t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (if not specified, gid of source volume will be retained) (Note: cannot apply gid of '0' when creating clone). 
+\t-h, --help\t\tPrint help text. +\t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. +\t-s, --source-snapshot=\tName of the snapshot to be cloned (if specified, the clone will be created from a specific snapshot on the source volume as opposed to the current state of the volume). +\t\t\t\twhen snapshot name suffixed with * the latest snapshot will be used (hourly* will use the latest snapshot prefixed with hourly ) +\t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (if not specified, uid of source volume will be retained) (Note: cannot apply uid of '0' when creating clone). +\t-x, --readonly\t\tRead-only option for mounting volumes locally. +\t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. +\t-e, --export-hosts\tcolon(:) seperated hosts/cidrs to to use for export. hosts will be exported for rw and root access +\t-e, --export-policy\texport policy name to attach to the volume, default policy will be used if export-hosts/export-policy not provided +\t-d, --snapshot-policy\tsnapshot-policy to attach to the volume, default snapshot policy will be used if not provided +\t-s, --split\t\tstart clone split after creation +\t-r, --refresh\t\tdelete existing clone if exists before creating a new one +\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists + +Examples (basic usage): +\tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset +\tnetapp_dataops_cli.py clone volume -n project2 -v gold_dataset -s snap1 +\tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset --mountpoint=~/project1 --readonly + + +Examples (advanced usage): +\tnetapp_dataops_cli.py clone volume -n testvol -v gold_dataset -u 1000 -g 1000 -x -j /project1 -d snappolicy1 +\tnetapp_dataops_cli.py clone volume --name=project1 
--source-volume=gold_dataset --source-svm=svm1 --target-svm=svm2 --source-snapshot=daily* --export-hosts 10.5.5.3:host1:10.6.4.0/24 --split +''' +helpTextConfig = ''' +Command: config + +Create a new config file (a config file is required to perform other commands). + +No additional options/arguments required. +''' +helpTextCreateSnapshot = ''' +Command: create snapshot + +Create a new snapshot for a data volume. + +Required Options/Arguments: +\t-v, --volume=\tName of volume. + +Optional Options/Arguments: +\t-u, --cluster-name=\tnon default hosting cluster +\t-s, --svm=\t\tNon defaul svm name. +\t-h, --help\t\tPrint help text. +\t-n, --name=\t\tName of new snapshot. If not specified, will be set to 'netapp_dataops_'. +\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. +\t \tCan be count of snapshots when int (ex. 10) or days when retention is suffixed by d (ex. 10d) +\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot + +Examples: +\tnetapp_dataops_cli.py create snapshot --volume=project1 --name=snap1 +\tnetapp_dataops_cli.py create snapshot -v project2 -n final_dataset +\tnetapp_dataops_cli.py create snapshot --volume=test1 +\tnetapp_dataops_cli.py create snapshot -v project2 -n daily_consistent -r 7 -l daily +\tnetapp_dataops_cli.py create snapshot -v project2 -n daily_for_month -r 30d -l daily +''' +helpTextCreateVolume = ''' +Command: create volume + +Create a new data volume. + +Required Options/Arguments: +\t-n, --name=\t\tName of new volume. +\t-s, --size=\t\tSize of new volume. Format: '1024MB', '100GB', '10TB', etc. + +Optional Options/Arguments: +\t-l, --cluster-name=\tnon default hosting cluster +\t-v, --svm=\t\tnon default svm name +\t-a, --aggregate=\tAggregate to use when creating new volume (flexvol) or optional comma seperated aggrlist when specific aggregates are required for FG. +\t-d, --snapshot-policy=\tSnapshot policy to apply for new volume. 
+\t-e, --export-policy=\tNFS export policy to use when exporting new volume. +\t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (ex. '0' for root group). +\t-h, --help\t\tPrint help text. +\t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. +\t-p, --permissions=\tUnix filesystem permissions to apply when creating new volume (ex. '0777' for full read/write permissions for all users and groups). +\t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. do not use thin provisioning). +\t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). +\t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). +\t-x, --readonly\t\tRead-only option for mounting volumes locally. +\t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. +\t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). 
+\t-y, --dp\t\tCreate volume as DP volume (the volume will be used as snapmirror target) + + +Examples (basic usage): +\tnetapp_dataops_cli.py create volume --name=project1 --size=10GB +\tnetapp_dataops_cli.py create volume -n datasets -s 10TB +\tsudo -E netapp_dataops_cli.py create volume --name=project2 --size=2TB --mountpoint=~/project2 --readonly + +Examples (advanced usage): +\tsudo -E netapp_dataops_cli.py create volume --name=project1 --size=10GB --permissions=0755 --type=flexvol --mountpoint=~/project1 --readonly --junction=/project1 +\tsudo -E netapp_dataops_cli.py create volume --name=project2_flexgroup --size=2TB --type=flexgroup --mountpoint=/mnt/project2 +\tnetapp_dataops_cli.py create volume --name=testvol --size=10GB --type=flexvol --aggregate=n2_data +\tnetapp_dataops_cli.py create volume -n testvol -s 10GB -t flexvol -p 0755 -u 1000 -g 1000 -j /project1 +\tsudo -E netapp_dataops_cli.py create volume -n vol1 -s 5GB -t flexvol --export-policy=team1 -m /mnt/vol1 +\tnetapp_dataops_cli.py create vol -n test2 -s 10GB -t flexvol --snapshot-policy=default --tiering-policy=auto +''' +helpTextDeleteSnapshot = ''' +Command: delete snapshot + +Delete an existing snapshot for a data volume. + +Required Options/Arguments: +\t-n, --name=\tName of snapshot to be deleted. +\t-v, --volume=\tName of volume. + +Optional Options/Arguments: +\t-u, --cluster-name=\tNon default hosting cluster +\t-s, --svm=\t\tNon default svm +\t-h, --help\t\tPrint help text. + +Examples: +\tnetapp_dataops_cli.py delete snapshot --volume=project1 --name=snap1 +\tnetapp_dataops_cli.py delete snapshot -v project2 -n netapp_dataops_20201113_221917 +''' +helpTextDeleteVolume = ''' +Command: delete volume + +Delete an existing data volume. + +Required Options/Arguments: +\t-n, --name=\tName of volume to be deleted. + +Optional Options/Arguments: +\t-u, --cluster-name=\tnon default hosting cluster +\t-v, --svm \t\tnon default SVM name +\t-f, --force\t\tDo not prompt user to confirm operation. 
+\t-p, --mountpoint\t\tMount point for the locally mounted volume. +\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion +\t --delete-non-clone\tEnable deletion of volume not created as clone by this tool +\t-h, --help\t\tPrint help text. + +Examples: +\tnetapp_dataops_cli.py delete volume --name=project1 +\tnetapp_dataops_cli.py delete volume -n project2 +''' + +helpTextUnmountVolume = ''' +Command: unmount volume + +Unmount an existing data volume that is currently mounted locally. + +Required Options/Arguments: +\t-m, --mountpoint=\tMountpoint where volume is mounted at. + +Optional Options/Arguments: +\t-h, --help\t\tPrint help text. + +Examples: +\tnetapp_dataops_cli.py unmount volume --mountpoint=/project2 +\tnetapp_dataops_cli.py unmount volume -m /project2 +''' + +helpTextListCloudSyncRelationships = ''' +Command: list cloud-sync-relationships + +List all existing Cloud Sync relationships. + +No additional options/arguments required. +''' +helpTextListSnapMirrorRelationships = ''' +Command: list snapmirror-relationships + +List all SnapMirror relationships. + +Optional Options/Arguments: +\t-u, --cluster-name=\tNon default hosting cluster +\t-s, --svm=\t\tNon default svm. +\t-h, --help\t\tPrint help text. +''' +helpTextListSnapshots = ''' +Command: list snapshots + +List all snapshots for a data volume. + +Required Options/Arguments: +\t-v, --volume=\tName of volume. + +Optional Options/Arguments: +\t-u, --cluster-name=\tNon default hosting cluster +\t-s, --svm=\t\tNon default svm. +\t-h, --help\t\tPrint help text. +Examples: +\tnetapp_dataops_cli.py list snapshots --volume=project1 +\tnetapp_dataops_cli.py list snapshots -v test1 +''' +helpTextListVolumes = ''' +Command: list volumes + +List all data volumes. + +No options/arguments are required. + +Optional Options/Arguments: +\t-u, --cluster-name=\t\t\tnon default hosting cluster +\t-v, --svm=\t\t\t\tlist volume on non default svm +\t-h, --help\t\t\t\tPrint help text. 
+\t-s, --include-space-usage-details\tInclude storage space usage details in output (see README for explanation). -class ConnectionTypeError(Exception): - """Error that will be raised when an invalid connection type is given""" - pass +Examples: +\tnetapp_dataops_cli.py list volumes +\tnetapp_dataops_cli.py list volumes --include-space-usage-details +''' +helpTextMountVolume = ''' +Command: mount volume +Mount an existing data volume locally. -class InvalidConfigError(Exception): - """Error that will be raised when the config file is invalid or missing""" - pass +Requirement: On Linux hosts, must be run as root. +Required Options/Arguments: +\t-m, --mountpoint=\tLocal mountpoint to mount volume at. +\t-n, --name=\t\tName of volume. -class InvalidSnapMirrorParameterError(Exception) : - """Error that will be raised when an invalid SnapMirror parameter is given""" - pass +Optional Options/Arguments: +\t-v, --svm \t\tnon default SVM name +\t-l, --lif \t\tnon default lif (nfs server ip/name) +\t-h, --help\t\tPrint help text. +\t-x, --readonly\t\tMount volume locally as read-only. +\t-o, --options\t\tEnables users to Specify custom NFS mount options. +Examples: +\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 +\tsudo -E netapp_dataops_cli.py mount volume -m ~/testvol -n testvol -x +\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly +\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly --options=rsize=262144,wsize=262144,nconnect=16 +''' +helpTextPullFromS3Bucket = ''' +Command: pull-from-s3 bucket -class InvalidSnapshotParameterError(Exception): - """Error that will be raised when an invalid snapshot parameter is given""" - pass +Pull the contents of a bucket from S3 (multithreaded). +Note: To pull to a data volume, the volume must be mounted locally. 
-class InvalidVolumeParameterError(Exception): - """Error that will be raised when an invalid volume parameter is given""" - pass +Warning: This operation has not been tested at scale and may not be appropriate for extremely large datasets. +Required Options/Arguments: +\t-b, --bucket=\t\tS3 bucket to pull from. +\t-d, --directory=\tLocal directory to save contents of bucket to. -class MountOperationError(Exception): - """Error that will be raised when a mount operation fails""" - pass +Optional Options/Arguments: +\t-h, --help\t\tPrint help text. +\t-p, --key-prefix=\tObject key prefix (pull will be limited to objects with key that starts with this prefix). +Examples: +\tnetapp_dataops_cli.py pull-from-s3 bucket --bucket=project1 --directory=/mnt/project1 +\tnetapp_dataops_cli.py pull-from-s3 bucket -b project1 -p project1/ -d ./project1/ +''' +helpTextPullFromS3Object = ''' +Command: pull-from-s3 object -class SnapMirrorSyncOperationError(Exception) : - """Error that will be raised when a SnapMirror sync operation fails""" - pass +Pull an object from S3. +Note: To pull to a data volume, the volume must be mounted locally. -class APIConnectionError(Exception) : - '''Error that will be raised when an API connection cannot be established''' - pass +Required Options/Arguments: +\t-b, --bucket=\t\tS3 bucket to pull from. +\t-k, --key=\t\tKey of S3 object to pull. +Optional Options/Arguments: +\t-f, --file=\t\tLocal filepath (including filename) to save object to (if not specified, value of -k/--key argument will be used) +\t-h, --help\t\tPrint help text. 
-def _print_api_response(response: requests.Response): - print("API Response:") - print("Status Code: ", response.status_code) - print("Header: ", response.headers) - if response.text: - print("Body: ", response.text) +Examples: +\tnetapp_dataops_cli.py pull-from-s3 object --bucket=project1 --key=data.csv --file=./project1/data.csv +\tnetapp_dataops_cli.py pull-from-s3 object -b project1 -k data.csv +''' +helpTextPushToS3Directory = ''' +Command: push-to-s3 directory +Push the contents of a directory to S3 (multithreaded). -def _download_from_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, - s3CACertBundle: str, s3Bucket: str, s3ObjectKey: str, localFile: str, print_output: bool = False): - # Instantiate S3 session - try: - s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, - s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, - s3CACertBundle=s3CACertBundle, print_output=print_output) - except Exception as err: - if print_output: - print("Error: S3 API error: ", err) - raise APIConnectionError(err) - - if print_output: - print( - "Downloading object '" + s3ObjectKey + "' from bucket '" + s3Bucket + "' and saving as '" + localFile + "'.") - - # Create directories that don't exist - if localFile.find(os.sep) != -1: - dirs = localFile.split(os.sep) - dirpath = os.sep.join(dirs[:len(dirs) - 1]) - if not os.path.exists(dirpath): - os.makedirs(dirpath) - - # Download the file - try: - s3.Object(s3Bucket, s3ObjectKey).download_file(localFile) - except Exception as err: - if print_output: - print("Error: S3 API error: ", err) - raise APIConnectionError(err) - - -def _get_cloud_central_access_token(refreshToken: str, print_output: bool = False) -> str: - # Define parameters for API call - url = "https://netapp-cloud-account.auth0.com/oauth/token" - headers = { - "Content-Type": "application/json" - } - data = { - "grant_type": "refresh_token", - "refresh_token": refreshToken, - 
"client_id": "Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC" - } - - # Call API to optain access token - response = requests.post(url=url, headers=headers, data=json.dumps(data)) - - # Parse response to retrieve access token - try: - responseBody = json.loads(response.text) - accessToken = responseBody["access_token"] - except: - errorMessage = "Error obtaining access token from Cloud Sync API" - if print_output: - print("Error:", errorMessage) - _print_api_response(response) - raise APIConnectionError(errorMessage, response) +Note: To push from a data volume, the volume must be mounted locally. - return accessToken +Warning: This operation has not been tested at scale and may not be appropriate for extremely large datasets. -def _get_cloud_sync_access_parameters(refreshToken: str, print_output: bool = False) -> (str, str): - try: - accessToken = _get_cloud_central_access_token(refreshToken=refreshToken, print_output=print_output) - except APIConnectionError: - raise +Required Options/Arguments: +\t-b, --bucket=\t\tS3 bucket to push to. +\t-d, --directory=\tLocal directory to push contents of. - # Define parameters for API call - url = "https://cloudsync.netapp.com/api/accounts" - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer " + accessToken - } +Optional Options/Arguments: +\t-e, --extra-args=\tExtra args to apply to newly-pushed S3 objects (For details on this field, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html#the-extraargs-parameter). +\t-h, --help\t\tPrint help text. +\t-p, --key-prefix=\tPrefix to add to key for newly-pushed S3 objects (Note: by default, key will be local filepath relative to directory being pushed). 
- # Call API to obtain account ID - response = requests.get(url=url, headers=headers) +Examples: +\tnetapp_dataops_cli.py push-to-s3 directory --bucket=project1 --directory=/mnt/project1 +\tnetapp_dataops_cli.py push-to-s3 directory -b project1 -d /mnt/project1 -p project1/ -e '{"Metadata": {"mykey": "myvalue"}}' +''' +helpTextPushToS3File = ''' +Command: push-to-s3 file - # Parse response to retrieve account ID - try: - responseBody = json.loads(response.text) - accountId = responseBody[0]["accountId"] - except: - errorMessage = "Error obtaining account ID from Cloud Sync API" - if print_output: - print("Error:", errorMessage) - _print_api_response(response) - raise APIConnectionError(errorMessage, response) +Push a file to S3. - # Return access token and account ID - return accessToken, accountId +Note: To push from a data volume, the volume must be mounted locally. +Required Options/Arguments: +\t-b, --bucket=\t\tS3 bucket to push to. +\t-f, --file=\t\tLocal file to push. -def _instantiate_connection(config: dict, connectionType: str = "ONTAP", print_output: bool = False): - if connectionType == "ONTAP": - ## Connection details for ONTAP cluster - try: - ontapClusterMgmtHostname = config["hostname"] - ontapClusterAdminUsername = config["username"] - ontapClusterAdminPasswordBase64 = config["password"] - verifySSLCert = config["verifySSLCert"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - # Decode base64-encoded password - ontapClusterAdminPasswordBase64Bytes = ontapClusterAdminPasswordBase64.encode("ascii") - ontapClusterAdminPasswordBytes = base64.b64decode(ontapClusterAdminPasswordBase64Bytes) - ontapClusterAdminPassword = ontapClusterAdminPasswordBytes.decode("ascii") - - # Instantiate connection to ONTAP cluster - netappConfig.CONNECTION = NetAppHostConnection( - host=ontapClusterMgmtHostname, - username=ontapClusterAdminUsername, - password=ontapClusterAdminPassword, - verify=verifySSLCert - ) +Optional 
Options/Arguments: +\t-e, --extra-args=\tExtra args to apply to newly-pushed S3 object (For details on this field, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html#the-extraargs-parameter). +\t-h, --help\t\tPrint help text. +\t-k, --key=\t\tKey to assign to newly-pushed S3 object (if not specified, key will be set to value of -f/--file argument). - else: - raise ConnectionTypeError() +Examples: +\tnetapp_dataops_cli.py push-to-s3 file --bucket=project1 --file=data.csv +\tnetapp_dataops_cli.py push-to-s3 file -b project1 -k data.csv -f /mnt/project1/data.csv -e '{"Metadata": {"mykey": "myvalue"}}' +''' +helpTextPrepopulateFlexCache = ''' +Command: prepopulate flexcache + +Prepopulate specific files/directories on a FlexCache volume. + +Compatibility: ONTAP 9.8 and above ONLY + +Required Options/Arguments: +\t-n, --name=\tName of FlexCache volume. +\t-p, --paths=\tComma-separated list of dirpaths/filepaths to prepopulate. +Optional Options/Arguments: +\t-h, --help\tPrint help text. + +Examples: +\tnetapp_dataops_cli.py prepopulate flexcache --name=project1 --paths=/datasets/project1,/datasets/project2 +\tnetapp_dataops_cli.py prepopulate flexcache -n test1 -p /datasets/project1,/datasets/project2 +''' +helpTextRestoreSnapshot = ''' +Command: restore snapshot -def _instantiate_s3_session(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, print_output: bool = False): - # Instantiate session - session = boto3.session.Session(aws_access_key_id=s3AccessKeyId, aws_secret_access_key=s3SecretAccessKey) - config = BotoConfig(signature_version='s3v4') +Restore a snapshot for a data volume (restore the volume to its exact state at the time that the snapshot was created). 
- if s3VerifySSLCert: - if s3CACertBundle: - s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=s3CACertBundle, config=config) - else: - s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, config=config) - else: - s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=False, config=config) +Required Options/Arguments: +\t-n, --name=\tName of snapshot to be restored. +\t-v, --volume=\tName of volume. - return s3 +Optional Options/Arguments: +\t-u, --cluster-name=\tNon default hosting cluster +\t-s, --svm=\t\tNon default svm. +\t-f, --force\t\tDo not prompt user to confirm operation. +\t-h, --help\t\tPrint help text. + +Examples: +\tnetapp_dataops_cli.py restore snapshot --volume=project1 --name=snap1 +\tnetapp_dataops_cli.py restore snapshot -v project2 -n netapp_dataops_20201113_221917 +''' +helpTextSyncCloudSyncRelationship = ''' +Command: sync cloud-sync-relationship +Trigger a sync operation for an existing Cloud Sync relationship. -def _print_invalid_config_error() : - print("Error: Missing or invalid config file. Run `netapp_dataops_cli.py config` to create config file.") +Tip: Run `netapp_dataops_cli.py list cloud-sync-relationships` to obtain relationship ID. +Required Options/Arguments: +\t-i, --id=\tID of the relationship for which the sync operation is to be triggered. + +Optional Options/Arguments: +\t-h, --help\tPrint help text. +\t-w, --wait\tWait for sync operation to complete before exiting. + +Examples: +\tnetapp_dataops_cli.py sync cloud-sync-relationship --id=5ed00996ca85650009a83db2 +\tnetapp_dataops_cli.py sync cloud-sync-relationship -i 5ed00996ca85650009a83db2 -w +''' +helpTextSyncSnapMirrorRelationship = ''' +Command: sync snapmirror-relationship + +Trigger a sync operation for an existing SnapMirror relationship. 
-def _retrieve_config(configDirPath: str = "~/.netapp_dataops", configFilename: str = "config.json", - print_output: bool = False) -> dict: +Tip: Run `netapp_dataops_cli.py list snapmirror-relationships` to obtain relationship UUID. + +Required Options/Arguments: +\t-i, --uuid=\tUUID of the relationship for which the sync operation is to be triggered. +or +\t-n, --name=\tName of target volume to be sync . + +Optional Options/Arguments: +\t-u, --cluster-name=\tnon default hosting cluster +\t-v, --svm \t\tnon default target SVM name +\t-h, --help\t\tPrint help text. +\t-w, --wait\t\tWait for sync operation to complete before exiting. + +Examples: +\tnetapp_dataops_cli.py sync snapmirror-relationship --uuid=132aab2c-4557-11eb-b542-005056932373 +\tnetapp_dataops_cli.py sync snapmirror-relationship -i 132aab2c-4557-11eb-b542-005056932373 -w +\tnetapp_dataops_cli.py sync snapmirror-relationship -u cluster1 -v svm1 -n vol1 -w +''' + +helpTextCreateSnapMirrorRelationship = ''' +Command: create snapmirror-relationship + +create snapmirror relationship + +Required Options/Arguments: +\t-n, --target-vol=\tName of target volume +\t-s, --source-svm=\tSource SVM name +\t-v, --source-vol=\tSource volume name + +Optional Options/Arguments: +\t-u, --cluster-name=\tnon default hosting cluster +\t-t, --target-svm=\tnon default target SVM +\t-c, --schedule=\t\tnon default schedule (default is hourly) +\t-p, --policy=\t\tnon default policy (default is MirrorAllSnapshots +\t-a, --action=\t\tresync,initialize following creation +\t-h, --help\t\tPrint help text. 
+ +Examples: +\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly +\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly -a resync +''' + +## Function for creating config file +def createConfig(configDirPath: str = "~/.netapp_dataops", configFilename: str = "config.json", connectionType: str = "ONTAP"): + # Check to see if user has an existing config file configDirPath = os.path.expanduser(configDirPath) configFilePath = os.path.join(configDirPath, configFilename) - try: - with open(configFilePath, 'r') as configFile: - # Read connection details from config file; read into dict - config = json.load(configFile) - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - return config - - -def _retrieve_cloud_central_refresh_token(print_output: bool = False) -> str: - # Retrieve refresh token from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - refreshTokenBase64 = config["cloudCentralRefreshToken"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - # Decode base64-encoded refresh token - refreshTokenBase64Bytes = refreshTokenBase64.encode("ascii") - refreshTokenBytes = base64.b64decode(refreshTokenBase64Bytes) - refreshToken = refreshTokenBytes.decode("ascii") - - return refreshToken - - -def _retrieve_s3_access_details(print_output: bool = False) -> (str, str, str, bool, str): - # Retrieve refresh token from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - s3Endpoint = config["s3Endpoint"] - s3AccessKeyId = config["s3AccessKeyId"] - s3SecretAccessKeyBase64 = config["s3SecretAccessKey"] - s3VerifySSLCert = config["s3VerifySSLCert"] - s3CACertBundle = config["s3CACertBundle"] - except: - if 
print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - # Decode base64-encoded refresh token - s3SecretAccessKeyBase64Bytes = s3SecretAccessKeyBase64.encode("ascii") - s3SecretAccessKeyBytes = base64.b64decode(s3SecretAccessKeyBase64Bytes) - s3SecretAccessKey = s3SecretAccessKeyBytes.decode("ascii") - - return s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle - - -def _upload_to_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, - s3Bucket: str, localFile: str, s3ObjectKey: str, s3ExtraArgs: str = None, print_output: bool = False): - # Instantiate S3 session - try: - s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, - s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, - s3CACertBundle=s3CACertBundle, print_output=print_output) - except Exception as err: - if print_output: - print("Error: S3 API error: ", err) - raise APIConnectionError(err) - - # Upload file - if print_output: - print("Uploading file '" + localFile + "' to bucket '" + s3Bucket + "' and applying key '" + s3ObjectKey + "'.") - - try: - if s3ExtraArgs: - s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile, ExtraArgs=json.loads(s3ExtraArgs)) - else: - s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile) - except Exception as err: - if print_output: - print("Error: S3 API error: ", err) - raise APIConnectionError(err) - - -def _convert_bytes_to_pretty_size(size_in_bytes: str, num_decimal_points: int = 2) -> str : - # Convert size in bytes to "pretty" size (size in KB, MB, GB, or TB) - prettySize = float(size_in_bytes) / 1024 - if prettySize >= 1024: - prettySize = float(prettySize) / 1024 - if prettySize >= 1024: - prettySize = float(prettySize) / 1024 - if prettySize >= 1024: - prettySize = float(prettySize) / 1024 - prettySize = round(prettySize, 2) - prettySize = str(prettySize) + "TB" - else: - prettySize = round(prettySize, 2) - 
prettySize = str(prettySize) + "GB" - else: - prettySize = round(prettySize, 2) - prettySize = str(prettySize) + "MB" - else: - prettySize = round(prettySize, 2) - prettySize = str(prettySize) + "KB" - - return prettySize - - -# -# Public importable functions specific to the traditional package -# - - -def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: str = None, source_snapshot_name: str = None, - source_svm: str = None, target_svm: str = None, export_hosts: str = None, export_policy: str = None, split: bool = False, - unix_uid: str = None, unix_gid: str = None, mountpoint: str = None, junction: str= None, readonly: bool = False, - snapshot_policy: str = None, refresh: bool = False, svm_dr_unprotect: bool = False, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve values from config file if not passed into function - try: - sourcesvm = config["svm"] - if source_svm: - sourcesvm = source_svm - - targetsvm = sourcesvm - if target_svm: - targetsvm = target_svm - - if not unix_uid: - unix_uid = config["defaultUnixUID"] - if not unix_gid: - unix_gid = config["defaultUnixGID"] - - except Exception as e: - if print_output: - print(e) - _print_invalid_config_error() - raise InvalidConfigError() - - # Check unix uid for validity - try: - unix_uid = int(unix_uid) - except: - if print_output: - print("Error: Invalid unix uid specified. Value be an integer. 
Example: '0' for root user.") - raise InvalidVolumeParameterError("unixUID") - - # Check unix gid for validity - try: - unix_gid = int(unix_gid) - except: - if print_output: - print("Error: Invalid unix gid specified. Value must be an integer. Example: '0' for root group.") - raise InvalidVolumeParameterError("unixGID") - - #check if clone volume already exists - try: - currentVolume = NetAppVolume.find(name=new_volume_name, svm=targetsvm) - if currentVolume and not refresh: - if print_output: - print("Error: clone:"+new_volume_name+" already exists.") - raise InvalidVolumeParameterError("name") - - #for refresh we want to keep the existing policy - if currentVolume and refresh and not export_policy and not export_hosts: - export_policy = currentVolume.nas.export_policy.name - - # if refresh and not provided new snapshot_policy - if currentVolume and refresh and not snapshot_policy: - snapshot_policy = currentVolume.snapshot_policy.name - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #delete existing clone when refresh - try: - if currentVolume and refresh: - if "CLONENAME:" in currentVolume.comment: - delete_volume(volume_name=new_volume_name, cluster_name=cluster_name, svm_name=target_svm, delete_mirror=True, print_output=True) - else: - if print_output: - print("Error: refresh clone is only supported when existing clone created using the tool (based on volume comment)") - raise InvalidVolumeParameterError("name") - except: - print("Error: could not delete previous clone") - raise InvalidVolumeParameterError("name") - - try: - if not snapshot_policy : - snapshot_policy = config["defaultSnapshotPolicy"] - except: - print("Error: default snapshot policy could not be found in config file") - raise InvalidVolumeParameterError("name") - - # check export policies - try: - if not export_policy and not export_hosts: - export_policy = config["defaultExportPolicy"] - elif export_policy: 
- currentExportPolicy = NetAppExportPolicy.find(name=export_policy, svm=targetsvm) - if not currentExportPolicy: - if print_output: - print("Error: export policy:"+export_policy+" dones not exists.") - raise InvalidVolumeParameterError("name") - elif export_hosts: - export_policy = "netapp_dataops_"+new_volume_name - currentExportPolicy = NetAppExportPolicy.find(name=export_policy, svm=targetsvm) - if currentExportPolicy: - currentExportPolicy.delete() - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #exists check if snapshot-policy - try: - snapshotPoliciesDetails = NetAppSnapshotPolicy.get_collection(**{"name":snapshot_policy}) - clusterSnapshotPolicy = False - svmSnapshotPolicy = False - for snapshotPolicyDetails in snapshotPoliciesDetails: - if str(snapshotPolicyDetails.name) == snapshot_policy: - try: - if str(snapshotPolicyDetails.svm.name) == targetsvm: - svmSnapshotPolicy = True - except: - clusterSnapshotPolicy = True - - if not clusterSnapshotPolicy and not svmSnapshotPolicy: - if print_output: - print("Error: snapshot-policy:"+snapshot_policy+" could not be found") - raise InvalidVolumeParameterError("snapshot_policy") - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Create volume - if print_output: - print("Creating clone volume '" + targetsvm+':'+new_volume_name + "' from source volume '" + sourcesvm+':'+source_volume_name + "'.") - - try: - # Retrieve source volume - sourceVolume = NetAppVolume.find(name=source_volume_name, svm=sourcesvm) - if not sourceVolume: - if print_output: - print("Error: Invalid source volume name.") - raise InvalidVolumeParameterError("name") - - # Create option to choose junction path. 
- if junction: - junction=junction - else: - junction = "/"+new_volume_name - - - # Construct dict representing new volume - newVolumeDict = { - "name": new_volume_name, - "svm": {"name": targetsvm}, - "nas": { - "path": junction - }, - "clone": { - "is_flexclone": True, - "parent_svm": { - #"name": sourceVolume.svm.name, - "name": sourcesvm, - #"uuid": sourceVolume.svm.uuid - }, - "parent_volume": { - "name": sourceVolume.name, - "uuid": sourceVolume.uuid - } - } - } - - if unix_uid != 0: - newVolumeDict["nas"]["uid"] = unix_uid - else: - if print_output: - print("Warning: Cannot apply uid of '0' when creating clone; uid of source volume will be retained.") - if unix_gid != 0: - newVolumeDict["nas"]["gid"] = unix_gid + if os.path.isfile(configFilePath): + print("You already have an existing config file. Creating a new config file will overwrite this existing config.") + # If existing config file is present, ask user if they want to proceed + # Verify value entered; prompt user to re-enter if invalid + while True: + proceed = input("Are you sure that you want to proceed? 
(yes/no): ") + if proceed in ("yes", "Yes", "YES"): + break + elif proceed in ("no", "No", "NO"): + sys.exit(0) else: - if print_output: - print("Warning: Cannot apply gid of '0' when creating clone; gid of source volume will be retained.") - - # Add source snapshot details to volume dict if specified - if source_snapshot_name and not source_snapshot_name.endswith("*"): - # Retrieve source snapshot - sourceSnapshot = NetAppSnapshot.find(sourceVolume.uuid, name=source_snapshot_name) - if not sourceSnapshot: - if print_output: - print("Error: Invalid source snapshot name.") - raise InvalidSnapshotParameterError("name") - - - # Append source snapshot details to volume dict - newVolumeDict["clone"]["parent_snapshot"] = { - "name": sourceSnapshot.name, - "uuid": sourceSnapshot.uuid - } - - if source_snapshot_name and source_snapshot_name.endswith("*"): - source_snapshot_prefix = source_snapshot_name[:-1] - latest_source_snapshot = None - latest_source_snapshot_uuid = None - - # Retrieve all source snapshot from last to 1st - for snapshot in NetAppSnapshot.get_collection(sourceVolume.uuid): - snapshot.get() - if snapshot.name.startswith(source_snapshot_prefix): - latest_source_snapshot = snapshot.name - latest_source_snapshot_uuid = snapshot.uuid - - if not latest_source_snapshot: - if print_output: - print("Error: Could not find snapshot prefixed by '"+source_snapshot_prefix+"'.") - raise InvalidSnapshotParameterError("name") - # Append source snapshot details to volume dict - newVolumeDict["clone"]["parent_snapshot"] = { - "name": latest_source_snapshot, - "uuid": latest_source_snapshot_uuid - } - print("Snapshot '" + latest_source_snapshot+ "' will be used to create the clone.") - - # set clone volume commnet parameter - comment = 'PARENTSVM:'+sourcesvm+',PARENTVOL:'+newVolumeDict["clone"]["parent_volume"]["name"]+',CLONESVM:'+targetsvm+',CLONENAME:'+newVolumeDict["name"] - if source_snapshot_name: comment += ' SNAP:'+newVolumeDict["clone"]["parent_snapshot"]["name"] 
- comment += " netapp-dataops" - - newVolumeDict["comment"] = comment - - # Create new volume clone - newVolume = NetAppVolume.from_dict(newVolumeDict) - newVolume.post(poll=True, poll_timeout=120) - if print_output: - print("Clone volume created successfully.") - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - if svm_dr_unprotect: - try: - if print_output: - print("Disabling svm-dr protection") - response = NetAppCLI().execute("volume modify",vserver=targetsvm,volume=new_volume_name,body={"vserver_dr_protection": "unprotected"}) - except NetAppRestError as err: - if "volume is not part of a Vserver DR configuration" in str(err): - if print_output: - print("Warning: could not disable svm-dr-protection since volume is not protected using svm-dr") - else: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #create custom export policy if needed - if export_hosts: - try: - if print_output: - print("Creating export-policy:"+export_policy) - # Construct dict representing new export policy - newExportPolicyDict = { - "name" : export_policy, - "svm": {"name": targetsvm}, - "rules": [] - } - for client in export_hosts.split(":"): - newExportPolicyDict['rules'].append({ "clients": [{"match": client }], "ro_rule": ["sys"], "rw_rule": ["sys"], "superuser": ["sys"]}) - - # Create new export policy - newExportPolicy = NetAppExportPolicy.from_dict(newExportPolicyDict) - newExportPolicy.post(poll=True, poll_timeout=120) - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #set export policy and snapshot policy - try: - if print_output: - print("Setting export-policy:"+export_policy+ " snapshot-policy:"+snapshot_policy) - volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) - updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) - 
updatedVolumeDetails.nas = {"export_policy": {"name": export_policy}} - updatedVolumeDetails.snapshot_policy = {"name": snapshot_policy} - updatedVolumeDetails.patch(poll=True, poll_timeout=120) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #split clone - try: - if split: - if print_output: - print("Splitting clone") - volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) - #get volume details - updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) - updatedVolumeDetails.clone = {"split_initiated": True} - updatedVolumeDetails.patch() - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Optionally mount newly created volume - if mountpoint: - try: - mount_volume(volume_name=new_volume_name, svm_name=targetsvm, mountpoint=mountpoint, readonly=readonly, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - if print_output: - print("Error: Error mounting clone volume.") - raise - - else: - raise ConnectionTypeError() - - -def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = None, snapshot_name: str = None, retention_count: int = 0, retention_days: bool = False, snapmirror_label: str = None, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - if not 
snapshot_name: - snapshot_name = "netapp_dataops" - - # Retrieve svm from config file - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - snapshot_name_original = snapshot_name - # Set snapshot name if not passed into function or retention provided - if not snapshot_name or int(retention_count) > 0: - timestamp = '.'+datetime.datetime.today().strftime("%Y-%m-%d_%H%M%S") - snapshot_name += timestamp - - if print_output: - print("Creating snapshot '" + snapshot_name + "'.") - - try: - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - # create snapshot dict - snapshotDict = { - 'name': snapshot_name, - 'volume': volume.to_dict() - } - if snapmirror_label: - if print_output: - print("Setting snapmirror label as:"+snapmirror_label) - snapshotDict['snapmirror_label'] = snapmirror_label - - # Create snapshot - snapshot = NetAppSnapshot.from_dict(snapshotDict) - snapshot.post(poll=True) - - if print_output: - print("Snapshot created successfully.") - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - #delete snapshots exceeding retention count if provided - retention_count = int(retention_count) - if retention_count > 0: - try: - # Retrieve all source snapshot from last to 1st - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - if retention_days: - retention_date = datetime.datetime.today() - datetime.timedelta(days=retention_count) - - last_snapshot_list = [] - snapshot_list = [] - for snapshot in NetAppSnapshot.get_collection(volume.uuid): - snapshot.get() - if 
snapshot.name.startswith(snapshot_name_original+'.'): - if not retention_days: - snapshot_list.append(snapshot.name) - last_snapshot_list.append(snapshot.name) - if len(last_snapshot_list) > retention_count: - last_snapshot_list.pop(0) - else: - rx = r'^{0}\.(.+)$'.format(snapshot_name_original) - matchObj = re.match(rx,snapshot.name) - if matchObj: - snapshot_date = matchObj.group(1) - snapshot_date_obj = datetime.datetime.strptime(snapshot_date, "%Y-%m-%d_%H%M%S") - snapshot_list.append(snapshot.name) - last_snapshot_list.append(snapshot.name) - if snapshot_date_obj < retention_date: - last_snapshot_list.pop(0) - - #delete snapshots not in retention - for snap in snapshot_list: - if snap not in last_snapshot_list: - delete_snapshot(volume_name=volume_name, svm_name = svm, snapshot_name=snap, skip_owned=True, print_output=True) - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - else: - raise ConnectionTypeError() - - -def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = False, cluster_name: str = None, svm_name: str = None, - volume_type: str = "flexvol", unix_permissions: str = "0777", - unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", - snapshot_policy: str = None, aggregate: str = None, mountpoint: str = None, junction: str = None, readonly: bool = False, - print_output: bool = False, tiering_policy: str = None, vol_dp: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() + print("Invalid value. 
Must enter 'yes' or 'no'.") - if cluster_name: - config["hostname"] = cluster_name + # Instantiate dict for storing connection details + config = dict() if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve values from config file if not passed into function - try: - svm = config["svm"] - if svm_name: - svm = svm_name - if not volume_type : - volume_type = config["defaultVolumeType"] - if not unix_permissions : - unix_permissions = config["defaultUnixPermissions"] - if not unix_uid : - unix_uid = config["defaultUnixUID"] - if not unix_gid : - unix_gid = config["defaultUnixGID"] - if not export_policy : - export_policy = config["defaultExportPolicy"] - if not snapshot_policy : - snapshot_policy = config["defaultSnapshotPolicy"] - if not aggregate and volume_type == 'flexvol' : - aggregate = config["defaultAggregate"] - except: - if print_output : - _print_invalid_config_error() - raise InvalidConfigError() - - # Check volume type for validity - if volume_type not in ("flexvol", "flexgroup"): - if print_output: - print("Error: Invalid volume type specified. Acceptable values are 'flexvol' and 'flexgroup'.") - raise InvalidVolumeParameterError("size") - - # Check unix permissions for validity - if not re.search("^0[0-7]{3}", unix_permissions): - if print_output: - print("Error: Invalid unix permissions specified. Acceptable values are '0777', '0755', '0744', etc.") - raise InvalidVolumeParameterError("unixPermissions") - - # Check unix uid for validity - try: - unix_uid = int(unix_uid) - except: - if print_output : - print("Error: Invalid unix uid specified. Value be an integer. Example: '0' for root user.") - raise InvalidVolumeParameterError("unixUID") - - # Check unix gid for validity - try: - unix_gid = int(unix_gid) - except: - if print_output: - print("Error: Invalid unix gid specified. 
Value must be an integer. Example: '0' for root group.") - raise InvalidVolumeParameterError("unixGID") - - # Convert volume size to Bytes - if re.search("^[0-9]+MB$", volume_size): - # Convert from MB to Bytes - volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**2 - elif re.search("^[0-9]+GB$", volume_size): - # Convert from GB to Bytes - volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**3 - elif re.search("^[0-9]+TB$", volume_size): - # Convert from TB to Bytes - volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**4 - else : - if print_output: - print("Error: Invalid volume size specified. Acceptable values are '1024MB', '100GB', '10TB', etc.") - raise InvalidVolumeParameterError("size") - - # Create option to choose junction path. - if junction: - junction=junction - else: - junction = "/"+volume_name - - - #check tiering policy - if not tiering_policy in ['none','auto','snapshot-only','all', None]: - if print_output: - print("Error: tiering policy can be: none,auto,snapshot-only or all") - raise InvalidVolumeParameterError("tieringPolicy") - - #vol dp type - if vol_dp: - # Create dict representing volume of type dp - volumeDict = { - "name": volume_name, - "comment": "netapp-dataops", - "svm": {"name": svm}, - "size": volumeSizeBytes, - "style": volume_type, - "type": 'dp' - } - else: - # Create dict representing volume - volumeDict = { - "name": volume_name, - "comment": "netapp-dataops", - "svm": {"name": svm}, - "size": volumeSizeBytes, - "style": volume_type, - "nas": { - "path": junction, - "export_policy": {"name": export_policy}, - "security_style": "unix", - "unix_permissions": unix_permissions, - "uid": unix_uid, - "gid": unix_gid - }, - "snapshot_policy": {"name": snapshot_policy}, - } - - # Set space guarantee field - if guarantee_space: - volumeDict["guarantee"] = {"type": "volume"} - else: - volumeDict["guarantee"] = {"type": "none"} - - # If flexvol -> set aggregate field - if volume_type == "flexvol": - 
volumeDict["aggregates"] = [{'name': aggregate}] - else: - if aggregate: - volumeDict["aggregates"] = [] - for aggr in aggregate.split(','): - volumeDict["aggregates"].append({'name': aggr}) - #if tiering policy provided - if tiering_policy: - volumeDict['tiering'] = {'policy': tiering_policy} - - # Create volume - if print_output: - print("Creating volume '" + volume_name + "' on svm '" + svm + "'") - try: - volume = NetAppVolume.from_dict(volumeDict) - volume.post(poll=True) - if print_output: - print("Volume created successfully.") - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Optionally mount newly created volume - if mountpoint: - try: - mount_volume(volume_name=volume_name, svm_name=svm, mountpoint=mountpoint, readonly=readonly, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - if print_output: - print("Error: Error mounting volume.") - raise - - else: - raise ConnectionTypeError() - + config["connectionType"] = connectionType -def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = None, svm_name: str = None, skip_owned: bool = False, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() + # Prompt user to enter config details + config["hostname"] = input("Enter ONTAP management LIF hostname or IP address (Recommendation: Use SVM management interface): ") + config["svm"] = input("Enter SVM (Storage VM) name: ") + config["dataLif"] = input("Enter SVM NFS data LIF hostname or IP address: ") - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - 
try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve svm from config file - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if print_output: - print("Deleting snapshot '" + snapshot_name + "'.") - - try: - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - # Retrieve snapshot - snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) - - - if not snapshot: - if print_output: - print("Error: Invalid snapshot name.") - raise InvalidSnapshotParameterError("name") - - if hasattr(snapshot,'owners'): - - if not skip_owned: - if print_output: - print('Error: Snapshot cannot be deleted since it has owners:'+','.join(snapshot.owners)) - raise InvalidSnapshotParameterError("name") - else: - if print_output: - print('Warning: Snapshot cannot be deleted since it has owners:'+','.join(snapshot.owners)) - return - - # Delete snapshot - snapshot.delete(poll=True) - - if print_output: - print("Snapshot deleted successfully.") - - except NetAppRestError as err : - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - else: - raise ConnectionTypeError() + # Prompt user to enter default volume type + # Verify value entered; prompt user to re-enter if invalid + while True: + config["defaultVolumeType"] = input("Enter default volume type to use when creating new volumes (flexgroup/flexvol) [flexgroup]: ") + if not config["defaultVolumeType"] : + config["defaultVolumeType"] = "flexgroup" + break + elif config["defaultVolumeType"] in ("flexgroup", "FlexGroup"): + config["defaultVolumeType"] = "flexgroup" + break + elif config["defaultVolumeType"] in ("flexvol", "FlexVol"): + 
config["defaultVolumeType"] = "flexvol" + break + else: + print("Invalid value. Must enter 'flexgroup' or 'flexvol'.") + # prompt user to enter default export policy + config["defaultExportPolicy"] = input("Enter export policy to use by default when creating new volumes [default]: ") + if not config["defaultExportPolicy"]: + config["defaultExportPolicy"] = "default" -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, delete_mirror: bool = False, - delete_non_clone: bool = False, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() + # prompt user to enter default snapshot policy + config["defaultSnapshotPolicy"] = input("Enter snapshot policy to use by default when creating new volumes [none]: ") + if not config["defaultSnapshotPolicy"]: + config["defaultSnapshotPolicy"] = "none" - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve svm from config file - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output : - _print_invalid_config_error() - raise InvalidConfigError() - - try: - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - if not "CLONENAME:" in volume.comment and not delete_non_clone: - if print_output: - print("Error: volume is not a clone created by this tool. 
add --delete-non-clone to delete it") - raise InvalidVolumeParameterError("delete-non-clone") - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - - if delete_mirror: - #check if this volume has snapmirror destination relationship - uuid = None - try: - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name}) - for rel in snapmirror_relationship: - # Retrieve relationship details - rel.get() - uuid = rel.uuid - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - - if uuid: - if print_output: - print("Deleting snapmirror relationship: "+svm+":"+volume_name) - try: - deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) - deleteRelation.delete(poll=True, poll_timeout=120) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - - #check if this volume has snapmirror destination relationship - uuid = None + # Prompt user to enter default uid, gid, and unix permissions + # Verify values entered; prompt user to re-enter if invalid + while True: + config["defaultUnixUID"] = input("Enter unix filesystem user id (uid) to apply by default when creating new volumes (ex. 
'0' for root user) [0]: ") + if not config["defaultUnixUID"]: + config["defaultUnixUID"] = "0" + break try: - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(list_destinations_only=True,**{"source.path": svm+":"+volume_name}) - for rel in snapmirror_relationship: - # Retrieve relationship details - rel.get(list_destinations_only=True) - uuid = rel.uuid - if print_output: - print("release relationship: "+rel.source.path+" -> "+rel.destination.path) - deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) - deleteRelation.delete(poll=True, poll_timeout=120,source_only=True) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - - if mountpoint: - #check if volume is mounted locally, and then unmount it. + int(config["defaultUnixUID"]) + break + except: + print("Invalid value. Must enter an integer.") + while True: + config["defaultUnixGID"] = input("Enter unix filesystem group id (gid) to apply by default when creating new volumes (ex. '0' for root group) [0]: ") + if not config["defaultUnixGID"]: + config["defaultUnixGID"] = "0" + break try: - unmount_volume(mountpoint=mountpoint, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - if print_output: - print("Error: Error mounting volume.") - raise + int(config["defaultUnixGID"]) + break + except: + print("Invalid value. Must enter an integer.") + while True: + config["defaultUnixPermissions"] = input("Enter unix filesystem permissions to apply by default when creating new volumes (ex. '0777' for full read/write permissions for all users and groups) [0777]: ") + if not config["defaultUnixPermissions"] : + config["defaultUnixPermissions"] = "0777" + break + elif not re.search("^0[0-7]{3}", config["defaultUnixPermissions"]): + print("Invalid value. Must enter a valid unix permissions value. 
Acceptable values are '0777', '0755', '0744', etc.") + else: + break - try: - if print_output: - print("Deleting volume '" + svm+':'+volume_name + "'.") - # Delete volume - volume.delete(poll=True) + # Prompt user to enter additional config details + config["defaultAggregate"] = input("Enter aggregate to use by default when creating new FlexVol volumes: ") + config["username"] = input("Enter ONTAP API username (Recommendation: Use SVM account): ") + passwordString = getpass("Enter ONTAP API password (Recommendation: Use SVM account): ") - if print_output: - print("Volume deleted successfully.") + # Convert password to base64 enconding + passwordBytes = passwordString.encode("ascii") + passwordBase64Bytes = base64.b64encode(passwordBytes) + config["password"] = passwordBase64Bytes.decode("ascii") - except NetAppRestError as err: - if print_output: - if "You must delete the SnapMirror relationships before" in str(err): - print("Error: volume is snapmirror destination. add --delete-mirror to delete snapmirror relationship before deleting the volume") - elif "the source endpoint of one or more SnapMirror relationships" in str(err): - print("Error: volume is snapmirror source. add --delete-mirror to release snapmirror relationship before deleting the volume") - else: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + # Prompt user to enter value denoting whether or not to verify SSL cert when calling ONTAP API + # Verify value entered; prompt user to re-enter if invalid + while True: + verifySSLCert = input("Verify SSL certificate when calling ONTAP API (true/false): ") + if verifySSLCert in ("true", "True") : + config["verifySSLCert"] = True + break + elif verifySSLCert in ("false", "False") : + config["verifySSLCert"] = False + break + else: + print("Invalid value. 
Must enter 'true' or 'false'.") else: raise ConnectionTypeError() + # Ask user if they want to use cloud sync functionality + # Verify value entered; prompt user to re-enter if invalid + while True: + useCloudSync = input("Do you intend to use this toolkit to trigger Cloud Sync operations? (yes/no): ") -def list_cloud_sync_relationships(print_output: bool = False) -> list(): - # Step 1: Obtain access token and account ID for accessing Cloud Sync API - - # Retrieve refresh token - try: - refreshToken = _retrieve_cloud_central_refresh_token(print_output=print_output) - except InvalidConfigError: - raise - - # Obtain access token and account ID - try: - accessToken, accountId = _get_cloud_sync_access_parameters(refreshToken=refreshToken, print_output=print_output) - except APIConnectionError: - raise - - # Step 2: Retrieve list of relationships - - # Define parameters for API call - url = "https://cloudsync.netapp.com/api/relationships-v2" - headers = { - "Accept": "application/json", - "x-account-id": accountId, - "Authorization": "Bearer " + accessToken - } - - # Call API to retrieve list of relationships - response = requests.get(url = url, headers = headers) - - # Check for API response status code of 200; if not 200, raise error - if response.status_code != 200: - errorMessage = "Error calling Cloud Sync API to retrieve list of relationships." 
- if print_output: - print("Error:", errorMessage) - _print_api_response(response) - raise APIConnectionError(errorMessage, response) - - # Constrict list of relationships - relationships = json.loads(response.text) - relationshipsList = list() - for relationship in relationships: - relationshipDetails = dict() - relationshipDetails["id"] = relationship["id"] - relationshipDetails["source"] = relationship["source"] - relationshipDetails["target"] = relationship["target"] - relationshipsList.append(relationshipDetails) - - # Print list of relationships - if print_output: - print(yaml.dump(relationshipsList)) - - return relationshipsList - - -def list_snap_mirror_relationships(print_output: bool = False, cluster_name: str = None) -> list(): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - try: - # Retrieve all relationships for which destination is on current cluster - destinationRelationships = NetAppSnapmirrorRelationship.get_collection() - - # Do not retrieve relationships for which source is on current cluster - # Note: Uncomment below line to retrieve all relationships for which source is on current cluster, then add sourceRelationships to for loop - # sourceRelationships = NetAppSnapmirrorRelationship.get_collection(list_destinations_only=True) - - # Construct list of relationships - relationshipsList = list() - for relationship in destinationRelationships: - # Retrieve relationship details - try: - relationship.get() - except NetAppRestError as err: - 
relationship.get(list_destinations_only=True) - - # Set cluster value - if hasattr(relationship.source, "cluster"): - sourceCluster = relationship.source.cluster.name - else: - sourceCluster = "user's cluster" - if hasattr(relationship.destination, "cluster"): - destinationCluster = relationship.destination.cluster.name - else: - destinationCluster = "user's cluster" + if useCloudSync in ("yes", "Yes", "YES"): + # Prompt user to enter cloud central refresh token + print("Note: If you do not have a Cloud Central refresh token, visit https://services.cloud.netapp.com/refresh-token to create one.") + refreshTokenString = getpass("Enter Cloud Central refresh token: ") - # Set transfer state value - if hasattr(relationship, "transfer"): - transferState = relationship.transfer.state - else: - transferState = None + # Convert refresh token to base64 encoding + refreshTokenBytes = refreshTokenString.encode("ascii") + refreshTokenBase64Bytes = base64.b64encode(refreshTokenBytes) + config["cloudCentralRefreshToken"] = refreshTokenBase64Bytes.decode("ascii") - # Set healthy value - if hasattr(relationship, "healthy"): - healthy = relationship.healthy - else: - healthy = "unknown" - - # Construct dict containing relationship details - relationshipDict = { - "UUID": relationship.uuid, - "Type": relationship.policy.type, - "Healthy": healthy, - "Current Transfer Status": transferState, - "Source Cluster": sourceCluster, - "Source SVM": relationship.source.svm.name, - "Source Volume": relationship.source.path.split(":")[1], - "Dest Cluster": destinationCluster, - "Dest SVM": relationship.destination.svm.name, - "Dest Volume": relationship.destination.path.split(":")[1] - } - - # Append dict to list of relationships - relationshipsList.append(relationshipDict) - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Print list of relationships - if print_output: - # Convert relationships array to 
Pandas DataFrame - relationshipsDF = pd.DataFrame.from_dict(relationshipsList, dtype="string") - print(tabulate(relationshipsDF, showindex=False, headers=relationshipsDF.columns)) - - return relationshipsList + break - else: - raise ConnectionTypeError() + elif useCloudSync in ("no", "No", "NO"): + break + else: + print("Invalid value. Must enter 'yes' or 'no'.") -def list_snapshots(volume_name: str, cluster_name: str = None, svm_name: str = None, print_output: bool = False) -> list(): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name + # Ask user if they want to use S3 functionality + # Verify value entered; prompt user to re-enter if invalid + while True: + useS3 = input("Do you intend to use this toolkit to push/pull from S3? 
(yes/no): ") - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve svm from config file - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - # Retrieve snapshots - try: - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - # Construct list of snapshots - snapshotsList = list() - for snapshot in NetAppSnapshot.get_collection(volume.uuid): - # Retrieve snapshot - snapshot.get() - - # Construct dict of snapshot details - snapshotDict = {"Snapshot Name": snapshot.name, "Create Time": snapshot.create_time} - - # Append dict to list of snapshots - snapshotsList.append(snapshotDict) - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Print list of snapshots - if print_output: - # Convert snapshots array to Pandas DataFrame - snapshotsDF = pd.DataFrame.from_dict(snapshotsList, dtype="string") - print(tabulate(snapshotsDF, showindex=False, headers=snapshotsDF.columns)) - - return snapshotsList + if useS3 in ("yes", "Yes", "YES"): + # Prompt user to enter S3 endpoint details + config["s3Endpoint"] = input("Enter S3 endpoint: ") - else: - raise ConnectionTypeError() + # Prompt user to enter S3 credentials + config["s3AccessKeyId"] = input("Enter S3 Access Key ID: ") + s3SecretAccessKeyString = getpass("Enter S3 Secret Access Key: ") + # Convert S3 secret access key to base64 encoding + s3SecretAccessKeyBytes = s3SecretAccessKeyString.encode("ascii") + s3SecretAccessKeyBase64Bytes = base64.b64encode(s3SecretAccessKeyBytes) + config["s3SecretAccessKey"] = 
s3SecretAccessKeyBase64Bytes.decode("ascii") -def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None) -> list(): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output : - _print_invalid_config_error() - raise InvalidConfigError() - if cluster_name: - config["hostname"] = cluster_name - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - try: - svmname=config["svm"] - if svm_name: - svmname = svm_name - - # Retrieve all volumes for SVM - volumes = NetAppVolume.get_collection(svm=svmname) - - # Retrieve local mounts if desired - if check_local_mounts : - mounts = subprocess.check_output(['mount']).decode() - - # Construct list of volumes; do not include SVM root volume - volumesList = list() - for volume in volumes: - baseVolumeFields = "nas.path,size,style,clone,flexcache_endpoint_type" - try : - volumeFields = baseVolumeFields - if include_space_usage_details : - volumeFields += ",space,constituents" - volume.get(fields=volumeFields) - except NetAppRestError as err : - volumeFields = baseVolumeFields - if include_space_usage_details : - volumeFields += ",space" - volume.get(fields=volumeFields) - - # Retrieve volume export path; handle case where volume is not exported - if hasattr(volume, "nas"): - volumeExportPath = volume.nas.path + # Prompt user to enter value denoting whether or not to verify SSL cert when calling S3 API + # Verify value entered; prompt user to re-enter if invalid + while True: + s3VerifySSLCert = input("Verify SSL certificate when calling S3 API (true/false): ") + if s3VerifySSLCert in ("true", 
"True"): + config["s3VerifySSLCert"] = True + config["s3CACertBundle"] = input("Enter CA cert bundle to use when calling S3 API (optional) []: ") + break + elif s3VerifySSLCert in ("false", "False"): + config["s3VerifySSLCert"] = False + config["s3CACertBundle"] = "" + break else: - volumeExportPath = None - - # Include all vols except for SVM root vol - if volumeExportPath != "/": - # Determine volume type - type = volume.style - - # Construct NFS mount target - if not volumeExportPath : - nfsMountTarget = None - else : - nfsMountTarget = config["dataLif"]+":"+volume.nas.path - if svmname != config["svm"]: - nfsMountTarget = svmname+":"+volume.nas.path - - - # Construct clone source - clone = "no" - cloneParentSvm = "" - cloneParentVolume = "" - cloneParentSnapshot = "" - - try: - cloneParentSvm = volume.clone.parent_svm.name - cloneParentVolume = volume.clone.parent_volume.name - cloneParentSnapshot = volume.clone.parent_snapshot.name - clone = "yes" - except: - pass - - # Determine if FlexCache - if volume.flexcache_endpoint_type == "cache": - flexcache = "yes" - else: - flexcache = "no" - - # Convert size in bytes to "pretty" size (size in KB, MB, GB, or TB) - prettySize = _convert_bytes_to_pretty_size(size_in_bytes=volume.size) - if include_space_usage_details : - try : - snapshotReserve = str(volume.space.snapshot.reserve_percent) + "%" - logicalCapacity = float(volume.space.size) * (1 - float(volume.space.snapshot.reserve_percent)/100) - prettyLogicalCapacity = _convert_bytes_to_pretty_size(size_in_bytes=logicalCapacity) - logicalUsage = float(volume.space.used) - prettyLogicalUsage = _convert_bytes_to_pretty_size(size_in_bytes=logicalUsage) - except : - snapshotReserve = "Unknown" - prettyLogicalCapacity = "Unknown" - prettyLogicalUsage = "Unknown" - try : - if type == "flexgroup" : - totalFootprint: float = 0.0 - for constituentVolume in volume.constituents : - totalFootprint += float(constituentVolume["space"]["total_footprint"]) - else : - totalFootprint 
= float(volume.space.footprint) + float(volume.space.metadata) - prettyFootprint = _convert_bytes_to_pretty_size(size_in_bytes=totalFootprint) - except : - prettyFootprint = "Unknown" - - # Construct dict containing volume details; optionally include local mountpoint - volumeDict = { - "Volume Name": volume.name, - "Size": prettySize - } - if include_space_usage_details : - volumeDict["Snap Reserve"] = snapshotReserve - volumeDict["Capacity"] = prettyLogicalCapacity - volumeDict["Usage"] = prettyLogicalUsage - volumeDict["Footprint"] = prettyFootprint - volumeDict["Type"] = volume.style - volumeDict["NFS Mount Target"] = nfsMountTarget - if check_local_mounts: - localMountpoint = "" - for mount in mounts.split("\n") : - mountDetails = mount.split(" ") - if mountDetails[0] == nfsMountTarget : - localMountpoint = mountDetails[2] - volumeDict["Local Mountpoint"] = localMountpoint - volumeDict["FlexCache"] = flexcache - volumeDict["Clone"] = clone - volumeDict["Source SVM"] = cloneParentSvm - volumeDict["Source Volume"] = cloneParentVolume - volumeDict["Source Snapshot"] = cloneParentSnapshot - - # Append dict to list of volumes - volumesList.append(volumeDict) - - except NetAppRestError as err: - if print_output : - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - # Print list of volumes - if print_output: - # Convert volumes array to Pandas DataFrame - volumesDF = pd.DataFrame.from_dict(volumesList, dtype="string") - print(tabulate(volumesDF, showindex=False, headers=volumesDF.columns)) - - return volumesList - - else: - raise ConnectionTypeError() - -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): - nfsMountTarget = None + print("Invalid value. 
Must enter 'true' or 'false'.") - svm = None - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() + break - if cluster_name: - config["hostname"] = cluster_name - - # Retrieve list of volumes - try: - volumes = list_volumes(check_local_mounts=True, svm_name = svm) - except (InvalidConfigError, APIConnectionError): - if print_output: - print("Error: Error retrieving NFS mount target for volume.") - raise - - # Retrieve NFS mount target for volume, and check that no volume is currently mounted at specified mountpoint - for volume in volumes: - # Check mountpoint - if mountpoint == volume["Local Mountpoint"]: - if print_output: - print("Error: Volume '" + volume["Volume Name"] + "' is already mounted at '" + mountpoint + "'.") - raise MountOperationError("Another volume mounted at mountpoint") - - if volume_name == volume["Volume Name"]: - # Retrieve NFS mount target - nfsMountTarget = volume["NFS Mount Target"] - nfsMountTarget = nfsMountTarget.strip() - - # Raise error if invalid volume name was entered - if not nfsMountTarget: - if print_output: - print("Error: Invalid volume name specified.") - raise InvalidVolumeParameterError("name") + elif useS3 in ("no", "No", "NO"): + break - try: - if lif_name: - nfsMountTarget = lif_name+':'+nfsMountTarget.split(':')[1] - except: - if print_output: - print("Error: Error retrieving NFS mount target for volume.") - raise - - if os.getuid() != 0: - exit("You need to have root privileges to run 'Mount' command.\nPlease try again, this time using 'sudo'. 
Exiting.") - # Print message describing action to be understaken - if print_output: - if readonly: - print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "' as read-only.") else: - print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "'.") + print("Invalid value. Must enter 'yes' or 'no'.") - # Create mountpoint if it doesn't already exist - mountpoint = os.path.expanduser(mountpoint) + # Create config dir if it doesn't already exist try: - os.mkdir(mountpoint) - except FileExistsError: + os.mkdir(configDirPath) + except FileExistsError : pass - # Mount volume - mount_cmd_opts = [] - - if readonly: - mount_cmd_opts.append('-o') - mount_cmd_opts.append('ro') - if mount_options: - mount_cmd_opts.remove('ro') - mount_cmd_opts.append('ro'+','+mount_options) - elif mount_options: - mount_cmd_opts.append('-o') - mount_cmd_opts.append(mount_options) - mount_cmd = ['mount'] + mount_cmd_opts + [nfsMountTarget, mountpoint] + # Create config file in config dir + with open(configFilePath, 'w') as configFile: + # Write connection details to config file + json.dump(config, configFile) - try: - subprocess.check_call(mount_cmd) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) - - -# Function to unmount volume -def unmount_volume(mountpoint: str, print_output: bool = False): - # Print message describing action to be understaken - if print_output: - print("Unmounting volume at '" + mountpoint + "'.") - - # Un-mount volume - try: - subprocess.check_call(['umount', mountpoint]) - if print_output: - print("Volume unmounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running unmount command: ", err) - raise MountOperationError(err) + print("Created config file: '" + configFilePath 
+ "'.") -def prepopulate_flex_cache(volume_name: str, paths: list, print_output: bool = False): - # Retrieve config details from config file +def getTarget(args: list) -> str: try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] + target = args[2] except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve svm from config file - try: - svm = config["svm"] - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if print_output: - print("FlexCache '" + volume_name + "' - Prepopulating paths: ", paths) - - try: - # Retrieve FlexCache - flexcache = NetAppFlexCache.find(name=volume_name, svm=svm) - if not flexcache: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - # Prepopulate FlexCache - flexcache.prepopulate = {"dir_paths": paths} - flexcache.patch() + handleInvalidCommand() + return target - if print_output: - print("FlexCache prepopulated successfully.") - - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) +def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = False): + if invalidOptArg: + print("Error: Invalid option/argument.") else: - raise ConnectionTypeError() - - -def pull_bucket_from_s3(s3_bucket: str, local_directory: str, s3_object_key_prefix: str = "", print_output: bool = False): - # Retrieve S3 access details from existing config file - try: - s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) - except InvalidConfigError: 
- raise + print("Error: Invalid command.") + print(helpText) + sys.exit(1) - # Add slash to end of local directory path if not present - if not local_directory.endswith(os.sep): - local_directory += os.sep - # Multithread the download operation - with ThreadPoolExecutor() as executor: - try: - # Instantiate S3 session - s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, print_output=print_output) +## Main function +if __name__ == '__main__': + import sys, getopt - # Loop through all objects with prefix in bucket and download - bucket = s3.Bucket(s3_bucket) - for obj in bucket.objects.filter(Prefix=s3_object_key_prefix): - executor.submit(_download_from_s3, s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, s3ObjectKey=obj.key, localFile=local_directory+obj.key, print_output=print_output) - - except APIConnectionError: - raise - - except Exception as err: - if print_output: - print("Error: S3 API error: ", err) - raise APIConnectionError(err) - - print("Download complete.") - - -def pull_object_from_s3(s3_bucket: str, s3_object_key: str, local_file: str = None, print_output: bool = False): - # Retrieve S3 access details from existing config file + # Get desired action from command line args try: - s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) - except InvalidConfigError: - raise - - # Set S3 object key - if not local_file: - local_file = s3_object_key - - # Upload file - try: - _download_from_s3(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, s3ObjectKey=s3_object_key, localFile=local_file, 
print_output=print_output) - except APIConnectionError: - raise - - print("Download complete.") - - -def push_directory_to_s3(s3_bucket: str, local_directory: str, s3_object_key_prefix: str = "", - s3_extra_args: str = None, print_output: bool = False): - # Retrieve S3 access details from existing config file - try: - s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) - except InvalidConfigError: - raise - - # Multithread the upload operation - with ThreadPoolExecutor() as executor: - # Loop through all files in directory - for dirpath, dirnames, filenames in os.walk(local_directory): - # Exclude hidden files and directories - filenames = [filename for filename in filenames if not filename[0] == '.'] - dirnames[:] = [dirname for dirname in dirnames if not dirname[0] == '.'] - - for filename in filenames: - # Build filepath - if local_directory.endswith(os.sep): - dirpathBeginIndex = len(local_directory) - else: - dirpathBeginIndex = len(local_directory) + 1 - - subdirpath = dirpath[dirpathBeginIndex:] - - if subdirpath: - filepath = subdirpath + os.sep + filename - else: - filepath = filename - - # Set S3 object details - s3ObjectKey = s3_object_key_prefix + filepath - localFile = dirpath + os.sep + filename - - # Upload file - try: - executor.submit(_upload_to_s3, s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, localFile=localFile, s3ObjectKey=s3ObjectKey, s3ExtraArgs=s3_extra_args, print_output=print_output) - except APIConnectionError: - raise - - print("Upload complete.") - - -def push_file_to_s3(s3_bucket: str, local_file: str, s3_object_key: str = None, s3_extra_args: str = None, print_output: bool = False): - # Retrieve S3 access details from existing config file - try: - s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = 
_retrieve_s3_access_details(print_output=print_output) - except InvalidConfigError: - raise - - # Set S3 object key - if not s3_object_key: - s3_object_key = local_file - - # Upload file - try: - _upload_to_s3(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, localFile=local_file, s3ObjectKey=s3_object_key, s3ExtraArgs=s3_extra_args, print_output=print_output) - except APIConnectionError: - raise - - print("Upload complete.") - - -def restore_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = None, svm_name : str = None, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] + action = sys.argv[1] except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if cluster_name: - config["hostname"] = cluster_name + handleInvalidCommand() + + # Invoke desired action + if action == "clone": + # Get desired target from command line args + target = getTarget(sys.argv) + + # Invoke desired action based on target + if target in ("volume", "vol"): + newVolumeName = None + clusterName = None + sourceSVM = None + targetSVM = None + sourceVolumeName = None + sourceSnapshotName = None + mountpoint = None + unixUID = None + unixGID = None + junction = None + readonly = False + split = False + refresh = False + exportPolicy = None + snapshotPolicy = None + exportHosts = None + svmDrUnprotect = False + + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hl:c:t:n:v:s:m:u:g:j:xe:p:i:srd", ["help", "cluster-name=", "source-svm=","target-svm=","name=", "source-volume=", "source-snapshot=", "mountpoint=", "uid=", "gid=", "junction=", 
"readonly","export-hosts=","export-policy=","snapshot-policy=","split","refresh","svm-dr-unprotect"]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextCloneVolume) + sys.exit(0) + elif opt in ("-l", "--cluster-name"): + clusterName = arg + elif opt in ("-n", "--name"): + newVolumeName = arg + elif opt in ("-c", "--source-svm"): + sourceSVM = arg + elif opt in ("-t", "--target-svm"): + targetSVM = arg + elif opt in ("-v", "--source-volume"): + sourceVolumeName = arg + elif opt in ("-s", "--source-snapshot"): + sourceSnapshotName = arg + elif opt in ("-m", "--mountpoint"): + mountpoint = arg + elif opt in ("-u", "--uid"): + unixUID = arg + elif opt in ("-g", "--gid"): + unixGID = arg + elif opt in ("-j", "--junction"): + junction = arg + elif opt in ("-x", "--readonly"): + readonly = True + elif opt in ("-s", "--split"): + split = True + elif opt in ("-r", "--refresh"): + refresh = True + elif opt in ("-d", "--svm-dr-unprotect"): + svmDrUnprotect = True + elif opt in ("-p", "--export-policy"): + exportPolicy = arg + elif opt in ("-i", "--snapshot-policy"): + snapshotPolicy = arg + elif opt in ("-e", "--export-hosts"): + exportHosts = arg + + # Check for required options + if not newVolumeName or not sourceVolumeName: + handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) + if (unixUID and not unixGID) or (unixGID and not unixUID): + print("Error: if either one of -u/--uid or -g/--gid is spefied, then both must be specified.") + handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) + if exportHosts and exportPolicy: + print("Error: cannot use both --export-policy and --export-hosts. 
only one of them can be specified.") + handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) + + # Clone volume + try: + clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, + cluster_name=clusterName, source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, + snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, + junction=junction, svm_dr_unprotect=svmDrUnprotect, readonly=readonly, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError, + MountOperationError): + sys.exit(1) - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - # Retrieve svm from config file - try: - svm = config["svm"] - if svm_name: - svm = svm_name - except: - if print_output: - _print_invalid_config_error() - raise InvalidConfigError() - - if print_output: - print("Restoring snapshot '" + snapshot_name + "'.") - - try: - # Retrieve volume - volume = NetAppVolume.find(name=volume_name, svm=svm) - if not volume: - if print_output: - print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") - - # Retrieve snapshot - snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) - if not snapshot: - if print_output: - print("Error: Invalid snapshot name.") - raise InvalidSnapshotParameterError("name") + else: + handleInvalidCommand() - # Restore snapshot - volume.patch(volume.uuid, **{"restore_to.snapshot.name": snapshot.name, "restore_to.snapshot.uuid": snapshot.uuid}, poll=True) - if print_output: - print("Snapshot restored successfully.") + elif action in ("config", "setup"): + if len(sys.argv) > 2 : + if sys.argv[2] in ("-h", "--help"): + 
print(helpTextConfig) + sys.exit(0) + else: + handleInvalidCommand(helpTextConfig, invalidOptArg=True) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + #connectionType = input("Enter connection type (ONTAP): ") + connectionType = "ONTAP" - else: - raise ConnectionTypeError() + # Create config file + createConfig(connectionType=connectionType) + elif action == "create": + # Get desired target from command line args + target = getTarget(sys.argv) -def sync_cloud_sync_relationship(relationship_id: str, wait_until_complete: bool = False, print_output: bool = False): - # Step 1: Obtain access token and account ID for accessing Cloud Sync API + # Invoke desired action based on target + if target in ("snapshot", "snap"): + volumeName = None + snapshotName = None + clusterName = None + svmName = None + retentionCount = 0 + retentionDays = False + snapmirrorLabel = None - # Retrieve refresh token - try: - refreshToken = _retrieve_cloud_central_refresh_token(print_output=print_output) - except InvalidConfigError: - raise - - # Obtain access token and account ID - try: - accessToken, accountId = _get_cloud_sync_access_parameters(refreshToken=refreshToken, print_output=print_output) - except APIConnectionError: - raise - - # Step 2: Trigger Cloud Sync sync - - # Define parameters for API call - url = "https://cloudsync.netapp.com/api/relationships/%s/sync" % relationship_id - headers = { - "Content-Type": "application/json", - "Accept": "application/json", - "x-account-id": accountId, - "Authorization": "Bearer " + accessToken - } - - # Call API to trigger sync - if print_output: - print("Triggering sync operation for Cloud Sync relationship (ID = " + relationship_id + ").") - response = requests.put(url = url, headers = headers) - - # Check for API response status code of 202; if not 202, raise error - if response.status_code != 202: - errorMessage = "Error calling Cloud Sync API to trigger 
sync operation." - if print_output: - print("Error:", errorMessage) - _print_api_response(response) - raise APIConnectionError(errorMessage, response) - - if print_output: - print("Sync operation successfully triggered.") - - # Step 3: Obtain status of the sync operation; keep checking until the sync operation has completed - - if wait_until_complete: - while True: - # Define parameters for API call - url = "https://cloudsync.netapp.com/api/relationships-v2/%s" % relationship_id - headers = { - "Accept": "application/json", - "x-account-id": accountId, - "Authorization": "Bearer " + accessToken - } - - # Call API to obtain status of sync operation - response = requests.get(url = url, headers = headers) - - # Parse response to retrieve status of sync operation + # Get command line options try: - responseBody = json.loads(response.text) - latestActivityType = responseBody["activity"]["type"] - latestActivityStatus = responseBody["activity"]["status"] - except: - errorMessage = "Error obtaining status of sync operation from Cloud Sync API." - if print_output: - print("Error:", errorMessage) - _print_api_response(response) - raise APIConnectionError(errorMessage, response) - - # End execution if the latest update is complete - if latestActivityType == "Sync": - if latestActivityStatus == "DONE": - if print_output: - print("Success: Sync operation is complete.") - break - elif latestActivityStatus == "FAILED": - if print_output: - failureMessage = responseBody["activity"]["failureMessage"] - print("Error: Sync operation failed.") - print("Message:", failureMessage) - raise CloudSyncSyncOperationError(latestActivityStatus, failureMessage) - elif latestActivityStatus == "RUNNING": - # Print message re: progress - if print_output: - print("Sync operation is not yet complete. 
Status:", latestActivityStatus) - print("Checking again in 60 seconds...") - else: - if print_output: - print ("Error: Unknown sync operation status (" + latestActivityStatus + ") returned by Cloud Sync API.") - raise CloudSyncSyncOperationError(latestActivityStatus) + opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:r:u:l:", ["cluster-name=","help", "svm=", "name=", "volume=", "retention=", "snapmirror-label="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help") : + print(helpTextCreateSnapshot) + sys.exit(0) + elif opt in ("-n", "--name"): + snapshotName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-s", "--svm"): + svmName = arg + elif opt in ("-r", "--retention"): + retentionCount = arg + elif opt in ("-v", "--volume"): + volumeName = arg + elif opt in ("-l", "--snapmirror-label"): + snapmirrorLabel = arg + + # Check for required options + if not volumeName: + handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) + + if retentionCount: + if not retentionCount.isnumeric(): + matchObj = re.match("^(\d+)d$",retentionCount) + if not matchObj: + handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) + else: + retentionCount = matchObj.group(1) + retentionDays = True - # Sleep for 60 seconds before checking progress again - time.sleep(60) + # Create snapshot + try: + create_snapshot(volume_name=volumeName, snapshot_name=snapshotName, retention_count=retentionCount, retention_days=retentionDays, cluster_name=clusterName, svm_name=svmName, snapmirror_label=snapmirrorLabel, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): + sys.exit(1) + + elif target in ("volume", "vol"): + clusterName = None + svmName = None + volumeName = None + volumeSize = None + guaranteeSpace = False + volumeType = None + 
unixPermissions = None + unixUID = None + unixGID = None + exportPolicy = None + snapshotPolicy = None + mountpoint = None + aggregate = None + junction = None + readonly = False + tieringPolicy = None + volDP = False + + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:y", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp"]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextCreateVolume) + sys.exit(0) + elif opt in ("-v", "--svm"): + svmName = arg + elif opt in ("-l", "--cluster-name"): + clusterName = arg + elif opt in ("-n", "--name"): + volumeName = arg + elif opt in ("-s", "--size"): + volumeSize = arg + elif opt in ("-r", "--guarantee-space"): + guaranteeSpace = True + elif opt in ("-t", "--type"): + volumeType = arg + elif opt in ("-p", "--permissions"): + unixPermissions = arg + elif opt in ("-u", "--uid"): + unixUID = arg + elif opt in ("-g", "--gid"): + unixGID = arg + elif opt in ("-e", "--export-policy"): + exportPolicy = arg + elif opt in ("-d", "--snapshot-policy"): + snapshotPolicy = arg + elif opt in ("-m", "--mountpoint"): + mountpoint = arg + elif opt in ("-a", "--aggregate"): + aggregate = arg + elif opt in ("-j", "--junction"): + junction = arg + elif opt in ("-x", "--readonly"): + readonly = True + elif opt in ("-f", "--tiering-policy"): + tieringPolicy = arg + elif opt in ("-y", "--dp"): + volDP = True + + # Check for required options + if not volumeName or not volumeSize: + handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) + if (unixUID and not unixGID) or (unixGID and not unixUID): + print("Error: if either one of -u/--uid 
or -g/--gid is spefied, then both must be specified.") + handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) + if (volDP and (junction or mountpoint or snapshotPolicy or exportPolicy)): + handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) + + # Create volume + try: + create_volume(svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, + unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, + print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + sys.exit(1) + + elif target in ("snapmirror-relationship", "sm","snapmirror"): + clusterName = None + sourceSvm = None + targetSvm = None + sourceVol = None + targetVol = None + policy = 'MirrorAllSnapshots' + schedule = "hourly" + volumeSize = None + action = None + + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hn:t:s:v:u:y:c:p:a:h", ["cluster-name=","help", "target-vol=", "target-svm=", "source-svm=", "source-vol=", "schedule=", "policy=", "action="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextCreateSnapMirrorRelationship) + sys.exit(0) + elif opt in ("-t", "--target-svm"): + targetSvm = arg + elif opt in ("-n", "--target-vol"): + targetVol = arg + elif opt in ("-s", "--source-svm"): + sourceSvm = arg + elif opt in ("-v", "--source-vol"): + sourceVol = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-c", "--schedule"): + schedule = arg + elif opt in ("-p", "--policy"): + 
policy = arg + elif opt in ("-a", "--action"): + action = arg + + # Check for required options + if not targetVol or not sourceSvm or not sourceVol: + handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) + + if action not in [None,'resync','initialize']: + handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) + + # Create snapmirror + try: + create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, + cluster_name=clusterName, action=action, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + sys.exit(1) -def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol: str, target_svm: str = None, cluster_name: str = None, - schedule: str = '', policy: str = 'MirrorAllSnapshots', action: str = None, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output : - _print_invalid_config_error() - raise InvalidConfigError() + else: + handleInvalidCommand() - if cluster_name: - config["hostname"] = cluster_name + elif action in ("delete", "del", "rm"): + # Get desired target from command line args + target = getTarget(sys.argv) - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise + # Invoke desired action based on target + if target in ("snapshot", "snap"): + volumeName = None + snapshotName = None + svmName = None + clusterName = None - svm = config["svm"] - if not target_svm: - target_svm = svm + # Get command line options + try: + opts, args = 
getopt.getopt(sys.argv[3:], "hn:v:s:u:", ["cluster-name=","help", "svm=", "name=", "volume="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextDeleteSnapshot) + sys.exit(0) + elif opt in ("-n", "--name"): + snapshotName = arg + elif opt in ("-s", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-v", "--volume"): + volumeName = arg + + # Check for required options + if not volumeName or not snapshotName: + handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) - try: - uuid = None - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": target_svm+":"+target_vol}) - for rel in snapmirror_relationship: - # Retrieve relationship details - try: - rel.get() - uuid = rel.uuid - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - if uuid: - if print_output: - print("Error: relationship alreay exists: "+target_svm+":"+target_vol) - raise InvalidConfigError() - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - try: - newRelationDict = { - "source": { - "path": source_svm+":"+source_vol - }, - "destination": { - "path": target_svm+":"+target_vol - } - #due to bug 1311226 setting the policy wil be done using cli api - # "policy": { - # "name": policy, - # }, - } - # if schedule != '': - # newRelationDict['schedule'] = schedule - - if print_output: - print("Creating snapmirror relationship: "+source_svm+":"+source_vol+" -> "+target_svm+":"+target_vol) - newRelationship = NetAppSnapmirrorRelationship.from_dict(newRelationDict) - newRelationship.post(poll=True, poll_timeout=120) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - 
raise APIConnectionError(err) - - try: - if print_output: - print("Setting snapmirror policy as: "+policy+" schedule:"+schedule) - response = NetAppCLI().execute("snapmirror modify",destination_path=target_svm+":"+target_vol,body={"policy": policy, "schedule":schedule}) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - try: - uuid = None - relation = None - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": target_svm+":"+target_vol}) - for relation in snapmirror_relationship: - # Retrieve relationship details - try: - relation.get() - uuid = relation.uuid - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - if not uuid: - if print_output: - print("Error: relationship was not created: "+target_svm+":"+target_vol) - raise InvalidConfigError() - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - if action in ["resync","initialize"]: + # Delete snapshot try: - if print_output: - print("Setting state to snapmirrored, action:"+action) - patchRelation = NetAppSnapmirrorRelationship(uuid=uuid) - patchRelation.state = "snapmirrored" - patchRelation.patch(poll=True, poll_timeout=120) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - -def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume_name: str = None, cluster_name: str = None, wait_until_complete: bool = False, print_output: bool = False): - # Retrieve config details from config file - try: - config = _retrieve_config(print_output=print_output) - except InvalidConfigError: - raise - try: - connectionType = config["connectionType"] - except: - if print_output : - _print_invalid_config_error() - raise InvalidConfigError() + 
delete_snapshot(volume_name=volumeName, svm_name = svmName, cluster_name=clusterName, snapshot_name=snapshotName, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError): + sys.exit(1) + + elif target in ("volume", "vol"): + volumeName = None + svmName = None + clusterName = None + force = False + deleteMirror = False + deleteNonClone = False + mountpoint = None + + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m:p:", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror", "mountpoint="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextDeleteVolume) + sys.exit(0) + elif opt in ("-v", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-n", "--name"): + volumeName = arg + elif opt in ("-p", "--mountpoint"): + mountpoint = arg + elif opt in ("-f", "--force"): + force = True + elif opt in ("-m", "--delete-mirror"): + deleteMirror = True + elif opt in ("--delete-non-clone"): + deleteNonClone = True + + # Check for required options + if not volumeName: + handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) + + # Confirm delete operation + if not force: + print("Warning: All data and snapshots associated with the volume will be permanently deleted.") + while True: + proceed = input("Are you sure that you want to proceed? (yes/no): ") + if proceed in ("yes", "Yes", "YES"): + break + elif proceed in ("no", "No", "NO"): + sys.exit(0) + else: + print("Invalid value. 
Must enter 'yes' or 'no'.") - if cluster_name: - config["hostname"] = cluster_name + # Delete volume + try: + delete_volume(volume_name=volumeName, svm_name=svmName, cluster_name=clusterName, delete_mirror=deleteMirror, delete_non_clone=deleteNonClone, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): + sys.exit(1) - if connectionType == "ONTAP": - # Instantiate connection to ONTAP cluster - try: - _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) - except InvalidConfigError: - raise - - if volume_name: - svm = config["svm"] - if svm_name: - svm = svm_name - - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name}) - for rel in snapmirror_relationship: - # Retrieve relationship details - try: - rel.get() - uuid = rel.uuid - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - if not uuid: - snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"}) - for rel in snapmirror_relationship: - try: - rel.get() - uuid = rel.uuid - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - if uuid: - if print_output: - print("volume is part of svm-dr relationshitp: "+svm+":") - - if not uuid: - if print_output: - print("Error: relationship could not be found.") - raise SnapMirrorSyncOperationError("not found") - - if print_output: - print("Triggering sync operation for SnapMirror relationship (UUID = " + uuid + ").") - - try: - # Trigger sync operation for SnapMirror relationship - transfer = NetAppSnapmirrorTransfer(uuid) - transfer.post(poll=True) - except NetAppRestError as err: - if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - - if print_output: - print("Sync operation successfully triggered.") - - if wait_until_complete: - # Wait to perform 
initial check - print("Waiting for sync operation to complete.") - print("Status check will be performed in 10 seconds...") - time.sleep(10) + else: + handleInvalidCommand() + + elif action in ("help", "h", "-h", "--help"): + print(helpTextStandard) + + elif action in ("list", "ls"): + # Get desired target from command line args + target = getTarget(sys.argv) + + # Invoke desired action based on target + if target in ("cloud-sync-relationship", "cloud-sync", "cloud-sync-relationships", "cloud-syncs") : + # Check command line options + if len(sys.argv) > 3: + if sys.argv[3] in ("-h", "--help"): + print(helpTextListCloudSyncRelationships) + sys.exit(0) + else: + handleInvalidCommand(helpTextListCloudSyncRelationships, invalidOptArg=True) - while True: - # Retrieve relationship - relationship = NetAppSnapmirrorRelationship.find(uuid=uuid) - relationship.get() + # List cloud sync relationships + try: + list_cloud_sync_relationships(print_output=True) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) - # Check status of sync operation - if hasattr(relationship, "transfer"): - transferState = relationship.transfer.state - else: - transferState = None - - # if transfer is complete, end execution - if (not transferState) or (transferState == "success"): - healthy = relationship.healthy - if healthy: - if print_output: - print("Success: Sync operation is complete.") - break - else: - if print_output: - print("Error: Relationship is not healthy. Access ONTAP System Manager for details.") - raise SnapMirrorSyncOperationError("not healthy") - elif transferState != "transferring": - if print_output: - print ("Error: Unknown sync operation status (" + transferState + ") returned by ONTAP API.") - raise SnapMirrorSyncOperationError(transferState) - else: - # Print message re: progress - if print_output: - print("Sync operation is not yet complete. 
Status:", transferState) - print("Checking again in 10 seconds...") + elif target in ("snapmirror-relationship", "snapmirror", "snapmirror-relationships", "snapmirrors","sm"): + svmName = None + clusterName = None - # Sleep for 10 seconds before checking progress again - time.sleep(10) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hv:u:", ["cluster-name=","help", "svm="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextListSnapMirrorRelationships) + sys.exit(0) + elif opt in ("-v", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + + # List snapmirror relationships + try: + list_snap_mirror_relationships(print_output=True, cluster_name=clusterName) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) - else: - raise ConnectionTypeError() + elif target in ("snapshot", "snap", "snapshots", "snaps"): + volumeName = None + clusterName = None + svmName = None -# -# Deprecated function names -# + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hv:s:u:", ["cluster-name=","help", "volume=","svm="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help") : + print(helpTextListSnapshots) + sys.exit(0) + elif opt in ("-v", "--volume"): + volumeName = arg + elif opt in ("-s", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + + # Check for required options + if not volumeName: + handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) + + # List snapshots + try: + list_snapshots(volume_name=volumeName, cluster_name=clusterName, svm_name=svmName, print_output=True) + except 
(InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): + sys.exit(1) + elif target in ("volume", "vol", "volumes", "vols"): + includeSpaceUsageDetails = False + svmName = None + clusterName = None -@deprecated -def cloneVolume(newVolumeName: str, sourceVolumeName: str, sourceSnapshotName: str = None, unixUID: str = None, unixGID: str = None, mountpoint: str = None, printOutput: bool = False) : - clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, - mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hsv:u:", ["cluster-name=","help", "include-space-usage-details","svm="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextListVolumes, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help") : + print(helpTextListVolumes) + sys.exit(0) + elif opt in ("-v", "--svm") : + svmName = arg + elif opt in ("-s", "--include-space-usage-details"): + includeSpaceUsageDetails = True + elif opt in ("-u", "--cluster-name"): + clusterName = arg + + # List volumes + try: + list_volumes(check_local_mounts=True, include_space_usage_details=includeSpaceUsageDetails, print_output=True, svm_name=svmName, cluster_name=clusterName) + except (InvalidConfigError, APIConnectionError) : + sys.exit(1) + else: + handleInvalidCommand() + + elif action == "mount": + # Get desired target from command line args + target = getTarget(sys.argv) + + # Invoke desired action based on target + if target in ("volume", "vol"): + volumeName = None + svmName = None + clusterName = None + lifName = None + mountpoint = None + mount_options = None + readonly = False + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", 
"options="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextMountVolume) + sys.exit(0) + elif opt in ("-v", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-l", "--lif"): + lifName = arg + elif opt in ("-n", "--name"): + volumeName = arg + elif opt in ("-m", "--mountpoint"): + mountpoint = arg + elif opt in ("-o", "--options"): + mount_options = arg + elif opt in ("-x", "--readonly"): + readonly = True + + # Mount volume + try: + mount_volume(svm_name = svmName, cluster_name=clusterName, lif_name = lifName, volume_name=volumeName, mountpoint=mountpoint, mount_options=mount_options, readonly=readonly, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + sys.exit(1) -@deprecated -def createSnapshot(volumeName: str, snapshotName: str = None, printOutput: bool = False) : - create_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) + else: + handleInvalidCommand() + elif action == "unmount": + # Get desired target from command line args + target = getTarget(sys.argv) -@deprecated -def createVolume(volumeName: str, volumeSize: str, guaranteeSpace: bool = False, volumeType: str = "flexvol", unixPermissions: str = "0777", unixUID: str = "0", unixGID: str = "0", exportPolicy: str = "default", snapshotPolicy: str = "none", aggregate: str = None, mountpoint: str = None, printOutput: bool = False) : - create_volume(volume_name=volumeName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, - unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, print_output=printOutput) + # Invoke desired action based on target 
+ if target in ("volume", "vol"): + mountpoint = None + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hm:", ["help", "mountpoint="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextUnmountVolume) + sys.exit(0) + elif opt in ("-m", "--mountpoint"): + mountpoint = arg + + # Check for required options + if not mountpoint: + handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) + + # Unmount volume + try: + unmount_volume(mountpoint=mountpoint, print_output= True) + except (MountOperationError): + sys.exit(1) + else: + handleInvalidCommand() + elif action in ("prepopulate"): + # Get desired target from command line args + target = getTarget(sys.argv) -@deprecated -def deleteSnapshot(volumeName: str, snapshotName: str, printOutput: bool = False) : - delete_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) + # Invoke desired action based on target + if target in ("flexcache", "cache"): + volumeName = None + paths = None + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hn:p:", ["help", "name=", "paths="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextPrepopulateFlexCache) + sys.exit(0) + elif opt in ("-n", "--name"): + volumeName = arg + elif opt in ("-p", "--paths"): + paths = arg + + # Check for required options + if not volumeName or not paths : + handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) + + # Convert paths string to list + pathsList = paths.split(",") -@deprecated -def deleteVolume(volumeName: str, printOutput: bool = False) : - delete_volume(volume_name=volumeName, 
print_output=printOutput) + # Prepopulate FlexCache + try: + prepopulate_flex_cache(volume_name=volumeName, paths=pathsList, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): + sys.exit(1) + else: + handleInvalidCommand() -@deprecated -def listCloudSyncRelationships(printOutput: bool = False) -> list() : - return list_cloud_sync_relationships(print_output=printOutput) + elif action in ("pull-from-s3", "pull-s3", "s3-pull"): + # Get desired target from command line args + target = getTarget(sys.argv) + # Invoke desired action based on target + if target in ("bucket"): + s3Bucket = None + s3ObjectKeyPrefix = "" + localDirectory = None -@deprecated -def listSnapMirrorRelationships(printOutput: bool = False) -> list() : - return list_snap_mirror_relationships(print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help") : + print(helpTextPullFromS3Bucket) + sys.exit(0) + elif opt in ("-b", "--bucket"): + s3Bucket = arg + elif opt in ("-p", "--key-prefix"): + s3ObjectKeyPrefix = arg + elif opt in ("-d", "--directory"): + localDirectory = arg + + # Check for required options + if not s3Bucket or not localDirectory: + handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) + + # Pull bucket from S3 + try: + pull_bucket_from_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, print_output=True) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) + elif target in ("object", "file"): + s3Bucket = None + s3ObjectKey = None + localFile = None -@deprecated -def listSnapshots(volumeName: str, printOutput: bool = False) -> list() : - return 
list_snapshots(volume_name=volumeName, print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:", ["help", "bucket=", "key=", "file=", "extra-args="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextPullFromS3Object) + sys.exit(0) + elif opt in ("-b", "--bucket"): + s3Bucket = arg + elif opt in ("-k", "--key"): + s3ObjectKey = arg + elif opt in ("-f", "--file"): + localFile = arg + + # Check for required options + if not s3Bucket or not s3ObjectKey: + handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) + + # Pull object from S3 + try: + pull_object_from_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, print_output=True) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) + else: + handleInvalidCommand() -@deprecated -def listVolumes(checkLocalMounts: bool = False, includeSpaceUsageDetails: bool = False, printOutput: bool = False) -> list() : - return list_volumes(check_local_mounts=checkLocalMounts, include_space_usage_details=includeSpaceUsageDetails, print_output=printOutput) + elif action in ("push-to-s3", "push-s3", "s3-push"): + # Get desired target from command line args + target = getTarget(sys.argv) + # Invoke desired action based on target + if target in ("directory", "dir"): + s3Bucket = None + s3ObjectKeyPrefix = "" + localDirectory = None + s3ExtraArgs = None -@deprecated -def mountVolume(volumeName: str, mountpoint: str, printOutput: bool = False) : - mount_volume(volume_name=volumeName, mountpoint=mountpoint, print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key=", "file=", "extra-args="]) + except Exception as err: + 
handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help") : + print(helpTextPushToS3Directory) + sys.exit(0) + elif opt in ("-b", "--bucket"): + s3Bucket = arg + elif opt in ("-p", "--key-prefix"): + s3ObjectKeyPrefix = arg + elif opt in ("-d", "--directory"): + localDirectory = arg + elif opt in ("-e", "--extra-args"): + s3ExtraArgs = arg + + # Check for required options + if not s3Bucket or not localDirectory: + handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) + + # Push file to S3 + try: + push_directory_to_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, s3_extra_args=s3ExtraArgs, print_output=True) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) + elif target in ("file"): + s3Bucket = None + s3ObjectKey = None + localFile = None + s3ExtraArgs = None -@deprecated -def prepopulateFlexCache(volumeName: str, paths: list, printOutput: bool = False) : - prepopulate_flex_cache(volume_name=volumeName, paths=paths, print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:e:", ["help", "bucket=", "key=", "file=", "extra-args="]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextPushToS3File) + sys.exit(0) + elif opt in ("-b", "--bucket"): + s3Bucket = arg + elif opt in ("-k", "--key"): + s3ObjectKey = arg + elif opt in ("-f", "--file"): + localFile = arg + elif opt in ("-e", "--extra-args"): + s3ExtraArgs = arg + + # Check for required options + if not s3Bucket or not localFile: + handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) + + # Push file to S3 + try: + push_file_to_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, 
local_file=localFile, s3_extra_args=s3ExtraArgs, print_output=True) + except (InvalidConfigError, APIConnectionError): + sys.exit(1) + else: + handleInvalidCommand() -@deprecated -def pullBucketFromS3(s3Bucket: str, localDirectory: str, s3ObjectKeyPrefix: str = "", printOutput: bool = False) : - pull_bucket_from_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, print_output=printOutput) + elif action in ("restore"): + # Get desired target from command line args + target = getTarget(sys.argv) + # Invoke desired action based on target + if target in ("snapshot", "snap"): + volumeName = None + snapshotName = None + svmName = None + clusterName = None + force = False -@deprecated -def pullObjectFromS3(s3Bucket: str, s3ObjectKey: str, localFile: str = None, printOutput: bool = False) : - pull_object_from_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, print_output=printOutput) + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hs:n:v:fu:", ["cluster-name=","help", "svm=", "name=", "volume=", "force"]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextRestoreSnapshot) + sys.exit(0) + elif opt in ("-n", "--name"): + snapshotName = arg + elif opt in ("-s", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-v", "--volume"): + volumeName = arg + elif opt in ("-f", "--force"): + force = True + + # Check for required options + if not volumeName or not snapshotName: + handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) + + # Confirm restore operation + if not force: + print("Warning: When you restore a snapshot, all subsequent snapshots are deleted.") + while True: + proceed = input("Are you sure that you want to proceed? 
(yes/no): ") + if proceed in ("yes", "Yes", "YES"): + break + elif proceed in ("no", "No", "NO"): + sys.exit(0) + else: + print("Invalid value. Must enter 'yes' or 'no'.") + # Restore snapshot + try: + restore_snapshot(volume_name=volumeName, snapshot_name=snapshotName, svm_name=svmName, cluster_name=clusterName, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError): + sys.exit(1) -@deprecated -def pushDirectoryToS3(s3Bucket: str, localDirectory: str, s3ObjectKeyPrefix: str = "", s3ExtraArgs: str = None, printOutput: bool = False) : - push_directory_to_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, s3_extra_args=s3ExtraArgs, print_output=printOutput) + else: + handleInvalidCommand() + elif action == "sync": + # Get desired target from command line args + target = getTarget(sys.argv) -@deprecated -def pushFileToS3(s3Bucket: str, localFile: str, s3ObjectKey: str = None, s3ExtraArgs: str = None, printOutput: bool = False) : - push_file_to_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, s3_extra_args=s3ExtraArgs, print_output=printOutput) + # Invoke desired action based on target + if target in ("cloud-sync-relationship", "cloud-sync"): + relationshipID = None + waitUntilComplete = False + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hi:w", ["help", "id=", "wait"]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextSyncCloudSyncRelationship) + sys.exit(0) + elif opt in ("-i", "--id"): + relationshipID = arg + elif opt in ("-w", "--wait"): + waitUntilComplete = True + + # Check for required options + if not relationshipID: + handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) + + # 
Update cloud sync relationship + try: + sync_cloud_sync_relationship(relationship_id=relationshipID, wait_until_complete=waitUntilComplete, print_output=True) + except (InvalidConfigError, APIConnectionError, CloudSyncSyncOperationError): + sys.exit(1) -@deprecated -def restoreSnapshot(volumeName: str, snapshotName: str, printOutput: bool = False) : - restore_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) + elif target in ("snapmirror-relationship", "snapmirror"): + uuid = None + volumeName = None + svmName = None + clusterName = None + waitUntilComplete = False + # Get command line options + try: + opts, args = getopt.getopt(sys.argv[3:], "hi:wn:u:v:", ["help", "cluster-name=","svm=","name=","uuid=", "wait"]) + except Exception as err: + print(err) + handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) + + # Parse command line options + for opt, arg in opts: + if opt in ("-h", "--help"): + print(helpTextSyncSnapMirrorRelationship) + sys.exit(0) + elif opt in ("-v", "--svm"): + svmName = arg + elif opt in ("-u", "--cluster-name"): + clusterName = arg + elif opt in ("-n", "--name"): + volumeName = arg + elif opt in ("-i", "--uuid"): + uuid = arg + elif opt in ("-w", "--wait"): + waitUntilComplete = True + + # Check for required options + if not uuid and not volumeName: + handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) + + if uuid and volumeName: + handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) + + # Update SnapMirror relationship + try: + sync_snap_mirror_relationship(uuid=uuid, svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, wait_until_complete=waitUntilComplete, print_output=True) + except ( + InvalidConfigError, APIConnectionError, InvalidSnapMirrorParameterError, + SnapMirrorSyncOperationError) : + sys.exit(1) -@deprecated -def syncCloudSyncRelationship(relationshipID: str, waitUntilComplete: 
bool = False, printOutput: bool = False) : - sync_cloud_sync_relationship(relationship_id=relationshipID, wait_until_complete=waitUntilComplete, print_output=printOutput) + else: + handleInvalidCommand() + elif action in ("version", "v", "-v", "--version"): + print("NetApp DataOps Toolkit for Traditional Environments - version " + + traditional.__version__) -@deprecated -def syncSnapMirrorRelationship(uuid: str, waitUntilComplete: bool = False, printOutput: bool = False) : - sync_snap_mirror_relationship(uuid=uuid, wait_until_complete=waitUntilComplete, print_output=printOutput) + else: + handleInvalidCommand() From ad7c9106d25ce70f36db5fd5f1bb854cc4303729 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 16 Jun 2022 13:52:58 -0500 Subject: [PATCH 27/56] Pushed wrong code, fixed now. --- .../netapp_dataops/traditional.py | 3739 ++++++++++------- 1 file changed, 2200 insertions(+), 1539 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 5a9a55f..3d1e613 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1,1695 +1,2356 @@ -#!/usr/bin/env python3 +"""NetApp DataOps Toolkit for Traditional Environments import module. + +This module provides the public functions available to be imported directly +by applications using the import method of utilizing the toolkit. 
+""" import base64 +import functools import json import os import re -from getpass import getpass - +import subprocess import sys -sys.path.insert(0, "/root/netapp-dataops-toolkit/netapp_dataops_traditional/netapp_dataops") - -from netapp_dataops import traditional -from netapp_dataops.traditional import ( - clone_volume, - InvalidConfigError, - InvalidVolumeParameterError, - InvalidSnapMirrorParameterError, - InvalidSnapshotParameterError, - APIConnectionError, - mount_volume, - unmount_volume, - MountOperationError, - ConnectionTypeError, - list_volumes, - create_snapshot, - create_volume, - delete_snapshot, - delete_volume, - list_cloud_sync_relationships, - list_snap_mirror_relationships, - create_snap_mirror_relationship, - list_snapshots, - prepopulate_flex_cache, - pull_bucket_from_s3, - pull_object_from_s3, - push_directory_to_s3, - push_file_to_s3, - restore_snapshot, - CloudSyncSyncOperationError, - sync_cloud_sync_relationship, - sync_snap_mirror_relationship, - SnapMirrorSyncOperationError -) - - -## Define contents of help text -helpTextStandard = ''' -The NetApp DataOps Toolkit is a Python library that makes it simple for data scientists and data engineers to perform various data management tasks, such as provisioning a new data volume, near-instantaneously cloning a data volume, and near-instantaneously snapshotting a data volume for traceability/baselining. - -Basic Commands: - -\tconfig\t\t\t\tCreate a new config file (a config file is required to perform other commands). -\thelp\t\t\t\tPrint help text. -\tversion\t\t\t\tPrint version details. - -Data Volume Management Commands: -Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. - -\tclone volume\t\t\tCreate a new data volume that is an exact copy of an existing volume. -\tcreate volume\t\t\tCreate a new data volume. -\tdelete volume\t\t\tDelete an existing data volume. -\tlist volumes\t\t\tList all data volumes. 
-\tmount volume\t\t\tMount an existing data volume locally. Note: on Linux hosts - must be run as root. -\tunmount volume\t\t\tUnmount an existing data volume. Note: on Linux hosts - must be run as root. - -Snapshot Management Commands: -Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. - -\tcreate snapshot\t\t\tCreate a new snapshot for a data volume. -\tdelete snapshot\t\t\tDelete an existing snapshot for a data volume. -\tlist snapshots\t\t\tList all snapshots for a data volume. -\trestore snapshot\t\tRestore a snapshot for a data volume (restore the volume to its exact state at the time that the snapshot was created). - -Data Fabric Commands: -Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. - -\tlist cloud-sync-relationships\tList all existing Cloud Sync relationships. -\tsync cloud-sync-relationship\tTrigger a sync operation for an existing Cloud Sync relationship. -\tpull-from-s3 bucket\t\tPull the contents of a bucket from S3. -\tpull-from-s3 object\t\tPull an object from S3. -\tpush-to-s3 directory\t\tPush the contents of a directory to S3 (multithreaded). -\tpush-to-s3 file\t\t\tPush a file to S3. - -Advanced Data Fabric Commands: -Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. - -\tprepopulate flexcache\t\tPrepopulate specific files/directories on a FlexCache volume (ONTAP 9.8 and above ONLY). -\tlist snapmirror-relationships\tList all existing SnapMirror relationships. -\tsync snapmirror-relationship\tTrigger a sync operation for an existing SnapMirror relationship. -\tcreate snapmirror-relationship\tCreate new SnapMirror relationship. -''' -helpTextCloneVolume = ''' -Command: clone volume - -Create a new data volume that is an exact copy of an existing volume. - -Required Options/Arguments: -\t-n, --name=\t\tName of new volume.. 
-\t-v, --source-volume=\tName of volume to be cloned. - -Optional Options/Arguments: -\t-l, --cluster-name=\tnon default hosting cluster -\t-c, --source-svm=\tnon default source svm name -\t-t, --target-svm=\tnon default target svm name -\t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (if not specified, gid of source volume will be retained) (Note: cannot apply gid of '0' when creating clone). -\t-h, --help\t\tPrint help text. -\t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. -\t-s, --source-snapshot=\tName of the snapshot to be cloned (if specified, the clone will be created from a specific snapshot on the source volume as opposed to the current state of the volume). -\t\t\t\twhen snapshot name suffixed with * the latest snapshot will be used (hourly* will use the latest snapshot prefixed with hourly ) -\t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (if not specified, uid of source volume will be retained) (Note: cannot apply uid of '0' when creating clone). -\t-x, --readonly\t\tRead-only option for mounting volumes locally. -\t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. -\t-e, --export-hosts\tcolon(:) seperated hosts/cidrs to to use for export. 
hosts will be exported for rw and root access -\t-e, --export-policy\texport policy name to attach to the volume, default policy will be used if export-hosts/export-policy not provided -\t-d, --snapshot-policy\tsnapshot-policy to attach to the volume, default snapshot policy will be used if not provided -\t-s, --split\t\tstart clone split after creation -\t-r, --refresh\t\tdelete existing clone if exists before creating a new one -\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists - -Examples (basic usage): -\tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset -\tnetapp_dataops_cli.py clone volume -n project2 -v gold_dataset -s snap1 -\tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset --mountpoint=~/project1 --readonly - - -Examples (advanced usage): -\tnetapp_dataops_cli.py clone volume -n testvol -v gold_dataset -u 1000 -g 1000 -x -j /project1 -d snappolicy1 -\tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset --source-svm=svm1 --target-svm=svm2 --source-snapshot=daily* --export-hosts 10.5.5.3:host1:10.6.4.0/24 --split -''' -helpTextConfig = ''' -Command: config - -Create a new config file (a config file is required to perform other commands). - -No additional options/arguments required. -''' -helpTextCreateSnapshot = ''' -Command: create snapshot - -Create a new snapshot for a data volume. - -Required Options/Arguments: -\t-v, --volume=\tName of volume. - -Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster -\t-s, --svm=\t\tNon defaul svm name. -\t-h, --help\t\tPrint help text. -\t-n, --name=\t\tName of new snapshot. If not specified, will be set to 'netapp_dataops_'. -\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. -\t \tCan be count of snapshots when int (ex. 10) or days when retention is suffixed by d (ex. 
10d) -\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot - -Examples: -\tnetapp_dataops_cli.py create snapshot --volume=project1 --name=snap1 -\tnetapp_dataops_cli.py create snapshot -v project2 -n final_dataset -\tnetapp_dataops_cli.py create snapshot --volume=test1 -\tnetapp_dataops_cli.py create snapshot -v project2 -n daily_consistent -r 7 -l daily -\tnetapp_dataops_cli.py create snapshot -v project2 -n daily_for_month -r 30d -l daily -''' -helpTextCreateVolume = ''' -Command: create volume - -Create a new data volume. - -Required Options/Arguments: -\t-n, --name=\t\tName of new volume. -\t-s, --size=\t\tSize of new volume. Format: '1024MB', '100GB', '10TB', etc. - -Optional Options/Arguments: -\t-l, --cluster-name=\tnon default hosting cluster -\t-v, --svm=\t\tnon default svm name -\t-a, --aggregate=\tAggregate to use when creating new volume (flexvol) or optional comma seperated aggrlist when specific aggregates are required for FG. -\t-d, --snapshot-policy=\tSnapshot policy to apply for new volume. -\t-e, --export-policy=\tNFS export policy to use when exporting new volume. -\t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (ex. '0' for root group). -\t-h, --help\t\tPrint help text. -\t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. -\t-p, --permissions=\tUnix filesystem permissions to apply when creating new volume (ex. '0777' for full read/write permissions for all users and groups). -\t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. do not use thin provisioning). -\t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). -\t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). 
-\t-x, --readonly\t\tRead-only option for mounting volumes locally. -\t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. -\t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). -\t-y, --dp\t\tCreate volume as DP volume (the volume will be used as snapmirror target) - - -Examples (basic usage): -\tnetapp_dataops_cli.py create volume --name=project1 --size=10GB -\tnetapp_dataops_cli.py create volume -n datasets -s 10TB -\tsudo -E netapp_dataops_cli.py create volume --name=project2 --size=2TB --mountpoint=~/project2 --readonly - -Examples (advanced usage): -\tsudo -E netapp_dataops_cli.py create volume --name=project1 --size=10GB --permissions=0755 --type=flexvol --mountpoint=~/project1 --readonly --junction=/project1 -\tsudo -E netapp_dataops_cli.py create volume --name=project2_flexgroup --size=2TB --type=flexgroup --mountpoint=/mnt/project2 -\tnetapp_dataops_cli.py create volume --name=testvol --size=10GB --type=flexvol --aggregate=n2_data -\tnetapp_dataops_cli.py create volume -n testvol -s 10GB -t flexvol -p 0755 -u 1000 -g 1000 -j /project1 -\tsudo -E netapp_dataops_cli.py create volume -n vol1 -s 5GB -t flexvol --export-policy=team1 -m /mnt/vol1 -\tnetapp_dataops_cli.py create vol -n test2 -s 10GB -t flexvol --snapshot-policy=default --tiering-policy=auto -''' -helpTextDeleteSnapshot = ''' -Command: delete snapshot - -Delete an existing snapshot for a data volume. - -Required Options/Arguments: -\t-n, --name=\tName of snapshot to be deleted. -\t-v, --volume=\tName of volume. - -Optional Options/Arguments: -\t-u, --cluster-name=\tNon default hosting cluster -\t-s, --svm=\t\tNon default svm -\t-h, --help\t\tPrint help text. - -Examples: -\tnetapp_dataops_cli.py delete snapshot --volume=project1 --name=snap1 -\tnetapp_dataops_cli.py delete snapshot -v project2 -n netapp_dataops_20201113_221917 -''' -helpTextDeleteVolume = ''' -Command: delete volume - -Delete an existing data volume. 
- -Required Options/Arguments: -\t-n, --name=\tName of volume to be deleted. - -Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster -\t-v, --svm \t\tnon default SVM name -\t-f, --force\t\tDo not prompt user to confirm operation. -\t-p, --mountpoint\t\tMount point for the locally mounted volume. -\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion -\t --delete-non-clone\tEnable deletion of volume not created as clone by this tool -\t-h, --help\t\tPrint help text. - -Examples: -\tnetapp_dataops_cli.py delete volume --name=project1 -\tnetapp_dataops_cli.py delete volume -n project2 -''' - -helpTextUnmountVolume = ''' -Command: unmount volume - -Unmount an existing data volume that is currently mounted locally. - -Required Options/Arguments: -\t-m, --mountpoint=\tMountpoint where volume is mounted at. - -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. - -Examples: -\tnetapp_dataops_cli.py unmount volume --mountpoint=/project2 -\tnetapp_dataops_cli.py unmount volume -m /project2 -''' - -helpTextListCloudSyncRelationships = ''' -Command: list cloud-sync-relationships - -List all existing Cloud Sync relationships. - -No additional options/arguments required. -''' -helpTextListSnapMirrorRelationships = ''' -Command: list snapmirror-relationships - -List all SnapMirror relationships. - -Optional Options/Arguments: -\t-u, --cluster-name=\tNon default hosting cluster -\t-s, --svm=\t\tNon default svm. -\t-h, --help\t\tPrint help text. -''' -helpTextListSnapshots = ''' -Command: list snapshots - -List all snapshots for a data volume. - -Required Options/Arguments: -\t-v, --volume=\tName of volume. - -Optional Options/Arguments: -\t-u, --cluster-name=\tNon default hosting cluster -\t-s, --svm=\t\tNon default svm. -\t-h, --help\t\tPrint help text. 
+import time +import warnings +import datetime +from concurrent.futures import ThreadPoolExecutor +import boto3 +from botocore.client import Config as BotoConfig +from netapp_ontap import config as netappConfig +from netapp_ontap.error import NetAppRestError +from netapp_ontap.host_connection import HostConnection as NetAppHostConnection +from netapp_ontap.resources import Flexcache as NetAppFlexCache +from netapp_ontap.resources import SnapmirrorRelationship as NetAppSnapmirrorRelationship +from netapp_ontap.resources import SnapmirrorTransfer as NetAppSnapmirrorTransfer +from netapp_ontap.resources import Snapshot as NetAppSnapshot +from netapp_ontap.resources import Volume as NetAppVolume +from netapp_ontap.resources import ExportPolicy as NetAppExportPolicy +from netapp_ontap.resources import SnapshotPolicy as NetAppSnapshotPolicy +from netapp_ontap.resources import CLI as NetAppCLI +import pandas as pd +import requests +from tabulate import tabulate +import yaml + + +__version__ = "2.3.0" + -Examples: -\tnetapp_dataops_cli.py list snapshots --volume=project1 -\tnetapp_dataops_cli.py list snapshots -v test1 -''' -helpTextListVolumes = ''' -Command: list volumes - -List all data volumes. - -No options/arguments are required. - -Optional Options/Arguments: -\t-u, --cluster-name=\t\t\tnon default hosting cluster -\t-v, --svm=\t\t\t\tlist volume on non default svm -\t-h, --help\t\t\t\tPrint help text. -\t-s, --include-space-usage-details\tInclude storage space usage details in output (see README for explanation). 
+# Using this decorator in lieu of using a dependency to manage deprecation +def deprecated(func): + @functools.wraps(func) + def warned_func(*args, **kwargs): + warnings.warn("Function {} is deprecated.".format(func.__name__), + category=DeprecationWarning, + stacklevel=2) + return func(*args, **kwargs) + return warned_func -Examples: -\tnetapp_dataops_cli.py list volumes -\tnetapp_dataops_cli.py list volumes --include-space-usage-details -''' -helpTextMountVolume = ''' -Command: mount volume -Mount an existing data volume locally. +class CloudSyncSyncOperationError(Exception) : + """Error that will be raised when a Cloud Sync sync operation fails""" + pass -Requirement: On Linux hosts, must be run as root. +class ConnectionTypeError(Exception): + """Error that will be raised when an invalid connection type is given""" + pass -Required Options/Arguments: -\t-m, --mountpoint=\tLocal mountpoint to mount volume at. -\t-n, --name=\t\tName of volume. -Optional Options/Arguments: -\t-v, --svm \t\tnon default SVM name -\t-l, --lif \t\tnon default lif (nfs server ip/name) -\t-h, --help\t\tPrint help text. -\t-x, --readonly\t\tMount volume locally as read-only. -\t-o, --options\t\tEnables users to Specify custom NFS mount options. +class InvalidConfigError(Exception): + """Error that will be raised when the config file is invalid or missing""" + pass -Examples: -\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 -\tsudo -E netapp_dataops_cli.py mount volume -m ~/testvol -n testvol -x -\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly -\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly --options=rsize=262144,wsize=262144,nconnect=16 -''' -helpTextPullFromS3Bucket = ''' -Command: pull-from-s3 bucket -Pull the contents of a bucket from S3 (multithreaded). 
+class InvalidSnapMirrorParameterError(Exception) : + """Error that will be raised when an invalid SnapMirror parameter is given""" + pass -Note: To pull to a data volume, the volume must be mounted locally. -Warning: This operation has not been tested at scale and may not be appropriate for extremely large datasets. +class InvalidSnapshotParameterError(Exception): + """Error that will be raised when an invalid snapshot parameter is given""" + pass -Required Options/Arguments: -\t-b, --bucket=\t\tS3 bucket to pull from. -\t-d, --directory=\tLocal directory to save contents of bucket to. -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. -\t-p, --key-prefix=\tObject key prefix (pull will be limited to objects with key that starts with this prefix). +class InvalidVolumeParameterError(Exception): + """Error that will be raised when an invalid volume parameter is given""" + pass -Examples: -\tnetapp_dataops_cli.py pull-from-s3 bucket --bucket=project1 --directory=/mnt/project1 -\tnetapp_dataops_cli.py pull-from-s3 bucket -b project1 -p project1/ -d ./project1/ -''' -helpTextPullFromS3Object = ''' -Command: pull-from-s3 object -Pull an object from S3. +class MountOperationError(Exception): + """Error that will be raised when a mount operation fails""" + pass -Note: To pull to a data volume, the volume must be mounted locally. -Required Options/Arguments: -\t-b, --bucket=\t\tS3 bucket to pull from. -\t-k, --key=\t\tKey of S3 object to pull. +class SnapMirrorSyncOperationError(Exception) : + """Error that will be raised when a SnapMirror sync operation fails""" + pass -Optional Options/Arguments: -\t-f, --file=\t\tLocal filepath (including filename) to save object to (if not specified, value of -k/--key argument will be used) -\t-h, --help\t\tPrint help text. 
-Examples: -\tnetapp_dataops_cli.py pull-from-s3 object --bucket=project1 --key=data.csv --file=./project1/data.csv -\tnetapp_dataops_cli.py pull-from-s3 object -b project1 -k data.csv -''' -helpTextPushToS3Directory = ''' -Command: push-to-s3 directory +class APIConnectionError(Exception) : + '''Error that will be raised when an API connection cannot be established''' + pass -Push the contents of a directory to S3 (multithreaded). -Note: To push from a data volume, the volume must be mounted locally. +def _print_api_response(response: requests.Response): + print("API Response:") + print("Status Code: ", response.status_code) + print("Header: ", response.headers) + if response.text: + print("Body: ", response.text) + + +def _download_from_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, + s3CACertBundle: str, s3Bucket: str, s3ObjectKey: str, localFile: str, print_output: bool = False): + # Instantiate S3 session + try: + s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, + s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, + s3CACertBundle=s3CACertBundle, print_output=print_output) + except Exception as err: + if print_output: + print("Error: S3 API error: ", err) + raise APIConnectionError(err) + + if print_output: + print( + "Downloading object '" + s3ObjectKey + "' from bucket '" + s3Bucket + "' and saving as '" + localFile + "'.") + + # Create directories that don't exist + if localFile.find(os.sep) != -1: + dirs = localFile.split(os.sep) + dirpath = os.sep.join(dirs[:len(dirs) - 1]) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + # Download the file + try: + s3.Object(s3Bucket, s3ObjectKey).download_file(localFile) + except Exception as err: + if print_output: + print("Error: S3 API error: ", err) + raise APIConnectionError(err) + + +def _get_cloud_central_access_token(refreshToken: str, print_output: bool = False) -> str: + # Define parameters for API call + 
url = "https://netapp-cloud-account.auth0.com/oauth/token" + headers = { + "Content-Type": "application/json" + } + data = { + "grant_type": "refresh_token", + "refresh_token": refreshToken, + "client_id": "Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC" + } + + # Call API to optain access token + response = requests.post(url=url, headers=headers, data=json.dumps(data)) + + # Parse response to retrieve access token + try: + responseBody = json.loads(response.text) + accessToken = responseBody["access_token"] + except: + errorMessage = "Error obtaining access token from Cloud Sync API" + if print_output: + print("Error:", errorMessage) + _print_api_response(response) + raise APIConnectionError(errorMessage, response) -Warning: This operation has not been tested at scale and may not be appropriate for extremely large datasets. + return accessToken -Required Options/Arguments: -\t-b, --bucket=\t\tS3 bucket to push to. -\t-d, --directory=\tLocal directory to push contents of. +def _get_cloud_sync_access_parameters(refreshToken: str, print_output: bool = False) -> (str, str): + try: + accessToken = _get_cloud_central_access_token(refreshToken=refreshToken, print_output=print_output) + except APIConnectionError: + raise -Optional Options/Arguments: -\t-e, --extra-args=\tExtra args to apply to newly-pushed S3 objects (For details on this field, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html#the-extraargs-parameter). -\t-h, --help\t\tPrint help text. -\t-p, --key-prefix=\tPrefix to add to key for newly-pushed S3 objects (Note: by default, key will be local filepath relative to directory being pushed). 
+ # Define parameters for API call + url = "https://cloudsync.netapp.com/api/accounts" + headers = { + "Content-Type": "application/json", + "Authorization": "Bearer " + accessToken + } -Examples: -\tnetapp_dataops_cli.py push-to-s3 directory --bucket=project1 --directory=/mnt/project1 -\tnetapp_dataops_cli.py push-to-s3 directory -b project1 -d /mnt/project1 -p project1/ -e '{"Metadata": {"mykey": "myvalue"}}' -''' -helpTextPushToS3File = ''' -Command: push-to-s3 file + # Call API to obtain account ID + response = requests.get(url=url, headers=headers) -Push a file to S3. + # Parse response to retrieve account ID + try: + responseBody = json.loads(response.text) + accountId = responseBody[0]["accountId"] + except: + errorMessage = "Error obtaining account ID from Cloud Sync API" + if print_output: + print("Error:", errorMessage) + _print_api_response(response) + raise APIConnectionError(errorMessage, response) -Note: To push from a data volume, the volume must be mounted locally. + # Return access token and account ID + return accessToken, accountId -Required Options/Arguments: -\t-b, --bucket=\t\tS3 bucket to push to. -\t-f, --file=\t\tLocal file to push. -Optional Options/Arguments: -\t-e, --extra-args=\tExtra args to apply to newly-pushed S3 object (For details on this field, refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html#the-extraargs-parameter). -\t-h, --help\t\tPrint help text. -\t-k, --key=\t\tKey to assign to newly-pushed S3 object (if not specified, key will be set to value of -f/--file argument). 
+def _instantiate_connection(config: dict, connectionType: str = "ONTAP", print_output: bool = False): + if connectionType == "ONTAP": + ## Connection details for ONTAP cluster + try: + ontapClusterMgmtHostname = config["hostname"] + ontapClusterAdminUsername = config["username"] + ontapClusterAdminPasswordBase64 = config["password"] + verifySSLCert = config["verifySSLCert"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + # Decode base64-encoded password + ontapClusterAdminPasswordBase64Bytes = ontapClusterAdminPasswordBase64.encode("ascii") + ontapClusterAdminPasswordBytes = base64.b64decode(ontapClusterAdminPasswordBase64Bytes) + ontapClusterAdminPassword = ontapClusterAdminPasswordBytes.decode("ascii") + + # Instantiate connection to ONTAP cluster + netappConfig.CONNECTION = NetAppHostConnection( + host=ontapClusterMgmtHostname, + username=ontapClusterAdminUsername, + password=ontapClusterAdminPassword, + verify=verifySSLCert + ) -Examples: -\tnetapp_dataops_cli.py push-to-s3 file --bucket=project1 --file=data.csv -\tnetapp_dataops_cli.py push-to-s3 file -b project1 -k data.csv -f /mnt/project1/data.csv -e '{"Metadata": {"mykey": "myvalue"}}' -''' -helpTextPrepopulateFlexCache = ''' -Command: prepopulate flexcache - -Prepopulate specific files/directories on a FlexCache volume. - -Compatibility: ONTAP 9.8 and above ONLY - -Required Options/Arguments: -\t-n, --name=\tName of FlexCache volume. -\t-p, --paths=\tComma-separated list of dirpaths/filepaths to prepopulate. + else: + raise ConnectionTypeError() -Optional Options/Arguments: -\t-h, --help\tPrint help text. 
- -Examples: -\tnetapp_dataops_cli.py prepopulate flexcache --name=project1 --paths=/datasets/project1,/datasets/project2 -\tnetapp_dataops_cli.py prepopulate flexcache -n test1 -p /datasets/project1,/datasets/project2 -''' -helpTextRestoreSnapshot = ''' -Command: restore snapshot -Restore a snapshot for a data volume (restore the volume to its exact state at the time that the snapshot was created). +def _instantiate_s3_session(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, print_output: bool = False): + # Instantiate session + session = boto3.session.Session(aws_access_key_id=s3AccessKeyId, aws_secret_access_key=s3SecretAccessKey) + config = BotoConfig(signature_version='s3v4') -Required Options/Arguments: -\t-n, --name=\tName of snapshot to be restored. -\t-v, --volume=\tName of volume. + if s3VerifySSLCert: + if s3CACertBundle: + s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=s3CACertBundle, config=config) + else: + s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, config=config) + else: + s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=False, config=config) -Optional Options/Arguments: -\t-u, --cluster-name=\tNon default hosting cluster -\t-s, --svm=\t\tNon default svm. -\t-f, --force\t\tDo not prompt user to confirm operation. -\t-h, --help\t\tPrint help text. - -Examples: -\tnetapp_dataops_cli.py restore snapshot --volume=project1 --name=snap1 -\tnetapp_dataops_cli.py restore snapshot -v project2 -n netapp_dataops_20201113_221917 -''' -helpTextSyncCloudSyncRelationship = ''' -Command: sync cloud-sync-relationship + return s3 -Trigger a sync operation for an existing Cloud Sync relationship. -Tip: Run `netapp_dataops_cli.py list cloud-sync-relationships` to obtain relationship ID. +def _print_invalid_config_error() : + print("Error: Missing or invalid config file. 
Run `netapp_dataops_cli.py config` to create config file.") -Required Options/Arguments: -\t-i, --id=\tID of the relationship for which the sync operation is to be triggered. - -Optional Options/Arguments: -\t-h, --help\tPrint help text. -\t-w, --wait\tWait for sync operation to complete before exiting. - -Examples: -\tnetapp_dataops_cli.py sync cloud-sync-relationship --id=5ed00996ca85650009a83db2 -\tnetapp_dataops_cli.py sync cloud-sync-relationship -i 5ed00996ca85650009a83db2 -w -''' -helpTextSyncSnapMirrorRelationship = ''' -Command: sync snapmirror-relationship - -Trigger a sync operation for an existing SnapMirror relationship. -Tip: Run `netapp_dataops_cli.py list snapmirror-relationships` to obtain relationship UUID. - -Required Options/Arguments: -\t-i, --uuid=\tUUID of the relationship for which the sync operation is to be triggered. -or -\t-n, --name=\tName of target volume to be sync . - -Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster -\t-v, --svm \t\tnon default target SVM name -\t-h, --help\t\tPrint help text. -\t-w, --wait\t\tWait for sync operation to complete before exiting. 
- -Examples: -\tnetapp_dataops_cli.py sync snapmirror-relationship --uuid=132aab2c-4557-11eb-b542-005056932373 -\tnetapp_dataops_cli.py sync snapmirror-relationship -i 132aab2c-4557-11eb-b542-005056932373 -w -\tnetapp_dataops_cli.py sync snapmirror-relationship -u cluster1 -v svm1 -n vol1 -w -''' - -helpTextCreateSnapMirrorRelationship = ''' -Command: create snapmirror-relationship - -create snapmirror relationship - -Required Options/Arguments: -\t-n, --target-vol=\tName of target volume -\t-s, --source-svm=\tSource SVM name -\t-v, --source-vol=\tSource volume name - -Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster -\t-t, --target-svm=\tnon default target SVM -\t-c, --schedule=\t\tnon default schedule (default is hourly) -\t-p, --policy=\t\tnon default policy (default is MirrorAllSnapshots -\t-a, --action=\t\tresync,initialize following creation -\t-h, --help\t\tPrint help text. - -Examples: -\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly -\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly -a resync -''' - -## Function for creating config file -def createConfig(configDirPath: str = "~/.netapp_dataops", configFilename: str = "config.json", connectionType: str = "ONTAP"): - # Check to see if user has an existing config file +def _retrieve_config(configDirPath: str = "~/.netapp_dataops", configFilename: str = "config.json", + print_output: bool = False) -> dict: configDirPath = os.path.expanduser(configDirPath) configFilePath = os.path.join(configDirPath, configFilename) - if os.path.isfile(configFilePath): - print("You already have an existing config file. 
Creating a new config file will overwrite this existing config.") - # If existing config file is present, ask user if they want to proceed - # Verify value entered; prompt user to re-enter if invalid - while True: - proceed = input("Are you sure that you want to proceed? (yes/no): ") - if proceed in ("yes", "Yes", "YES"): - break - elif proceed in ("no", "No", "NO"): - sys.exit(0) + try: + with open(configFilePath, 'r') as configFile: + # Read connection details from config file; read into dict + config = json.load(configFile) + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + return config + + +def _retrieve_cloud_central_refresh_token(print_output: bool = False) -> str: + # Retrieve refresh token from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + refreshTokenBase64 = config["cloudCentralRefreshToken"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + # Decode base64-encoded refresh token + refreshTokenBase64Bytes = refreshTokenBase64.encode("ascii") + refreshTokenBytes = base64.b64decode(refreshTokenBase64Bytes) + refreshToken = refreshTokenBytes.decode("ascii") + + return refreshToken + + +def _retrieve_s3_access_details(print_output: bool = False) -> (str, str, str, bool, str): + # Retrieve refresh token from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + s3Endpoint = config["s3Endpoint"] + s3AccessKeyId = config["s3AccessKeyId"] + s3SecretAccessKeyBase64 = config["s3SecretAccessKey"] + s3VerifySSLCert = config["s3VerifySSLCert"] + s3CACertBundle = config["s3CACertBundle"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + # Decode base64-encoded refresh token + s3SecretAccessKeyBase64Bytes = s3SecretAccessKeyBase64.encode("ascii") + s3SecretAccessKeyBytes = 
base64.b64decode(s3SecretAccessKeyBase64Bytes) + s3SecretAccessKey = s3SecretAccessKeyBytes.decode("ascii") + + return s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle + + +def _upload_to_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, + s3Bucket: str, localFile: str, s3ObjectKey: str, s3ExtraArgs: str = None, print_output: bool = False): + # Instantiate S3 session + try: + s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, + s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, + s3CACertBundle=s3CACertBundle, print_output=print_output) + except Exception as err: + if print_output: + print("Error: S3 API error: ", err) + raise APIConnectionError(err) + + # Upload file + if print_output: + print("Uploading file '" + localFile + "' to bucket '" + s3Bucket + "' and applying key '" + s3ObjectKey + "'.") + + try: + if s3ExtraArgs: + s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile, ExtraArgs=json.loads(s3ExtraArgs)) + else: + s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile) + except Exception as err: + if print_output: + print("Error: S3 API error: ", err) + raise APIConnectionError(err) + + +def _convert_bytes_to_pretty_size(size_in_bytes: str, num_decimal_points: int = 2) -> str : + # Convert size in bytes to "pretty" size (size in KB, MB, GB, or TB) + prettySize = float(size_in_bytes) / 1024 + if prettySize >= 1024: + prettySize = float(prettySize) / 1024 + if prettySize >= 1024: + prettySize = float(prettySize) / 1024 + if prettySize >= 1024: + prettySize = float(prettySize) / 1024 + prettySize = round(prettySize, 2) + prettySize = str(prettySize) + "TB" else: - print("Invalid value. 
Must enter 'yes' or 'no'.") + prettySize = round(prettySize, 2) + prettySize = str(prettySize) + "GB" + else: + prettySize = round(prettySize, 2) + prettySize = str(prettySize) + "MB" + else: + prettySize = round(prettySize, 2) + prettySize = str(prettySize) + "KB" - # Instantiate dict for storing connection details - config = dict() + return prettySize - if connectionType == "ONTAP": - config["connectionType"] = connectionType - # Prompt user to enter config details - config["hostname"] = input("Enter ONTAP management LIF hostname or IP address (Recommendation: Use SVM management interface): ") - config["svm"] = input("Enter SVM (Storage VM) name: ") - config["dataLif"] = input("Enter SVM NFS data LIF hostname or IP address: ") +# +# Public importable functions specific to the traditional package +# - # Prompt user to enter default volume type - # Verify value entered; promopt user to re-enter if invalid - while True: - config["defaultVolumeType"] = input("Enter default volume type to use when creating new volumes (flexgroup/flexvol) [flexgroup]: ") - if not config["defaultVolumeType"] : - config["defaultVolumeType"] = "flexgroup" - break - elif config["defaultVolumeType"] in ("flexgroup", "FlexGroup"): - config["defaultVolumeType"] = "flexgroup" - break - elif config["defaultVolumeType"] in ("flexvol", "FlexVol"): - config["defaultVolumeType"] = "flexvol" - break + +def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: str = None, source_snapshot_name: str = None, + source_svm: str = None, target_svm: str = None, export_hosts: str = None, export_policy: str = None, split: bool = False, + unix_uid: str = None, unix_gid: str = None, mountpoint: str = None, junction: str= None, readonly: bool = False, + snapshot_policy: str = None, refresh: bool = False, svm_dr_unprotect: bool = False, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except 
InvalidConfigError:
+        raise
+    try:
+        connectionType = config["connectionType"]
+    except:
+        if print_output:
+            _print_invalid_config_error()
+        raise InvalidConfigError()
+
+    if cluster_name:
+        config["hostname"] = cluster_name
+
+    if connectionType == "ONTAP":
+        # Instantiate connection to ONTAP cluster
+        try:
+            _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output)
+        except InvalidConfigError:
+            raise
+
+        # Retrieve values from config file if not passed into function
+        try:
+            sourcesvm = config["svm"]
+            if source_svm:
+                sourcesvm = source_svm
+
+            targetsvm = sourcesvm
+            if target_svm:
+                targetsvm = target_svm
+
+            if not unix_uid:
+                unix_uid = config["defaultUnixUID"]
+            if not unix_gid:
+                unix_gid = config["defaultUnixGID"]
+
+        except Exception as e:
+            if print_output:
+                print(e)
+                _print_invalid_config_error()
+            raise InvalidConfigError()
+
+        # Check unix uid for validity
+        try:
+            unix_uid = int(unix_uid)
+        except:
+            if print_output:
+                print("Error: Invalid unix uid specified. Value must be an integer. Example: '0' for root user.")
+            raise InvalidVolumeParameterError("unixUID")
+
+        # Check unix gid for validity
+        try:
+            unix_gid = int(unix_gid)
+        except:
+            if print_output:
+                print("Error: Invalid unix gid specified. Value must be an integer. Example: '0' for root group.")
+            raise InvalidVolumeParameterError("unixGID")
+
+        #check if clone volume already exists
+        try:
+            currentVolume = NetAppVolume.find(name=new_volume_name, svm=targetsvm)
+            if currentVolume and not refresh:
+                if print_output:
+                    print("Error: clone:"+new_volume_name+" already exists.")
+                raise InvalidVolumeParameterError("name")
+
+            #for refresh we want to keep the existing policy
+            if currentVolume and refresh and not export_policy and not export_hosts:
+                export_policy = currentVolume.nas.export_policy.name
+
+            # if refresh and not provided new snapshot_policy
+            if currentVolume and refresh and not snapshot_policy:
+                snapshot_policy = currentVolume.snapshot_policy.name
+
+        except NetAppRestError as err:
+            if print_output:
+                print("Error: ONTAP Rest API Error: ", err)
+            raise APIConnectionError(err)
+
+        #delete existing clone when refresh
+        try:
+            if currentVolume and refresh:
+                if "CLONENAME:" in currentVolume.comment:
+                    delete_volume(volume_name=new_volume_name, cluster_name=cluster_name, svm_name=target_svm, delete_mirror=True, print_output=True)
+                else:
+                    if print_output:
+                        print("Error: refresh clone is only supported when existing clone created using the tool (based on volume comment)")
+                    raise InvalidVolumeParameterError("name")
+        except:
+            print("Error: could not delete previous clone")
+            raise InvalidVolumeParameterError("name")
+
+        try:
+            if not snapshot_policy :
+                snapshot_policy = config["defaultSnapshotPolicy"]
+        except:
+            print("Error: default snapshot policy could not be found in config file")
+            raise InvalidVolumeParameterError("name")
+
+        # check export policies
+        try:
+            if not export_policy and not export_hosts:
+                export_policy = config["defaultExportPolicy"]
+            elif export_policy:
+                currentExportPolicy = NetAppExportPolicy.find(name=export_policy, svm=targetsvm)
+                if not currentExportPolicy:
+                    if print_output:
+                        print("Error: export policy:"+export_policy+" does not exist.")
+                    raise InvalidVolumeParameterError("name")
+ elif export_hosts: + export_policy = "netapp_dataops_"+new_volume_name + currentExportPolicy = NetAppExportPolicy.find(name=export_policy, svm=targetsvm) + if currentExportPolicy: + currentExportPolicy.delete() + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + #exists check if snapshot-policy + try: + snapshotPoliciesDetails = NetAppSnapshotPolicy.get_collection(**{"name":snapshot_policy}) + clusterSnapshotPolicy = False + svmSnapshotPolicy = False + for snapshotPolicyDetails in snapshotPoliciesDetails: + if str(snapshotPolicyDetails.name) == snapshot_policy: + try: + if str(snapshotPolicyDetails.svm.name) == targetsvm: + svmSnapshotPolicy = True + except: + clusterSnapshotPolicy = True + + if not clusterSnapshotPolicy and not svmSnapshotPolicy: + if print_output: + print("Error: snapshot-policy:"+snapshot_policy+" could not be found") + raise InvalidVolumeParameterError("snapshot_policy") + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Create volume + if print_output: + print("Creating clone volume '" + targetsvm+':'+new_volume_name + "' from source volume '" + sourcesvm+':'+source_volume_name + "'.") + + try: + # Retrieve source volume + sourceVolume = NetAppVolume.find(name=source_volume_name, svm=sourcesvm) + if not sourceVolume: + if print_output: + print("Error: Invalid source volume name.") + raise InvalidVolumeParameterError("name") + + # Create option to choose junction path. + if junction: + junction=junction else: - print("Invalid value. 
Must enter 'flexgroup' or 'flexvol'.") + junction = "/"+new_volume_name + + + # Construct dict representing new volume + newVolumeDict = { + "name": new_volume_name, + "svm": {"name": targetsvm}, + "nas": { + "path": junction + }, + "clone": { + "is_flexclone": True, + "parent_svm": { + #"name": sourceVolume.svm.name, + "name": sourcesvm, + #"uuid": sourceVolume.svm.uuid + }, + "parent_volume": { + "name": sourceVolume.name, + "uuid": sourceVolume.uuid + } + } + } + + if unix_uid != 0: + newVolumeDict["nas"]["uid"] = unix_uid + else: + if print_output: + print("Warning: Cannot apply uid of '0' when creating clone; uid of source volume will be retained.") + if unix_gid != 0: + newVolumeDict["nas"]["gid"] = unix_gid + else: + if print_output: + print("Warning: Cannot apply gid of '0' when creating clone; gid of source volume will be retained.") + + # Add source snapshot details to volume dict if specified + if source_snapshot_name and not source_snapshot_name.endswith("*"): + # Retrieve source snapshot + sourceSnapshot = NetAppSnapshot.find(sourceVolume.uuid, name=source_snapshot_name) + if not sourceSnapshot: + if print_output: + print("Error: Invalid source snapshot name.") + raise InvalidSnapshotParameterError("name") + + + # Append source snapshot details to volume dict + newVolumeDict["clone"]["parent_snapshot"] = { + "name": sourceSnapshot.name, + "uuid": sourceSnapshot.uuid + } + + if source_snapshot_name and source_snapshot_name.endswith("*"): + source_snapshot_prefix = source_snapshot_name[:-1] + latest_source_snapshot = None + latest_source_snapshot_uuid = None + + # Retrieve all source snapshot from last to 1st + for snapshot in NetAppSnapshot.get_collection(sourceVolume.uuid): + snapshot.get() + if snapshot.name.startswith(source_snapshot_prefix): + latest_source_snapshot = snapshot.name + latest_source_snapshot_uuid = snapshot.uuid + + if not latest_source_snapshot: + if print_output: + print("Error: Could not find snapshot prefixed by 
'"+source_snapshot_prefix+"'.")
+                    raise InvalidSnapshotParameterError("name")
+                # Append source snapshot details to volume dict
+                newVolumeDict["clone"]["parent_snapshot"] = {
+                    "name": latest_source_snapshot,
+                    "uuid": latest_source_snapshot_uuid
+                }
+                print("Snapshot '" + latest_source_snapshot+ "' will be used to create the clone.")
+
+            # set clone volume comment parameter
+            comment = 'PARENTSVM:'+sourcesvm+',PARENTVOL:'+newVolumeDict["clone"]["parent_volume"]["name"]+',CLONESVM:'+targetsvm+',CLONENAME:'+newVolumeDict["name"]
+            if source_snapshot_name: comment += ' SNAP:'+newVolumeDict["clone"]["parent_snapshot"]["name"]
+            comment += " netapp-dataops"
+
+            newVolumeDict["comment"] = comment
+
+            # Create new volume clone
+            newVolume = NetAppVolume.from_dict(newVolumeDict)
+            newVolume.post(poll=True, poll_timeout=120)
+            if print_output:
+                print("Clone volume created successfully.")
+
+        except NetAppRestError as err:
+            if print_output:
+                print("Error: ONTAP Rest API Error: ", err)
+            raise APIConnectionError(err)
+
+        if svm_dr_unprotect:
+            try:
+                if print_output:
+                    print("Disabling svm-dr protection")
+                response = NetAppCLI().execute("volume modify",vserver=targetsvm,volume=new_volume_name,body={"vserver_dr_protection": "unprotected"})
+            except NetAppRestError as err:
+                if "volume is not part of a Vserver DR configuration" in str(err):
+                    if print_output:
+                        print("Warning: could not disable svm-dr-protection since volume is not protected using svm-dr")
+                else:
+                    if print_output:
+                        print("Error: ONTAP Rest API Error: ", err)
+                    raise APIConnectionError(err)
 
-        # prompt user to enter default export policy
-        config["defaultExportPolicy"] = input("Enter export policy to use by default when creating new volumes [default]: ")
-        if not config["defaultExportPolicy"]:
-            config["defaultExportPolicy"] = "default"
+        #create custom export policy if needed
+        if export_hosts:
+            try:
+                if print_output:
+                    print("Creating export-policy:"+export_policy)
+                # Construct dict representing new 
export policy + newExportPolicyDict = { + "name" : export_policy, + "svm": {"name": targetsvm}, + "rules": [] + } + for client in export_hosts.split(":"): + newExportPolicyDict['rules'].append({ "clients": [{"match": client }], "ro_rule": ["sys"], "rw_rule": ["sys"], "superuser": ["sys"]}) + + # Create new export policy + newExportPolicy = NetAppExportPolicy.from_dict(newExportPolicyDict) + newExportPolicy.post(poll=True, poll_timeout=120) + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + #set export policy and snapshot policy + try: + if print_output: + print("Setting export-policy:"+export_policy+ " snapshot-policy:"+snapshot_policy) + volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) + updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) + updatedVolumeDetails.nas = {"export_policy": {"name": export_policy}} + updatedVolumeDetails.snapshot_policy = {"name": snapshot_policy} + updatedVolumeDetails.patch(poll=True, poll_timeout=120) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + #split clone + try: + if split: + if print_output: + print("Splitting clone") + volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) + #get volume details + updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) + updatedVolumeDetails.clone = {"split_initiated": True} + updatedVolumeDetails.patch() + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Optionally mount newly created volume + if mountpoint: + try: + mount_volume(volume_name=new_volume_name, svm_name=targetsvm, mountpoint=mountpoint, readonly=readonly, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + if print_output: + print("Error: Error mounting 
clone volume.") + raise - # prompt user to enter default snapshot policy - config["defaultSnapshotPolicy"] = input("Enter snapshot policy to use by default when creating new volumes [none]: ") - if not config["defaultSnapshotPolicy"]: - config["defaultSnapshotPolicy"] = "none" + else: + raise ConnectionTypeError() - # Prompt user to enter default uid, gid, and unix permissions - # Verify values entered; promopt user to re-enter if invalid - while True: - config["defaultUnixUID"] = input("Enter unix filesystem user id (uid) to apply by default when creating new volumes (ex. '0' for root user) [0]: ") - if not config["defaultUnixUID"]: - config["defaultUnixUID"] = "0" - break + +def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = None, snapshot_name: str = None, retention_count: int = 0, retention_days: bool = False, snapmirror_label: str = None, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name + + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + if not snapshot_name: + snapshot_name = "netapp_dataops" + + # Retrieve svm from config file + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + snapshot_name_original = snapshot_name + # Set snapshot name if not passed into function or retention provided + if not snapshot_name or int(retention_count) > 0: + timestamp = '.'+datetime.datetime.today().strftime("%Y-%m-%d_%H%M%S") + snapshot_name += timestamp + + if 
print_output: + print("Creating snapshot '" + snapshot_name + "'.") + + try: + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + # create snapshot dict + snapshotDict = { + 'name': snapshot_name, + 'volume': volume.to_dict() + } + if snapmirror_label: + if print_output: + print("Setting snapmirror label as:"+snapmirror_label) + snapshotDict['snapmirror_label'] = snapmirror_label + + # Create snapshot + snapshot = NetAppSnapshot.from_dict(snapshotDict) + snapshot.post(poll=True) + + if print_output: + print("Snapshot created successfully.") + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + #delete snapshots exceeding retention count if provided + retention_count = int(retention_count) + if retention_count > 0: try: - int(config["defaultUnixUID"]) - break - except: - print("Invalid value. Must enter an integer.") - while True: - config["defaultUnixGID"] = input("Enter unix filesystem group id (gid) to apply by default when creating new volumes (ex. 
'0' for root group) [0]: ") - if not config["defaultUnixGID"]: - config["defaultUnixGID"] = "0" - break + # Retrieve all source snapshot from last to 1st + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + if retention_days: + retention_date = datetime.datetime.today() - datetime.timedelta(days=retention_count) + + last_snapshot_list = [] + snapshot_list = [] + for snapshot in NetAppSnapshot.get_collection(volume.uuid): + snapshot.get() + if snapshot.name.startswith(snapshot_name_original+'.'): + if not retention_days: + snapshot_list.append(snapshot.name) + last_snapshot_list.append(snapshot.name) + if len(last_snapshot_list) > retention_count: + last_snapshot_list.pop(0) + else: + rx = r'^{0}\.(.+)$'.format(snapshot_name_original) + matchObj = re.match(rx,snapshot.name) + if matchObj: + snapshot_date = matchObj.group(1) + snapshot_date_obj = datetime.datetime.strptime(snapshot_date, "%Y-%m-%d_%H%M%S") + snapshot_list.append(snapshot.name) + last_snapshot_list.append(snapshot.name) + if snapshot_date_obj < retention_date: + last_snapshot_list.pop(0) + + #delete snapshots not in retention + for snap in snapshot_list: + if snap not in last_snapshot_list: + delete_snapshot(volume_name=volume_name, svm_name = svm, snapshot_name=snap, skip_owned=True, print_output=True) + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + else: + raise ConnectionTypeError() + + +def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = False, cluster_name: str = None, svm_name: str = None, + volume_type: str = "flexvol", unix_permissions: str = "0777", + unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", + snapshot_policy: str = None, aggregate: str = None, mountpoint: str = None, junction: str = None, readonly: bool = 
False, + print_output: bool = False, tiering_policy: str = None, vol_dp: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name + + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve values from config file if not passed into function + try: + svm = config["svm"] + if svm_name: + svm = svm_name + if not volume_type : + volume_type = config["defaultVolumeType"] + if not unix_permissions : + unix_permissions = config["defaultUnixPermissions"] + if not unix_uid : + unix_uid = config["defaultUnixUID"] + if not unix_gid : + unix_gid = config["defaultUnixGID"] + if not export_policy : + export_policy = config["defaultExportPolicy"] + if not snapshot_policy : + snapshot_policy = config["defaultSnapshotPolicy"] + if not aggregate and volume_type == 'flexvol' : + aggregate = config["defaultAggregate"] + except: + if print_output : + _print_invalid_config_error() + raise InvalidConfigError() + + # Check volume type for validity + if volume_type not in ("flexvol", "flexgroup"): + if print_output: + print("Error: Invalid volume type specified. Acceptable values are 'flexvol' and 'flexgroup'.") + raise InvalidVolumeParameterError("size") + + # Check unix permissions for validity + if not re.search("^0[0-7]{3}", unix_permissions): + if print_output: + print("Error: Invalid unix permissions specified. 
Acceptable values are '0777', '0755', '0744', etc.")
+            raise InvalidVolumeParameterError("unixPermissions")
+
+        # Check unix uid for validity
+        try:
+            unix_uid = int(unix_uid)
+        except:
+            if print_output :
+                print("Error: Invalid unix uid specified. Value must be an integer. Example: '0' for root user.")
+            raise InvalidVolumeParameterError("unixUID")
+
+        # Check unix gid for validity
+        try:
+            unix_gid = int(unix_gid)
+        except:
+            if print_output:
+                print("Error: Invalid unix gid specified. Value must be an integer. Example: '0' for root group.")
+            raise InvalidVolumeParameterError("unixGID")
+
+        # Convert volume size to Bytes
+        if re.search("^[0-9]+MB$", volume_size):
+            # Convert from MB to Bytes
+            volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**2
+        elif re.search("^[0-9]+GB$", volume_size):
+            # Convert from GB to Bytes
+            volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**3
+        elif re.search("^[0-9]+TB$", volume_size):
+            # Convert from TB to Bytes
+            volumeSizeBytes = int(volume_size[:len(volume_size)-2]) * 1024**4
+        else :
+            if print_output:
+                print("Error: Invalid volume size specified. Acceptable values are '1024MB', '100GB', '10TB', etc.")
+            raise InvalidVolumeParameterError("size")
+
+        # Create option to choose junction path. 
+ if junction: + junction=junction + else: + junction = "/"+volume_name + + + #check tiering policy + if not tiering_policy in ['none','auto','snapshot-only','all', None]: + if print_output: + print("Error: tiering policy can be: none,auto,snapshot-only or all") + raise InvalidVolumeParameterError("tieringPolicy") + + #vol dp type + if vol_dp: + # Create dict representing volume of type dp + volumeDict = { + "name": volume_name, + "comment": "netapp-dataops", + "svm": {"name": svm}, + "size": volumeSizeBytes, + "style": volume_type, + "type": 'dp' + } + else: + # Create dict representing volume + volumeDict = { + "name": volume_name, + "comment": "netapp-dataops", + "svm": {"name": svm}, + "size": volumeSizeBytes, + "style": volume_type, + "nas": { + "path": junction, + "export_policy": {"name": export_policy}, + "security_style": "unix", + "unix_permissions": unix_permissions, + "uid": unix_uid, + "gid": unix_gid + }, + "snapshot_policy": {"name": snapshot_policy}, + } + + # Set space guarantee field + if guarantee_space: + volumeDict["guarantee"] = {"type": "volume"} + else: + volumeDict["guarantee"] = {"type": "none"} + + # If flexvol -> set aggregate field + if volume_type == "flexvol": + volumeDict["aggregates"] = [{'name': aggregate}] + else: + if aggregate: + volumeDict["aggregates"] = [] + for aggr in aggregate.split(','): + volumeDict["aggregates"].append({'name': aggr}) + #if tiering policy provided + if tiering_policy: + volumeDict['tiering'] = {'policy': tiering_policy} + + # Create volume + if print_output: + print("Creating volume '" + volume_name + "' on svm '" + svm + "'") + try: + volume = NetAppVolume.from_dict(volumeDict) + volume.post(poll=True) + if print_output: + print("Volume created successfully.") + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Optionally mount newly created volume + if mountpoint: try: - int(config["defaultUnixGID"]) - break - except: 
- print("Invalid value. Must enter an integer.") - while True: - config["defaultUnixPermissions"] = input("Enter unix filesystem permissions to apply by default when creating new volumes (ex. '0777' for full read/write permissions for all users and groups) [0777]: ") - if not config["defaultUnixPermissions"] : - config["defaultUnixPermissions"] = "0777" - break - elif not re.search("^0[0-7]{3}", config["defaultUnixPermissions"]): - print("Invalud value. Must enter a valid unix permissions value. Acceptable values are '0777', '0755', '0744', etc.") - else: - break + mount_volume(volume_name=volume_name, svm_name=svm, mountpoint=mountpoint, readonly=readonly, print_output=True) + except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): + if print_output: + print("Error: Error mounting volume.") + raise - # Prompt user to enter additional config details - config["defaultAggregate"] = input("Enter aggregate to use by default when creating new FlexVol volumes: ") - config["username"] = input("Enter ONTAP API username (Recommendation: Use SVM account): ") - passwordString = getpass("Enter ONTAP API password (Recommendation: Use SVM account): ") + else: + raise ConnectionTypeError() - # Convert password to base64 enconding - passwordBytes = passwordString.encode("ascii") - passwordBase64Bytes = base64.b64encode(passwordBytes) - config["password"] = passwordBase64Bytes.decode("ascii") - # Prompt user to enter value denoting whether or not to verify SSL cert when calling ONTAP API - # Verify value entered; prompt user to re-enter if invalid - while True: - verifySSLCert = input("Verify SSL certificate when calling ONTAP API (true/false): ") - if verifySSLCert in ("true", "True") : - config["verifySSLCert"] = True - break - elif verifySSLCert in ("false", "False") : - config["verifySSLCert"] = False - break - else: - print("Invalid value. 
Must enter 'true' or 'false'.") +def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = None, svm_name: str = None, skip_owned: bool = False, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name + + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve svm from config file + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if print_output: + print("Deleting snapshot '" + snapshot_name + "'.") + + try: + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + # Retrieve snapshot + snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) + + + if not snapshot: + if print_output: + print("Error: Invalid snapshot name.") + raise InvalidSnapshotParameterError("name") + + if hasattr(snapshot,'owners'): + + if not skip_owned: + if print_output: + print('Error: Snapshot cannot be deleted since it has owners:'+','.join(snapshot.owners)) + raise InvalidSnapshotParameterError("name") + else: + if print_output: + print('Warning: Snapshot cannot be deleted since it has owners:'+','.join(snapshot.owners)) + return + + # Delete snapshot + snapshot.delete(poll=True) + + if print_output: + print("Snapshot deleted successfully.") + + except NetAppRestError as err : + if print_output: + print("Error: ONTAP Rest API Error: ", 
err) + raise APIConnectionError(err) else: raise ConnectionTypeError() - # Ask user if they want to use cloud sync functionality - # Verify value entered; prompt user to re-enter if invalid - while True: - useCloudSync = input("Do you intend to use this toolkit to trigger Cloud Sync operations? (yes/no): ") - if useCloudSync in ("yes", "Yes", "YES"): - # Prompt user to enter cloud central refresh token - print("Note: If you do not have a Cloud Central refresh token, visit https://services.cloud.netapp.com/refresh-token to create one.") - refreshTokenString = getpass("Enter Cloud Central refresh token: ") +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, delete_mirror: bool = False, + delete_non_clone: bool = False, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if cluster_name: + config["hostname"] = cluster_name + + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve svm from config file + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output : + _print_invalid_config_error() + raise InvalidConfigError() + + try: + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + if not "CLONENAME:" in volume.comment and not delete_non_clone: + if print_output: + print("Error: volume is not a clone created by this tool. 
add --delete-non-clone to delete it") + raise InvalidVolumeParameterError("delete-non-clone") + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + + if delete_mirror: + #check if this volume has snapmirror destination relationship + uuid = None + try: + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name}) + for rel in snapmirror_relationship: + # Retrieve relationship details + rel.get() + uuid = rel.uuid + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + + if uuid: + if print_output: + print("Deleting snapmirror relationship: "+svm+":"+volume_name) + try: + deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) + deleteRelation.delete(poll=True, poll_timeout=120) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + + #check if this volume has snapmirror destination relationship + uuid = None + try: + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(list_destinations_only=True,**{"source.path": svm+":"+volume_name}) + for rel in snapmirror_relationship: + # Retrieve relationship details + rel.get(list_destinations_only=True) + uuid = rel.uuid + if print_output: + print("release relationship: "+rel.source.path+" -> "+rel.destination.path) + deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) + deleteRelation.delete(poll=True, poll_timeout=120,source_only=True) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + + if mountpoint: + #check if volume is mounted locally, and then unmount it. 
+ try:
+ unmount_volume(mountpoint=mountpoint, print_output=True)
+ except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError):
+ if print_output:
+ print("Error: Error unmounting volume.")
+ raise
- # Convert refresh token to base64 enconding
- refreshTokenBytes = refreshTokenString.encode("ascii")
- refreshTokenBase64Bytes = base64.b64encode(refreshTokenBytes)
- config["cloudCentralRefreshToken"] = refreshTokenBase64Bytes.decode("ascii")
+ try:
+ if print_output:
+ print("Deleting volume '" + svm+':'+volume_name + "'.")
+ # Delete volume
+ volume.delete(poll=True)
- break
+ if print_output:
+ print("Volume deleted successfully.")
- elif useCloudSync in ("no", "No", "NO"):
- break
+ except NetAppRestError as err:
+ if print_output:
+ if "You must delete the SnapMirror relationships before" in str(err):
+ print("Error: volume is snapmirror destination. add --delete-mirror to delete snapmirror relationship before deleting the volume")
+ elif "the source endpoint of one or more SnapMirror relationships" in str(err):
+ print("Error: volume is snapmirror source. add --delete-mirror to release snapmirror relationship before deleting the volume")
+ else:
+ print("Error: ONTAP Rest API Error: ", err)
+ raise APIConnectionError(err)
- else:
- print("Invalid value. Must enter 'yes' or 'no'.")
+ else:
+ raise ConnectionTypeError()
- # Ask user if they want to use S3 functionality
- # Verify value entered; prompt user to re-enter if invalid
- while True:
- useS3 = input("Do you intend to use this toolkit to push/pull from S3? 
(yes/no): ") - if useS3 in ("yes", "Yes", "YES"): - # Promt user to enter S3 endpoint details - config["s3Endpoint"] = input("Enter S3 endpoint: ") +def list_cloud_sync_relationships(print_output: bool = False) -> list(): + # Step 1: Obtain access token and account ID for accessing Cloud Sync API - # Prompt user to enter S3 credentials - config["s3AccessKeyId"] = input("Enter S3 Access Key ID: ") - s3SecretAccessKeyString = getpass("Enter S3 Secret Access Key: ") + # Retrieve refresh token + try: + refreshToken = _retrieve_cloud_central_refresh_token(print_output=print_output) + except InvalidConfigError: + raise - # Convert refresh token to base64 enconding - s3SecretAccessKeyBytes = s3SecretAccessKeyString.encode("ascii") - s3SecretAccessKeyBase64Bytes = base64.b64encode(s3SecretAccessKeyBytes) - config["s3SecretAccessKey"] = s3SecretAccessKeyBase64Bytes.decode("ascii") + # Obtain access token and account ID + try: + accessToken, accountId = _get_cloud_sync_access_parameters(refreshToken=refreshToken, print_output=print_output) + except APIConnectionError: + raise + + # Step 2: Retrieve list of relationships + + # Define parameters for API call + url = "https://cloudsync.netapp.com/api/relationships-v2" + headers = { + "Accept": "application/json", + "x-account-id": accountId, + "Authorization": "Bearer " + accessToken + } + + # Call API to retrieve list of relationships + response = requests.get(url = url, headers = headers) + + # Check for API response status code of 200; if not 200, raise error + if response.status_code != 200: + errorMessage = "Error calling Cloud Sync API to retrieve list of relationships." 
+ if print_output:
+ print("Error:", errorMessage)
+ _print_api_response(response)
+ raise APIConnectionError(errorMessage, response)
+
+ # Construct list of relationships
+ relationships = json.loads(response.text)
+ relationshipsList = list()
+ for relationship in relationships:
+ relationshipDetails = dict()
+ relationshipDetails["id"] = relationship["id"]
+ relationshipDetails["source"] = relationship["source"]
+ relationshipDetails["target"] = relationship["target"]
+ relationshipsList.append(relationshipDetails)
+
+ # Print list of relationships
+ if print_output:
+ print(yaml.dump(relationshipsList))
+
+ return relationshipsList
+
+
+def list_snap_mirror_relationships(print_output: bool = False, cluster_name: str = None) -> list():
+ # Retrieve config details from config file
+ try:
+ config = _retrieve_config(print_output=print_output)
+ except InvalidConfigError:
+ raise
+ try:
+ connectionType = config["connectionType"]
+ except:
+ if print_output:
+ _print_invalid_config_error()
+ raise InvalidConfigError()
- # Prompt user to enter value denoting whether or not to verify SSL cert when calling S3 API
- # Verify value entered; prompt user to re-enter if invalid
- while True:
- s3VerifySSLCert = input("Verify SSL certificate when calling S3 API (true/false): ")
- if s3VerifySSLCert in ("true", "True"):
- config["s3VerifySSLCert"] = True
- config["s3CACertBundle"] = input("Enter CA cert bundle to use when calling S3 API (optional) []: ")
- break
- elif s3VerifySSLCert in ("false", "False"):
- config["s3VerifySSLCert"] = False
- config["s3CACertBundle"] = ""
- break
+ if cluster_name:
+ config["hostname"] = cluster_name
+
+ if connectionType == "ONTAP":
+ # Instantiate connection to ONTAP cluster
+ try:
+ _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output)
+ except InvalidConfigError:
+ raise
+
+ try:
+ # Retrieve all relationships for which destination is on current cluster
+ destinationRelationships = 
NetAppSnapmirrorRelationship.get_collection() + + # Do not retrieve relationships for which source is on current cluster + # Note: Uncomment below line to retrieve all relationships for which source is on current cluster, then add sourceRelationships to for loop + # sourceRelationships = NetAppSnapmirrorRelationship.get_collection(list_destinations_only=True) + + # Construct list of relationships + relationshipsList = list() + for relationship in destinationRelationships: + # Retrieve relationship details + try: + relationship.get() + except NetAppRestError as err: + relationship.get(list_destinations_only=True) + + # Set cluster value + if hasattr(relationship.source, "cluster"): + sourceCluster = relationship.source.cluster.name + else: + sourceCluster = "user's cluster" + if hasattr(relationship.destination, "cluster"): + destinationCluster = relationship.destination.cluster.name + else: + destinationCluster = "user's cluster" + + # Set transfer state value + if hasattr(relationship, "transfer"): + transferState = relationship.transfer.state else: - print("Invalid value. 
Must enter 'true' or 'false'.") + transferState = None - break + # Set healthy value + if hasattr(relationship, "healthy"): + healthy = relationship.healthy + else: + healthy = "unknown" + + # Construct dict containing relationship details + relationshipDict = { + "UUID": relationship.uuid, + "Type": relationship.policy.type, + "Healthy": healthy, + "Current Transfer Status": transferState, + "Source Cluster": sourceCluster, + "Source SVM": relationship.source.svm.name, + "Source Volume": relationship.source.path.split(":")[1], + "Dest Cluster": destinationCluster, + "Dest SVM": relationship.destination.svm.name, + "Dest Volume": relationship.destination.path.split(":")[1] + } + + # Append dict to list of relationships + relationshipsList.append(relationshipDict) + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Print list of relationships + if print_output: + # Convert relationships array to Pandas DataFrame + relationshipsDF = pd.DataFrame.from_dict(relationshipsList, dtype="string") + print(tabulate(relationshipsDF, showindex=False, headers=relationshipsDF.columns)) + + return relationshipsList - elif useS3 in ("no", "No", "NO"): - break + else: + raise ConnectionTypeError() - else: - print("Invalid value. 
Must enter 'yes' or 'no'.") - # Create config dir if it doesn't already exist +def list_snapshots(volume_name: str, cluster_name: str = None, svm_name: str = None, print_output: bool = False) -> list(): + # Retrieve config details from config file try: - os.mkdir(configDirPath) - except FileExistsError : - pass + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() - # Create config file in config dir - with open(configFilePath, 'w') as configFile: - # Write connection details to config file - json.dump(config, configFile) + if cluster_name: + config["hostname"] = cluster_name - print("Created config file: '" + configFilePath + "'.") + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve svm from config file + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + # Retrieve snapshots + try: + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + # Construct list of snapshots + snapshotsList = list() + for snapshot in NetAppSnapshot.get_collection(volume.uuid): + # Retrieve snapshot + snapshot.get() + + # Construct dict of snapshot details + snapshotDict = {"Snapshot Name": snapshot.name, "Create Time": snapshot.create_time} + + # Append dict to list of snapshots + snapshotsList.append(snapshotDict) + + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Print list of snapshots + if print_output: + 
# Convert snapshots array to Pandas DataFrame + snapshotsDF = pd.DataFrame.from_dict(snapshotsList, dtype="string") + print(tabulate(snapshotsDF, showindex=False, headers=snapshotsDF.columns)) + + return snapshotsList + else: + raise ConnectionTypeError() -def getTarget(args: list) -> str: + +def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None) -> list(): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise try: - target = args[2] + connectionType = config["connectionType"] except: - handleInvalidCommand() - return target + if print_output : + _print_invalid_config_error() + raise InvalidConfigError() + if cluster_name: + config["hostname"] = cluster_name + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + try: + svmname=config["svm"] + if svm_name: + svmname = svm_name + + # Retrieve all volumes for SVM + volumes = NetAppVolume.get_collection(svm=svmname) + + # Retrieve local mounts if desired + if check_local_mounts : + mounts = subprocess.check_output(['mount']).decode() + + # Construct list of volumes; do not include SVM root volume + volumesList = list() + for volume in volumes: + baseVolumeFields = "nas.path,size,style,clone,flexcache_endpoint_type" + try : + volumeFields = baseVolumeFields + if include_space_usage_details : + volumeFields += ",space,constituents" + volume.get(fields=volumeFields) + except NetAppRestError as err : + volumeFields = baseVolumeFields + if include_space_usage_details : + volumeFields += ",space" + volume.get(fields=volumeFields) + + # Retrieve volume export path; handle case where volume is not exported + if hasattr(volume, "nas"): + volumeExportPath = 
volume.nas.path + else: + volumeExportPath = None + + # Include all vols except for SVM root vol + if volumeExportPath != "/": + # Determine volume type + type = volume.style + + # Construct NFS mount target + if not volumeExportPath : + nfsMountTarget = None + else : + nfsMountTarget = config["dataLif"]+":"+volume.nas.path + if svmname != config["svm"]: + nfsMountTarget = svmname+":"+volume.nas.path + + + # Construct clone source + clone = "no" + cloneParentSvm = "" + cloneParentVolume = "" + cloneParentSnapshot = "" + + try: + cloneParentSvm = volume.clone.parent_svm.name + cloneParentVolume = volume.clone.parent_volume.name + cloneParentSnapshot = volume.clone.parent_snapshot.name + clone = "yes" + except: + pass + + # Determine if FlexCache + if volume.flexcache_endpoint_type == "cache": + flexcache = "yes" + else: + flexcache = "no" + + # Convert size in bytes to "pretty" size (size in KB, MB, GB, or TB) + prettySize = _convert_bytes_to_pretty_size(size_in_bytes=volume.size) + if include_space_usage_details : + try : + snapshotReserve = str(volume.space.snapshot.reserve_percent) + "%" + logicalCapacity = float(volume.space.size) * (1 - float(volume.space.snapshot.reserve_percent)/100) + prettyLogicalCapacity = _convert_bytes_to_pretty_size(size_in_bytes=logicalCapacity) + logicalUsage = float(volume.space.used) + prettyLogicalUsage = _convert_bytes_to_pretty_size(size_in_bytes=logicalUsage) + except : + snapshotReserve = "Unknown" + prettyLogicalCapacity = "Unknown" + prettyLogicalUsage = "Unknown" + try : + if type == "flexgroup" : + totalFootprint: float = 0.0 + for constituentVolume in volume.constituents : + totalFootprint += float(constituentVolume["space"]["total_footprint"]) + else : + totalFootprint = float(volume.space.footprint) + float(volume.space.metadata) + prettyFootprint = _convert_bytes_to_pretty_size(size_in_bytes=totalFootprint) + except : + prettyFootprint = "Unknown" + + # Construct dict containing volume details; optionally include local 
mountpoint + volumeDict = { + "Volume Name": volume.name, + "Size": prettySize + } + if include_space_usage_details : + volumeDict["Snap Reserve"] = snapshotReserve + volumeDict["Capacity"] = prettyLogicalCapacity + volumeDict["Usage"] = prettyLogicalUsage + volumeDict["Footprint"] = prettyFootprint + volumeDict["Type"] = volume.style + volumeDict["NFS Mount Target"] = nfsMountTarget + if check_local_mounts: + localMountpoint = "" + for mount in mounts.split("\n") : + mountDetails = mount.split(" ") + if mountDetails[0] == nfsMountTarget : + localMountpoint = mountDetails[2] + volumeDict["Local Mountpoint"] = localMountpoint + volumeDict["FlexCache"] = flexcache + volumeDict["Clone"] = clone + volumeDict["Source SVM"] = cloneParentSvm + volumeDict["Source Volume"] = cloneParentVolume + volumeDict["Source Snapshot"] = cloneParentSnapshot + + # Append dict to list of volumes + volumesList.append(volumeDict) + + except NetAppRestError as err: + if print_output : + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + # Print list of volumes + if print_output: + # Convert volumes array to Pandas DataFrame + volumesDF = pd.DataFrame.from_dict(volumesList, dtype="string") + print(tabulate(volumesDF, showindex=False, headers=volumesDF.columns)) + + return volumesList -def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = False): - if invalidOptArg: - print("Error: Invalid option/argument.") else: - print("Error: Invalid command.") - print(helpText) - sys.exit(1) + raise ConnectionTypeError() + +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, is_sudo: bool = False, readonly: bool = False, print_output: bool = False): + nfsMountTarget = None + + svm = None + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: 
+ if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + if cluster_name: + config["hostname"] = cluster_name -## Main function -if __name__ == '__main__': - import sys, getopt + # Retrieve list of volumes + try: + volumes = list_volumes(check_local_mounts=True, svm_name = svm) + except (InvalidConfigError, APIConnectionError): + if print_output: + print("Error: Error retrieving NFS mount target for volume.") + raise + + # Retrieve NFS mount target for volume, and check that no volume is currently mounted at specified mountpoint + for volume in volumes: + # Check mountpoint + if mountpoint == volume["Local Mountpoint"]: + if print_output: + print("Error: Volume '" + volume["Volume Name"] + "' is already mounted at '" + mountpoint + "'.") + raise MountOperationError("Another volume mounted at mountpoint") + + if volume_name == volume["Volume Name"]: + # Retrieve NFS mount target + nfsMountTarget = volume["NFS Mount Target"] + nfsMountTarget = nfsMountTarget.strip() + + # Raise error if invalid volume name was entered + if not nfsMountTarget: + if print_output: + print("Error: Invalid volume name specified.") + raise InvalidVolumeParameterError("name") - # Get desired action from command line args try: - action = sys.argv[1] + if lif_name: + nfsMountTarget = lif_name+':'+nfsMountTarget.split(':')[1] except: - handleInvalidCommand() - - # Invoke desired action - if action == "clone": - # Get desired target from command line args - target = getTarget(sys.argv) - - # Invoke desired action based on target - if target in ("volume", "vol"): - newVolumeName = None - clusterName = None - sourceSVM = None - targetSVM = None - sourceVolumeName = None - sourceSnapshotName = None - mountpoint = None - unixUID = None - unixGID = None - junction = None - readonly = False - split = False - refresh = False - exportPolicy = None - snapshotPolicy = None - exportHosts = None - svmDrUnprotect = False - - # Get command line options - try: - opts, args = 
getopt.getopt(sys.argv[3:], "hl:c:t:n:v:s:m:u:g:j:xe:p:i:srd", ["help", "cluster-name=", "source-svm=","target-svm=","name=", "source-volume=", "source-snapshot=", "mountpoint=", "uid=", "gid=", "junction=", "readonly","export-hosts=","export-policy=","snapshot-policy=","split","refresh","svm-dr-unprotect"]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextCloneVolume) - sys.exit(0) - elif opt in ("-l", "--cluster-name"): - clusterName = arg - elif opt in ("-n", "--name"): - newVolumeName = arg - elif opt in ("-c", "--source-svm"): - sourceSVM = arg - elif opt in ("-t", "--target-svm"): - targetSVM = arg - elif opt in ("-v", "--source-volume"): - sourceVolumeName = arg - elif opt in ("-s", "--source-snapshot"): - sourceSnapshotName = arg - elif opt in ("-m", "--mountpoint"): - mountpoint = arg - elif opt in ("-u", "--uid"): - unixUID = arg - elif opt in ("-g", "--gid"): - unixGID = arg - elif opt in ("-j", "--junction"): - junction = arg - elif opt in ("-x", "--readonly"): - readonly = True - elif opt in ("-s", "--split"): - split = True - elif opt in ("-r", "--refresh"): - refresh = True - elif opt in ("-d", "--svm-dr-unprotect"): - svmDrUnprotect = True - elif opt in ("-p", "--export-policy"): - exportPolicy = arg - elif opt in ("-i", "--snapshot-policy"): - snapshotPolicy = arg - elif opt in ("-e", "--export-hosts"): - exportHosts = arg - - # Check for required options - if not newVolumeName or not sourceVolumeName: - handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) - if (unixUID and not unixGID) or (unixGID and not unixUID): - print("Error: if either one of -u/--uid or -g/--gid is spefied, then both must be specified.") - handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) - if exportHosts and exportPolicy: - print("Error: cannot use both --export-policy 
and --export-hosts. only one of them can be specified.") - handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) - - # Clone volume - try: - clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, - cluster_name=clusterName, source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, - snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, - junction=junction, svm_dr_unprotect=svmDrUnprotect, readonly=readonly, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError, - MountOperationError): - sys.exit(1) + if print_output: + print("Error: Error retrieving NFS mount target for volume.") + raise + + # Print message describing action to be understaken + if print_output: + if readonly: + print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "' as read-only.") else: - handleInvalidCommand() + print("Mounting volume '" + svm+':'+volume_name + "' as '"+nfsMountTarget+"' at '" + mountpoint + "'.") - elif action in ("config", "setup"): - if len(sys.argv) > 2 : - if sys.argv[2] in ("-h", "--help"): - print(helpTextConfig) - sys.exit(0) - else: - handleInvalidCommand(helpTextConfig, invalidOptArg=True) + # Create mountpoint if it doesn't already exist + mountpoint = os.path.expanduser(mountpoint) + try: + os.mkdir(mountpoint) + except FileExistsError: + pass - #connectionType = input("Enter connection type (ONTAP): ") - connectionType = "ONTAP" + # Mount volume + mount_cmd_opts = [] + + if readonly: + mount_cmd_opts.append('-o') + mount_cmd_opts.append('ro') + if mount_options: + mount_cmd_opts.remove('ro') + mount_cmd_opts.append('ro'+','+mount_options) + elif mount_options: + mount_cmd_opts.append('-o') + mount_cmd_opts.append(mount_options) + mount_cmd = ['mount'] + 
mount_cmd_opts + [nfsMountTarget, mountpoint]
+
+ if os.getuid() != 0:
+ mount_cmd_opts_str = ""
+ for item in mount_cmd_opts :
+ if item == "-o" :
+ continue
+ mount_cmd_opts_str = mount_cmd_opts_str + item + ","
+ mount_cmd_opts_str = mount_cmd_opts_str[:-1]
+ exit("You need to have root privileges to run mount command." + "\nTo mount the volume run the following command as root:" + "\n"+ "mount -o "+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint)
-
- # Create config file
- createConfig(connectionType=connectionType)
+ try:
+ subprocess.check_call(mount_cmd)
+ if print_output:
+ print("Volume mounted successfully.")
+ except subprocess.CalledProcessError as err:
+ if print_output:
+ print("Error: Error running mount command: ", err)
+ raise MountOperationError(err)
+
+
+# Function to unmount volume
+def unmount_volume(mountpoint: str, print_output: bool = False):
+ # Print message describing action to be undertaken
+ if print_output:
+ print("Unmounting volume at '" + mountpoint + "'.")
+
+ # Un-mount volume
+ try:
+ subprocess.check_call(['umount', mountpoint])
+ if print_output:
+ print("Volume unmounted successfully.")
+ except subprocess.CalledProcessError as err:
+ if print_output:
+ print("Error: Error running unmount command: ", err)
+ raise MountOperationError(err)
-
- elif action == "create":
- # Get desired target from command line args
- target = getTarget(sys.argv)
- # Invoke desired action based on target
- if target in ("snapshot", "snap"):
- volumeName = None
- snapshotName = None
- clusterName = None
- svmName = None
- retentionCount = 0
- retentionDays = False
- snapmirrorLabel = None

+def prepopulate_flex_cache(volume_name: str, paths: list, print_output: bool = False):
+ # Retrieve config details from config file
+ try:
+ config = _retrieve_config(print_output=print_output)
+ except InvalidConfigError:
+ raise
+ try:
+ connectionType = config["connectionType"]
+ except:
+ if print_output:
+ _print_invalid_config_error()
+ raise 
InvalidConfigError() - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:r:u:l:", ["cluster-name=","help", "svm=", "name=", "volume=", "retention=", "snapmirror-label="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help") : - print(helpTextCreateSnapshot) - sys.exit(0) - elif opt in ("-n", "--name"): - snapshotName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-s", "--svm"): - svmName = arg - elif opt in ("-r", "--retention"): - retentionCount = arg - elif opt in ("-v", "--volume"): - volumeName = arg - elif opt in ("-l", "--snapmirror-label"): - snapmirrorLabel = arg - - # Check for required options - if not volumeName: - handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) - - if retentionCount: - if not retentionCount.isnumeric(): - matchObj = re.match("^(\d+)d$",retentionCount) - if not matchObj: - handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) - else: - retentionCount = matchObj.group(1) - retentionDays = True + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve svm from config file + try: + svm = config["svm"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if print_output: + print("FlexCache '" + volume_name + "' - Prepopulating paths: ", paths) + + try: + # Retrieve FlexCache + flexcache = NetAppFlexCache.find(name=volume_name, svm=svm) + if not flexcache: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") - # Create snapshot - try: - create_snapshot(volume_name=volumeName, snapshot_name=snapshotName, 
retention_count=retentionCount, retention_days=retentionDays, cluster_name=clusterName, svm_name=svmName, snapmirror_label=snapmirrorLabel, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): - sys.exit(1) - - elif target in ("volume", "vol"): - clusterName = None - svmName = None - volumeName = None - volumeSize = None - guaranteeSpace = False - volumeType = None - unixPermissions = None - unixUID = None - unixGID = None - exportPolicy = None - snapshotPolicy = None - mountpoint = None - aggregate = None - junction = None - readonly = False - tieringPolicy = None - volDP = False - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:y", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp"]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextCreateVolume) - sys.exit(0) - elif opt in ("-v", "--svm"): - svmName = arg - elif opt in ("-l", "--cluster-name"): - clusterName = arg - elif opt in ("-n", "--name"): - volumeName = arg - elif opt in ("-s", "--size"): - volumeSize = arg - elif opt in ("-r", "--guarantee-space"): - guaranteeSpace = True - elif opt in ("-t", "--type"): - volumeType = arg - elif opt in ("-p", "--permissions"): - unixPermissions = arg - elif opt in ("-u", "--uid"): - unixUID = arg - elif opt in ("-g", "--gid"): - unixGID = arg - elif opt in ("-e", "--export-policy"): - exportPolicy = arg - elif opt in ("-d", "--snapshot-policy"): - snapshotPolicy = arg - elif opt in ("-m", "--mountpoint"): - mountpoint = arg - elif opt in ("-a", "--aggregate"): - aggregate = arg - elif opt in ("-j", "--junction"): - junction = 
arg - elif opt in ("-x", "--readonly"): - readonly = True - elif opt in ("-f", "--tiering-policy"): - tieringPolicy = arg - elif opt in ("-y", "--dp"): - volDP = True - - # Check for required options - if not volumeName or not volumeSize: - handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) - if (unixUID and not unixGID) or (unixGID and not unixUID): - print("Error: if either one of -u/--uid or -g/--gid is spefied, then both must be specified.") - handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) - if (volDP and (junction or mountpoint or snapshotPolicy or exportPolicy)): - handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) - - # Create volume - try: - create_volume(svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, - unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, - print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - sys.exit(1) - - elif target in ("snapmirror-relationship", "sm","snapmirror"): - clusterName = None - sourceSvm = None - targetSvm = None - sourceVol = None - targetVol = None - policy = 'MirrorAllSnapshots' - schedule = "hourly" - volumeSize = None - action = None - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hn:t:s:v:u:y:c:p:a:h", ["cluster-name=","help", "target-vol=", "target-svm=", "source-svm=", "source-vol=", "schedule=", "policy=", "action="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - 
print(helpTextCreateSnapMirrorRelationship) - sys.exit(0) - elif opt in ("-t", "--target-svm"): - targetSvm = arg - elif opt in ("-n", "--target-vol"): - targetVol = arg - elif opt in ("-s", "--source-svm"): - sourceSvm = arg - elif opt in ("-v", "--source-vol"): - sourceVol = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-c", "--schedule"): - schedule = arg - elif opt in ("-p", "--policy"): - policy = arg - elif opt in ("-a", "--action"): - action = arg - - # Check for required options - if not targetVol or not sourceSvm or not sourceVol: - handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) - - if action not in [None,'resync','initialize']: - handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) - - # Create snapmirror - try: - create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, - cluster_name=clusterName, action=action, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - sys.exit(1) + # Prepopulate FlexCache + flexcache.prepopulate = {"dir_paths": paths} + flexcache.patch() - else: - handleInvalidCommand() + if print_output: + print("FlexCache prepopulated successfully.") - elif action in ("delete", "del", "rm"): - # Get desired target from command line args - target = getTarget(sys.argv) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) - # Invoke desired action based on target - if target in ("snapshot", "snap"): - volumeName = None - snapshotName = None - svmName = None - clusterName = None + else: + raise ConnectionTypeError() - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:u:", ["cluster-name=","help", "svm=", "name=", "volume="]) - except Exception as err: - print(err) 
- handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextDeleteSnapshot) - sys.exit(0) - elif opt in ("-n", "--name"): - snapshotName = arg - elif opt in ("-s", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-v", "--volume"): - volumeName = arg - - # Check for required options - if not volumeName or not snapshotName: - handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) - # Delete snapshot - try: - delete_snapshot(volume_name=volumeName, svm_name = svmName, cluster_name=clusterName, snapshot_name=snapshotName, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError): - sys.exit(1) - - elif target in ("volume", "vol"): - volumeName = None - svmName = None - clusterName = None - force = False - deleteMirror = False - deleteNonClone = False - mountpoint = None - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m:p:", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror", "mountpoint="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextDeleteVolume) - sys.exit(0) - elif opt in ("-v", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-n", "--name"): - volumeName = arg - elif opt in ("-p", "--mountpoint"): - mountpoint = arg - elif opt in ("-f", "--force"): - force = True - elif opt in ("-m", "--delete-mirror"): - deleteMirror = True - elif opt in ("--delete-non-clone"): - deleteNonClone = True - - # Check for required options - if not volumeName: - handleInvalidCommand(helpText=helpTextDeleteVolume, 
invalidOptArg=True) - - # Confirm delete operation - if not force: - print("Warning: All data and snapshots associated with the volume will be permanently deleted.") - while True: - proceed = input("Are you sure that you want to proceed? (yes/no): ") - if proceed in ("yes", "Yes", "YES"): - break - elif proceed in ("no", "No", "NO"): - sys.exit(0) - else: - print("Invalid value. Must enter 'yes' or 'no'.") +def pull_bucket_from_s3(s3_bucket: str, local_directory: str, s3_object_key_prefix: str = "", print_output: bool = False): + # Retrieve S3 access details from existing config file + try: + s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) + except InvalidConfigError: + raise - # Delete volume - try: - delete_volume(volume_name=volumeName, svm_name=svmName, cluster_name=clusterName, delete_mirror=deleteMirror, delete_non_clone=deleteNonClone, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): - sys.exit(1) + # Add slash to end of local directory path if not present + if not local_directory.endswith(os.sep): + local_directory += os.sep - else: - handleInvalidCommand() - - elif action in ("help", "h", "-h", "--help"): - print(helpTextStandard) - - elif action in ("list", "ls"): - # Get desired target from command line args - target = getTarget(sys.argv) - - # Invoke desired action based on target - if target in ("cloud-sync-relationship", "cloud-sync", "cloud-sync-relationships", "cloud-syncs") : - # Check command line options - if len(sys.argv) > 3: - if sys.argv[3] in ("-h", "--help"): - print(helpTextListCloudSyncRelationships) - sys.exit(0) + # Multithread the download operation + with ThreadPoolExecutor() as executor: + try: + # Instantiate S3 session + s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, 
s3CACertBundle=s3CACertBundle, print_output=print_output) + + # Loop through all objects with prefix in bucket and download + bucket = s3.Bucket(s3_bucket) + for obj in bucket.objects.filter(Prefix=s3_object_key_prefix): + executor.submit(_download_from_s3, s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, s3ObjectKey=obj.key, localFile=local_directory+obj.key, print_output=print_output) + + except APIConnectionError: + raise + + except Exception as err: + if print_output: + print("Error: S3 API error: ", err) + raise APIConnectionError(err) + + print("Download complete.") + + +def pull_object_from_s3(s3_bucket: str, s3_object_key: str, local_file: str = None, print_output: bool = False): + # Retrieve S3 access details from existing config file + try: + s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) + except InvalidConfigError: + raise + + # Set S3 object key + if not local_file: + local_file = s3_object_key + + # Upload file + try: + _download_from_s3(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, s3ObjectKey=s3_object_key, localFile=local_file, print_output=print_output) + except APIConnectionError: + raise + + print("Download complete.") + + +def push_directory_to_s3(s3_bucket: str, local_directory: str, s3_object_key_prefix: str = "", + s3_extra_args: str = None, print_output: bool = False): + # Retrieve S3 access details from existing config file + try: + s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) + except InvalidConfigError: + raise + + # Multithread the upload operation + with ThreadPoolExecutor() as executor: + # Loop through all files 
in directory + for dirpath, dirnames, filenames in os.walk(local_directory): + # Exclude hidden files and directories + filenames = [filename for filename in filenames if not filename[0] == '.'] + dirnames[:] = [dirname for dirname in dirnames if not dirname[0] == '.'] + + for filename in filenames: + # Build filepath + if local_directory.endswith(os.sep): + dirpathBeginIndex = len(local_directory) else: - handleInvalidCommand(helpTextListCloudSyncRelationships, invalidOptArg=True) + dirpathBeginIndex = len(local_directory) + 1 - # List cloud sync relationships - try: - list_cloud_sync_relationships(print_output=True) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) + subdirpath = dirpath[dirpathBeginIndex:] - elif target in ("snapmirror-relationship", "snapmirror", "snapmirror-relationships", "snapmirrors","sm"): - svmName = None - clusterName = None + if subdirpath: + filepath = subdirpath + os.sep + filename + else: + filepath = filename - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hv:u:", ["cluster-name=","help", "svm="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextListSnapMirrorRelationships) - sys.exit(0) - elif opt in ("-v", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - - # List snapmirror relationships - try: - list_snap_mirror_relationships(print_output=True, cluster_name=clusterName) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) + # Set S3 object details + s3ObjectKey = s3_object_key_prefix + filepath + localFile = dirpath + os.sep + filename - elif target in ("snapshot", "snap", "snapshots", "snaps"): - volumeName = None - clusterName = None - svmName = None + # Upload file + try: + executor.submit(_upload_to_s3, s3Endpoint=s3Endpoint, 
s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, localFile=localFile, s3ObjectKey=s3ObjectKey, s3ExtraArgs=s3_extra_args, print_output=print_output) + except APIConnectionError: + raise - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hv:s:u:", ["cluster-name=","help", "volume=","svm="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help") : - print(helpTextListSnapshots) - sys.exit(0) - elif opt in ("-v", "--volume"): - volumeName = arg - elif opt in ("-s", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - - # Check for required options - if not volumeName: - handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) - - # List snapsots - try: - list_snapshots(volume_name=volumeName, cluster_name=clusterName, svm_name=svmName, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): - sys.exit(1) + print("Upload complete.") - elif target in ("volume", "vol", "volumes", "vols"): - includeSpaceUsageDetails = False - svmName = None - clusterName = None - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hsv:u:", ["cluster-name=","help", "include-space-usage-details","svm="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextListVolumes, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help") : - print(helpTextListVolumes) - sys.exit(0) - elif opt in ("-v", "--svm") : - svmName = arg - elif opt in ("-s", "--include-space-usage-details"): - includeSpaceUsageDetails = True - elif opt in ("-u", "--cluster-name"): - clusterName = arg - - # List volumes - try: - list_volumes(check_local_mounts=True, 
include_space_usage_details=includeSpaceUsageDetails, print_output=True, svm_name=svmName, cluster_name=clusterName) - except (InvalidConfigError, APIConnectionError) : - sys.exit(1) +def push_file_to_s3(s3_bucket: str, local_file: str, s3_object_key: str = None, s3_extra_args: str = None, print_output: bool = False): + # Retrieve S3 access details from existing config file + try: + s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle = _retrieve_s3_access_details(print_output=print_output) + except InvalidConfigError: + raise - else: - handleInvalidCommand() - - elif action == "mount": - # Get desired target from command line args - target = getTarget(sys.argv) - - # Invoke desired action based on target - if target in ("volume", "vol"): - volumeName = None - svmName = None - clusterName = None - lifName = None - mountpoint = None - mount_options = None - readonly = False - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextMountVolume) - sys.exit(0) - elif opt in ("-v", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-l", "--lif"): - lifName = arg - elif opt in ("-n", "--name"): - volumeName = arg - elif opt in ("-m", "--mountpoint"): - mountpoint = arg - elif opt in ("-o", "--options"): - mount_options = arg - elif opt in ("-x", "--readonly"): - readonly = True - - # Mount volume - try: - mount_volume(svm_name = svmName, cluster_name=clusterName, lif_name = lifName, volume_name=volumeName, mountpoint=mountpoint, mount_options=mount_options, readonly=readonly, print_output=True) - except (InvalidConfigError, APIConnectionError, 
InvalidVolumeParameterError, MountOperationError): - sys.exit(1) + # Set S3 object key + if not s3_object_key: + s3_object_key = local_file - else: - handleInvalidCommand() + # Upload file + try: + _upload_to_s3(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, s3Bucket=s3_bucket, localFile=local_file, s3ObjectKey=s3_object_key, s3ExtraArgs=s3_extra_args, print_output=print_output) + except APIConnectionError: + raise - elif action == "unmount": - # Get desired target from command line args - target = getTarget(sys.argv) + print("Upload complete.") - # Invoke desired action based on target - if target in ("volume", "vol"): - mountpoint = None - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hm:", ["help", "mountpoint="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextUnmountVolume) - sys.exit(0) - elif opt in ("-m", "--mountpoint"): - mountpoint = arg - - # Check for required options - if not mountpoint: - handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) - - # Unmount volume - try: - unmount_volume(mountpoint=mountpoint, print_output= True) - except (MountOperationError): - sys.exit(1) - else: - handleInvalidCommand() - elif action in ("prepopulate"): - # Get desired target from command line args - target = getTarget(sys.argv) +def restore_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = None, svm_name : str = None, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() 
- # Invoke desired action based on target - if target in ("flexcache", "cache"): - volumeName = None - paths = None + if cluster_name: + config["hostname"] = cluster_name - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hn:p:", ["help", "name=", "paths="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextPrepopulateFlexCache) - sys.exit(0) - elif opt in ("-n", "--name"): - volumeName = arg - elif opt in ("-p", "--paths"): - paths = arg - - # Check for required options - if not volumeName or not paths : - handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) - - # Convert paths string to list - pathsList = paths.split(",") + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + # Retrieve svm from config file + try: + svm = config["svm"] + if svm_name: + svm = svm_name + except: + if print_output: + _print_invalid_config_error() + raise InvalidConfigError() + + if print_output: + print("Restoring snapshot '" + snapshot_name + "'.") + + try: + # Retrieve volume + volume = NetAppVolume.find(name=volume_name, svm=svm) + if not volume: + if print_output: + print("Error: Invalid volume name.") + raise InvalidVolumeParameterError("name") + + # Retrieve snapshot + snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) + if not snapshot: + if print_output: + print("Error: Invalid snapshot name.") + raise InvalidSnapshotParameterError("name") - # Prepopulate FlexCache - try: - prepopulate_flex_cache(volume_name=volumeName, paths=pathsList, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError): - sys.exit(1) + # Restore snapshot + 
volume.patch(volume.uuid, **{"restore_to.snapshot.name": snapshot.name, "restore_to.snapshot.uuid": snapshot.uuid}, poll=True) + if print_output: + print("Snapshot restored successfully.") - else: - handleInvalidCommand() + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) - elif action in ("pull-from-s3", "pull-s3", "s3-pull"): - # Get desired target from command line args - target = getTarget(sys.argv) + else: + raise ConnectionTypeError() - # Invoke desired action based on target - if target in ("bucket"): - s3Bucket = None - s3ObjectKeyPrefix = "" - localDirectory = None - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help") : - print(helpTextPullFromS3Bucket) - sys.exit(0) - elif opt in ("-b", "--bucket"): - s3Bucket = arg - elif opt in ("-p", "--key-prefix"): - s3ObjectKeyPrefix = arg - elif opt in ("-d", "--directory"): - localDirectory = arg - - # Check for required options - if not s3Bucket or not localDirectory: - handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) - - # Push file to S3 - try: - pull_bucket_from_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, print_output=True) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) +def sync_cloud_sync_relationship(relationship_id: str, wait_until_complete: bool = False, print_output: bool = False): + # Step 1: Obtain access token and account ID for accessing Cloud Sync API - elif target in ("object", "file"): - s3Bucket = None - s3ObjectKey = None - localFile = None + # Retrieve refresh token + try: + refreshToken = 
_retrieve_cloud_central_refresh_token(print_output=print_output) + except InvalidConfigError: + raise - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextPullFromS3Object) - sys.exit(0) - elif opt in ("-b", "--bucket"): - s3Bucket = arg - elif opt in ("-k", "--key"): - s3ObjectKey = arg - elif opt in ("-f", "--file"): - localFile = arg - - # Check for required options - if not s3Bucket or not s3ObjectKey: - handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) - - # Push file to S3 + # Obtain access token and account ID + try: + accessToken, accountId = _get_cloud_sync_access_parameters(refreshToken=refreshToken, print_output=print_output) + except APIConnectionError: + raise + + # Step 2: Trigger Cloud Sync sync + + # Define parameters for API call + url = "https://cloudsync.netapp.com/api/relationships/%s/sync" % relationship_id + headers = { + "Content-Type": "application/json", + "Accept": "application/json", + "x-account-id": accountId, + "Authorization": "Bearer " + accessToken + } + + # Call API to trigger sync + if print_output: + print("Triggering sync operation for Cloud Sync relationship (ID = " + relationship_id + ").") + response = requests.put(url = url, headers = headers) + + # Check for API response status code of 202; if not 202, raise error + if response.status_code != 202: + errorMessage = "Error calling Cloud Sync API to trigger sync operation." 
+ if print_output: + print("Error:", errorMessage) + _print_api_response(response) + raise APIConnectionError(errorMessage, response) + + if print_output: + print("Sync operation successfully triggered.") + + # Step 3: Obtain status of the sync operation; keep checking until the sync operation has completed + + if wait_until_complete: + while True: + # Define parameters for API call + url = "https://cloudsync.netapp.com/api/relationships-v2/%s" % relationship_id + headers = { + "Accept": "application/json", + "x-account-id": accountId, + "Authorization": "Bearer " + accessToken + } + + # Call API to obtain status of sync operation + response = requests.get(url = url, headers = headers) + + # Parse response to retrieve status of sync operation try: - pull_object_from_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, print_output=True) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) + responseBody = json.loads(response.text) + latestActivityType = responseBody["activity"]["type"] + latestActivityStatus = responseBody["activity"]["status"] + except: + errorMessage = "Error obtaining status of sync operation from Cloud Sync API." + if print_output: + print("Error:", errorMessage) + _print_api_response(response) + raise APIConnectionError(errorMessage, response) + + # End execution if the latest update is complete + if latestActivityType == "Sync": + if latestActivityStatus == "DONE": + if print_output: + print("Success: Sync operation is complete.") + break + elif latestActivityStatus == "FAILED": + if print_output: + failureMessage = responseBody["activity"]["failureMessage"] + print("Error: Sync operation failed.") + print("Message:", failureMessage) + raise CloudSyncSyncOperationError(latestActivityStatus, failureMessage) + elif latestActivityStatus == "RUNNING": + # Print message re: progress + if print_output: + print("Sync operation is not yet complete. 
Status:", latestActivityStatus) + print("Checking again in 60 seconds...") + else: + if print_output: + print ("Error: Unknown sync operation status (" + latestActivityStatus + ") returned by Cloud Sync API.") + raise CloudSyncSyncOperationError(latestActivityStatus) - else: - handleInvalidCommand() + # Sleep for 60 seconds before checking progress again + time.sleep(60) - elif action in ("push-to-s3", "push-s3", "s3-push"): - # Get desired target from command line args - target = getTarget(sys.argv) +def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol: str, target_svm: str = None, cluster_name: str = None, + schedule: str = '', policy: str = 'MirrorAllSnapshots', action: str = None, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output : + _print_invalid_config_error() + raise InvalidConfigError() - # Invoke desired action based on target - if target in ("directory", "dir"): - s3Bucket = None - s3ObjectKeyPrefix = "" - localDirectory = None - s3ExtraArgs = None + if cluster_name: + config["hostname"] = cluster_name - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory=", "extra-args="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help") : - print(helpTextPushToS3Directory) - sys.exit(0) - elif opt in ("-b", "--bucket"): - s3Bucket = arg - elif opt in ("-p", "--key-prefix"): - s3ObjectKeyPrefix = arg - elif opt in ("-d", "--directory"): - localDirectory = arg - elif opt in ("-e", "--extra-args"): - s3ExtraArgs = arg - - # Check for required options - if not s3Bucket or not localDirectory: - 
handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) - - # Push file to S3 - try: - push_directory_to_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, s3_extra_args=s3ExtraArgs, print_output=True) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise - elif target in ("file"): - s3Bucket = None - s3ObjectKey = None - localFile = None - s3ExtraArgs = None + svm = config["svm"] + if not target_svm: + target_svm = svm - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:e:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextPushToS3File) - sys.exit(0) - elif opt in ("-b", "--bucket"): - s3Bucket = arg - elif opt in ("-k", "--key"): - s3ObjectKey = arg - elif opt in ("-f", "--file"): - localFile = arg - elif opt in ("-e", "--extra-args"): - s3ExtraArgs = arg - - # Check for required options - if not s3Bucket or not localFile: - handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) - - # Push file to S3 + try: + uuid = None + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": target_svm+":"+target_vol}) + for rel in snapmirror_relationship: + # Retrieve relationship details + try: + rel.get() + uuid = rel.uuid + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + if uuid: + if print_output: + print("Error: relationship alreay exists: "+target_svm+":"+target_vol) + raise InvalidConfigError() + except NetAppRestError as err: 
+ if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + try: + newRelationDict = { + "source": { + "path": source_svm+":"+source_vol + }, + "destination": { + "path": target_svm+":"+target_vol + } + #due to bug 1311226 setting the policy wil be done using cli api + # "policy": { + # "name": policy, + # }, + } + # if schedule != '': + # newRelationDict['schedule'] = schedule + + if print_output: + print("Creating snapmirror relationship: "+source_svm+":"+source_vol+" -> "+target_svm+":"+target_vol) + newRelationship = NetAppSnapmirrorRelationship.from_dict(newRelationDict) + newRelationship.post(poll=True, poll_timeout=120) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + try: + if print_output: + print("Setting snapmirror policy as: "+policy+" schedule:"+schedule) + response = NetAppCLI().execute("snapmirror modify",destination_path=target_svm+":"+target_vol,body={"policy": policy, "schedule":schedule}) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + try: + uuid = None + relation = None + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": target_svm+":"+target_vol}) + for relation in snapmirror_relationship: + # Retrieve relationship details + try: + relation.get() + uuid = relation.uuid + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + if not uuid: + if print_output: + print("Error: relationship was not created: "+target_svm+":"+target_vol) + raise InvalidConfigError() + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + if action in ["resync","initialize"]: try: - push_file_to_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, 
s3_extra_args=s3ExtraArgs, print_output=True) - except (InvalidConfigError, APIConnectionError): - sys.exit(1) + if print_output: + print("Setting state to snapmirrored, action:"+action) + patchRelation = NetAppSnapmirrorRelationship(uuid=uuid) + patchRelation.state = "snapmirrored" + patchRelation.patch(poll=True, poll_timeout=120) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + +def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume_name: str = None, cluster_name: str = None, wait_until_complete: bool = False, print_output: bool = False): + # Retrieve config details from config file + try: + config = _retrieve_config(print_output=print_output) + except InvalidConfigError: + raise + try: + connectionType = config["connectionType"] + except: + if print_output : + _print_invalid_config_error() + raise InvalidConfigError() - else: - handleInvalidCommand() + if cluster_name: + config["hostname"] = cluster_name - elif action in ("restore"): - # Get desired target from command line args - target = getTarget(sys.argv) + if connectionType == "ONTAP": + # Instantiate connection to ONTAP cluster + try: + _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) + except InvalidConfigError: + raise + + if volume_name: + svm = config["svm"] + if svm_name: + svm = svm_name + + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name}) + for rel in snapmirror_relationship: + # Retrieve relationship details + try: + rel.get() + uuid = rel.uuid + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + if not uuid: + snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"}) + for rel in snapmirror_relationship: + try: + rel.get() + uuid = rel.uuid + except NetAppRestError as err: + if 
print_output: + print("Error: ONTAP Rest API Error: ", err) + if uuid: + if print_output: + print("volume is part of svm-dr relationshitp: "+svm+":") + + if not uuid: + if print_output: + print("Error: relationship could not be found.") + raise SnapMirrorSyncOperationError("not found") + + if print_output: + print("Triggering sync operation for SnapMirror relationship (UUID = " + uuid + ").") + + try: + # Trigger sync operation for SnapMirror relationship + transfer = NetAppSnapmirrorTransfer(uuid) + transfer.post(poll=True) + except NetAppRestError as err: + if print_output: + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) + + if print_output: + print("Sync operation successfully triggered.") + + if wait_until_complete: + # Wait to perform initial check + print("Waiting for sync operation to complete.") + print("Status check will be performed in 10 seconds...") + time.sleep(10) - # Invoke desired action based on target - if target in ("snapshot", "snap"): - volumeName = None - snapshotName = None - svmName = None - clusterName = None - force = False + while True: + # Retrieve relationship + relationship = NetAppSnapmirrorRelationship.find(uuid=uuid) + relationship.get() - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hs:n:v:fu:", ["cluster-name=","help", "svm=", "name=", "volume=", "force"]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextRestoreSnapshot) - sys.exit(0) - elif opt in ("-n", "--name"): - snapshotName = arg - elif opt in ("-s", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-v", "--volume"): - volumeName = arg - elif opt in ("-f", "--force"): - force = True - - # Check for required options - if not volumeName or not snapshotName: - 
handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) - - # Confirm restore operation - if not force: - print("Warning: When you restore a snapshot, all subsequent snapshots are deleted.") - while True: - proceed = input("Are you sure that you want to proceed? (yes/no): ") - if proceed in ("yes", "Yes", "YES"): + # Check status of sync operation + if hasattr(relationship, "transfer"): + transferState = relationship.transfer.state + else: + transferState = None + + # if transfer is complete, end execution + if (not transferState) or (transferState == "success"): + healthy = relationship.healthy + if healthy: + if print_output: + print("Success: Sync operation is complete.") break - elif proceed in ("no", "No", "NO"): - sys.exit(0) else: - print("Invalid value. Must enter 'yes' or 'no'.") + if print_output: + print("Error: Relationship is not healthy. Access ONTAP System Manager for details.") + raise SnapMirrorSyncOperationError("not healthy") + elif transferState != "transferring": + if print_output: + print ("Error: Unknown sync operation status (" + transferState + ") returned by ONTAP API.") + raise SnapMirrorSyncOperationError(transferState) + else: + # Print message re: progress + if print_output: + print("Sync operation is not yet complete. 
Status:", transferState) + print("Checking again in 10 seconds...") - # Restore snapshot - try: - restore_snapshot(volume_name=volumeName, snapshot_name=snapshotName, svm_name=svmName, cluster_name=clusterName, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError): - sys.exit(1) + # Sleep for 10 seconds before checking progress again + time.sleep(10) - else: - handleInvalidCommand() + else: + raise ConnectionTypeError() - elif action == "sync": - # Get desired target from command line args - target = getTarget(sys.argv) +# +# Deprecated function names +# - # Invoke desired action based on target - if target in ("cloud-sync-relationship", "cloud-sync"): - relationshipID = None - waitUntilComplete = False - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hi:w", ["help", "id=", "wait"]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextSyncCloudSyncRelationship) - sys.exit(0) - elif opt in ("-i", "--id"): - relationshipID = arg - elif opt in ("-w", "--wait"): - waitUntilComplete = True - - # Check for required options - if not relationshipID: - handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) - - # Update cloud sync relationship - try: - sync_cloud_sync_relationship(relationship_id=relationshipID, wait_until_complete=waitUntilComplete, print_output=True) - except (InvalidConfigError, APIConnectionError, CloudSyncSyncOperationError): - sys.exit(1) +@deprecated +def cloneVolume(newVolumeName: str, sourceVolumeName: str, sourceSnapshotName: str = None, unixUID: str = None, unixGID: str = None, mountpoint: str = None, printOutput: bool = False) : + clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, 
source_snapshot_name=sourceSnapshotName, + mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, print_output=printOutput) - elif target in ("snapmirror-relationship", "snapmirror"): - uuid = None - volumeName = None - svmName = None - clusterName = None - waitUntilComplete = False - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hi:wn:u:v:", ["help", "cluster-name=","svm=","name=","uuid=", "wait"]) - except Exception as err: - print(err) - handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextSyncSnapMirrorRelationship) - sys.exit(0) - elif opt in ("-v", "--svm"): - svmName = arg - elif opt in ("-u", "--cluster-name"): - clusterName = arg - elif opt in ("-n", "--name"): - volumeName = arg - elif opt in ("-i", "--uuid"): - uuid = arg - elif opt in ("-w", "--wait"): - waitUntilComplete = True - - # Check for required options - if not uuid and not volumeName: - handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) - - if uuid and volumeName: - handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) - - # Update SnapMirror relationship - try: - sync_snap_mirror_relationship(uuid=uuid, svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, wait_until_complete=waitUntilComplete, print_output=True) - except ( - InvalidConfigError, APIConnectionError, InvalidSnapMirrorParameterError, - SnapMirrorSyncOperationError) : - sys.exit(1) +@deprecated +def createSnapshot(volumeName: str, snapshotName: str = None, printOutput: bool = False) : + create_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) - else: - handleInvalidCommand() - elif action in ("version", "v", "-v", "--version"): - print("NetApp DataOps Toolkit for Traditional Environments - version " - + traditional.__version__) +@deprecated +def 
createVolume(volumeName: str, volumeSize: str, guaranteeSpace: bool = False, volumeType: str = "flexvol", unixPermissions: str = "0777", unixUID: str = "0", unixGID: str = "0", exportPolicy: str = "default", snapshotPolicy: str = "none", aggregate: str = None, mountpoint: str = None, printOutput: bool = False) : + create_volume(volume_name=volumeName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, + unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, print_output=printOutput) - else: - handleInvalidCommand() + +@deprecated +def deleteSnapshot(volumeName: str, snapshotName: str, printOutput: bool = False) : + delete_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) + + +@deprecated +def deleteVolume(volumeName: str, printOutput: bool = False) : + delete_volume(volume_name=volumeName, print_output=printOutput) + + +@deprecated +def listCloudSyncRelationships(printOutput: bool = False) -> list() : + return list_cloud_sync_relationships(print_output=printOutput) + + +@deprecated +def listSnapMirrorRelationships(printOutput: bool = False) -> list() : + return list_snap_mirror_relationships(print_output=printOutput) + + +@deprecated +def listSnapshots(volumeName: str, printOutput: bool = False) -> list() : + return list_snapshots(volume_name=volumeName, print_output=printOutput) + + +@deprecated +def listVolumes(checkLocalMounts: bool = False, includeSpaceUsageDetails: bool = False, printOutput: bool = False) -> list() : + return list_volumes(check_local_mounts=checkLocalMounts, include_space_usage_details=includeSpaceUsageDetails, print_output=printOutput) + + +@deprecated +def mountVolume(volumeName: str, mountpoint: str, printOutput: bool = False) : + mount_volume(volume_name=volumeName, mountpoint=mountpoint, print_output=printOutput) + + +@deprecated +def 
prepopulateFlexCache(volumeName: str, paths: list, printOutput: bool = False) : + prepopulate_flex_cache(volume_name=volumeName, paths=paths, print_output=printOutput) + + +@deprecated +def pullBucketFromS3(s3Bucket: str, localDirectory: str, s3ObjectKeyPrefix: str = "", printOutput: bool = False) : + pull_bucket_from_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, print_output=printOutput) + + +@deprecated +def pullObjectFromS3(s3Bucket: str, s3ObjectKey: str, localFile: str = None, printOutput: bool = False) : + pull_object_from_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, print_output=printOutput) + + +@deprecated +def pushDirectoryToS3(s3Bucket: str, localDirectory: str, s3ObjectKeyPrefix: str = "", s3ExtraArgs: str = None, printOutput: bool = False) : + push_directory_to_s3(s3_bucket=s3Bucket, local_directory=localDirectory, s3_object_key_prefix=s3ObjectKeyPrefix, s3_extra_args=s3ExtraArgs, print_output=printOutput) + + +@deprecated +def pushFileToS3(s3Bucket: str, localFile: str, s3ObjectKey: str = None, s3ExtraArgs: str = None, printOutput: bool = False) : + push_file_to_s3(s3_bucket=s3Bucket, s3_object_key=s3ObjectKey, local_file=localFile, s3_extra_args=s3ExtraArgs, print_output=printOutput) + + +@deprecated +def restoreSnapshot(volumeName: str, snapshotName: str, printOutput: bool = False) : + restore_snapshot(volume_name=volumeName, snapshot_name=snapshotName, print_output=printOutput) + + +@deprecated +def syncCloudSyncRelationship(relationshipID: str, waitUntilComplete: bool = False, printOutput: bool = False) : + sync_cloud_sync_relationship(relationship_id=relationshipID, wait_until_complete=waitUntilComplete, print_output=printOutput) + + +@deprecated +def syncSnapMirrorRelationship(uuid: str, waitUntilComplete: bool = False, printOutput: bool = False) : + sync_snap_mirror_relationship(uuid=uuid, wait_until_complete=waitUntilComplete, print_output=printOutput) From 
a93d71b60442a90bd4387cf6e9da232b22c6fb03 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Tue, 23 Aug 2022 14:09:05 -0500 Subject: [PATCH 28/56] Automatically unmount volume option for delete function. --- .../netapp_dataops/traditional.py | 49 ++++++++++++------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 3d1e613..54e1990 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1076,7 +1076,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise ConnectionTypeError() -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, delete_mirror: bool = False, +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, check_local_mounts: bool = False, delete_mirror: bool = False, delete_non_clone: bool = False, print_output: bool = False): # Retrieve config details from config file try: @@ -1109,7 +1109,6 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No if print_output : _print_invalid_config_error() raise InvalidConfigError() - try: # Retrieve volume volume = NetAppVolume.find(name=volume_name, svm=svm) @@ -1167,13 +1166,29 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No if print_output: print("Error: ONTAP Rest API Error: ", err) - if mountpoint: - #check if volume is mounted locally, and then unmount it. 
- try: - unmount_volume(mountpoint=mountpoint, print_output=True) - except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): - if print_output: - print("Error: Error mounting volume.") + #Unmount volume and skip if not sudo or not locally mounted + try: + volumes = list_volumes(check_local_mounts=True) + for localmount in volumes: + if localmount["Volume Name"] == volume_name: + x=localmount["Local Mountpoint"] + if x == "": + break + elif x != "": + if os.getuid() != 0: + print("You need to have root privileges to run unmount command.") + break + else: + try: + unmount = unmount_volume(mountpoint=x) + except (InvalidConfigError, APIConnectionError): + if print_output: + print("Error: unmounting volume.") + raise MountOperationError(err) + + except (InvalidConfigError, APIConnectionError): + if print_output: + print("Error: volume retrieval failed for unmount operation.") raise try: @@ -1419,7 +1434,7 @@ def list_snapshots(volume_name: str, cluster_name: str = None, svm_name: str = N raise ConnectionTypeError() -def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None) -> list(): +def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None, localMountpoint: str = None) -> list(): # Retrieve config details from config file try: config = _retrieve_config(print_output=print_output) @@ -1546,10 +1561,11 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: volumeDict["NFS Mount Target"] = nfsMountTarget if check_local_mounts: localMountpoint = "" - for mount in mounts.split("\n") : - mountDetails = mount.split(" ") - if mountDetails[0] == nfsMountTarget : - localMountpoint = mountDetails[2] + if nfsMountTarget: + for mount in mounts.split("\n") : + mountDetails = mount.split(" ") + 
if mountDetails[0].strip() == nfsMountTarget.strip() : + localMountpoint = mountDetails[2] volumeDict["Local Mountpoint"] = localMountpoint volumeDict["FlexCache"] = flexcache volumeDict["Clone"] = clone @@ -1565,18 +1581,17 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) + # Print list of volumes if print_output: # Convert volumes array to Pandas DataFrame volumesDF = pd.DataFrame.from_dict(volumesList, dtype="string") print(tabulate(volumesDF, showindex=False, headers=volumesDF.columns)) - return volumesList - else: raise ConnectionTypeError() -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, is_sudo: bool = False, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None svm = None From 8f4690b1f2a348c5bdae30bff1fbd40df6216a28 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 25 Aug 2022 10:50:40 -0500 Subject: [PATCH 29/56] Fixing delete function additions. 
--- .../netapp_dataops/netapp_dataops_cli.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 5a9a55f..6368f20 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1088,11 +1088,10 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = force = False deleteMirror = False deleteNonClone = False - mountpoint = None # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m:p:", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror", "mountpoint="]) + opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror"]) except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) @@ -1108,8 +1107,6 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = clusterName = arg elif opt in ("-n", "--name"): volumeName = arg - elif opt in ("-p", "--mountpoint"): - mountpoint = arg elif opt in ("-f", "--force"): force = True elif opt in ("-m", "--delete-mirror"): From 6f8f43ef038b66b38ad6e16ad73927775bc7e637 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 25 Aug 2022 10:56:32 -0500 Subject: [PATCH 30/56] Fixed delete function. 
--- netapp_dataops_traditional/netapp_dataops/traditional.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 54e1990..33b0dac 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -52,6 +52,7 @@ class CloudSyncSyncOperationError(Exception) : """Error that will be raised when a Cloud Sync sync operation fails""" pass + class ConnectionTypeError(Exception): """Error that will be raised when an invalid connection type is given""" pass @@ -522,7 +523,6 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st else: junction = "/"+new_volume_name - # Construct dict representing new volume newVolumeDict = { "name": new_volume_name, @@ -1044,7 +1044,6 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No # Retrieve snapshot snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) - if not snapshot: if print_output: print("Error: Invalid snapshot name.") @@ -1076,7 +1075,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise ConnectionTypeError() -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, mountpoint: str = None, check_local_mounts: bool = False, delete_mirror: bool = False, +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, check_local_mounts: bool = False, delete_mirror: bool = False, delete_non_clone: bool = False, print_output: bool = False): # Retrieve config details from config file try: @@ -1434,7 +1433,7 @@ def list_snapshots(volume_name: str, cluster_name: str = None, svm_name: str = N raise ConnectionTypeError() -def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, 
svm_name: str = None, localMountpoint: str = None) -> list(): +def list_volumes(check_local_mounts: bool = False, include_space_usage_details: bool = False, print_output: bool = False, cluster_name: str = None, svm_name: str = None) -> list(): # Retrieve config details from config file try: config = _retrieve_config(print_output=print_output) From c362c333645bb8704fddd2092b9bc3b22bdc8c70 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 25 Aug 2022 11:42:04 -0500 Subject: [PATCH 31/56] Fixed errors. --- netapp_dataops_traditional/README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index b31f518..c17df7c 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -262,7 +262,6 @@ The following options/arguments are optional: -v, --svm= Non default SVM name -f, --force Do not prompt user to confirm operation. -m, --delete-mirror Delete/release snapmirror relationship prior to volume deletion - -p, --mountpoint Mount point where volume is locally mounted. If specified volume will be unmounted (optional). --delete-non-clone Enable deletion of volume not created as clone by this tool -h, --help Print help text. @@ -365,7 +364,7 @@ The following options/arguments are optional: -l, --lif= non default lif (nfs server ip/name) -h, --help Print help text. -x, --readonly Mount volume locally as read-only. - -o, --options Enables users to specify custom NFS mount options. + -o, --options Specify custom NFS mount options. ``` ##### Example Usage @@ -1092,7 +1091,6 @@ def delete_volume( print_output: bool = False # Denotes whether or not to print messages to the console during execution. 
cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used - mountpoint: str = None, # Mount point where volume is locally mounted. If specified volume will be unmounted (optional). delete_mirror: bool = False, # release snapmirror on source volume/delete snapmirror relation on destination volume delete_non_clone: bool = False, # Enable deletion of non clone volume (extra step not to incedently delete important volume) print_output: bool = False # Denotes whether or not to print messages to the console during execution. @@ -1175,7 +1173,7 @@ def mount_volume( cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used mountpoint: str, # Local mountpoint to mount volume at (required). - mount_options: str = None # Enables users to specify custom NFS mount options. + mount_options: str = None # Specify custom NFS mount options. readonly: bool = False, # Mount volume locally as "read-only." If not specified volume will be mounted as "read-write". On Linux hosts - if specified, calling program must be run as root. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) : From 67d20af047745ba28714435f72890e5b3e32c405 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 25 Aug 2022 11:44:37 -0500 Subject: [PATCH 32/56] Fixed some errors. 
--- .../netapp_dataops/netapp_dataops_cli.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 6368f20..5b64128 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -229,7 +229,6 @@ \t-u, --cluster-name=\tnon default hosting cluster \t-v, --svm \t\tnon default SVM name \t-f, --force\t\tDo not prompt user to confirm operation. -\t-p, --mountpoint\t\tMount point for the locally mounted volume. \t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion \t --delete-non-clone\tEnable deletion of volume not created as clone by this tool \t-h, --help\t\tPrint help text. @@ -322,7 +321,7 @@ \t-l, --lif \t\tnon default lif (nfs server ip/name) \t-h, --help\t\tPrint help text. \t-x, --readonly\t\tMount volume locally as read-only. -\t-o, --options\t\tEnables users to Specify custom NFS mount options. +\t-o, --options\t\tSpecify custom NFS mount options. Examples: \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 From b598a522261b870ca1d8adce59affc0af10c414e Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 25 Aug 2022 12:17:48 -0500 Subject: [PATCH 33/56] test for mig instances. 
--- netapp_dataops_k8s/test_scripts/test_triton.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/netapp_dataops_k8s/test_scripts/test_triton.sh b/netapp_dataops_k8s/test_scripts/test_triton.sh index 6547fee..04e6f11 100755 --- a/netapp_dataops_k8s/test_scripts/test_triton.sh +++ b/netapp_dataops_k8s/test_scripts/test_triton.sh @@ -43,7 +43,7 @@ printf "\n" ### Create workspace 2 server2_name="b-1-w2" model_pvc="model-repo-test" -command="netapp_dataops_k8s_cli.py create triton-server --server-name=$server2_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:21.11-py3 --memory=512Mi --cpu=0.25" +command="netapp_dataops_k8s_cli.py create triton-server --server-name=$server2_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:21.11-py3 --memory=512Mi --cpu=0.25 --allocate-resource=nvidia.com/mig-1g.5gb=1" echo "Running: $command" eval $command printf "\n" @@ -104,7 +104,7 @@ printf "\n" server4_name="b-2-w2" model_pvc="model-repo-test1" namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server --namespace=$namespace --server-name=$server4_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:22.03-py3 --memory=512Mi --cpu=0.25 --nvidia-gpu=1 --load-balancer" +command="netapp_dataops_k8s_cli.py create triton-server --namespace=$namespace --server-name=$server4_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:22.03-py3 --memory=512Mi --cpu=0.25 --nvidia-gpu=1 --load-balancer --allocate-resource=nvidia.com/mig-1g.5gb=1" echo "Running: $command" eval $command printf "\n" @@ -165,7 +165,7 @@ printf "\n" server6_name="b-3-w2" model_pvc="model-repo-test1" namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server -n $namespace -s $server6_name -v $model_pvc -i nvcr.io/nvidia/tritonserver:22.03-py3 -m 512Mi -p 0.25 -g 1 -b" +command="netapp_dataops_k8s_cli.py create triton-server -n $namespace -s $server6_name -v $model_pvc 
-i nvcr.io/nvidia/tritonserver:22.03-py3 -m 512Mi -p 0.25 -g 1 -b -r nvidia.com/mig-1g.5gb=1" echo "Running: $command" eval $command printf "\n" From 4f5f19ab1f0a234f2ee1862a4be4234bd8421e48 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 15 Sep 2022 08:00:15 -0500 Subject: [PATCH 34/56] Fixed spacing issues and cleaned up code. --- netapp_dataops_traditional/netapp_dataops/traditional.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 33b0dac..47451e3 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1075,7 +1075,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise ConnectionTypeError() -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, check_local_mounts: bool = False, delete_mirror: bool = False, +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, delete_mirror: bool = False, delete_non_clone: bool = False, print_output: bool = False): # Retrieve config details from config file try: @@ -1108,6 +1108,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No if print_output : _print_invalid_config_error() raise InvalidConfigError() + try: # Retrieve volume volume = NetAppVolume.find(name=volume_name, svm=svm) @@ -1580,13 +1581,14 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) - # Print list of volumes if print_output: # Convert volumes array to Pandas DataFrame volumesDF = pd.DataFrame.from_dict(volumesList, dtype="string") print(tabulate(volumesDF, showindex=False, headers=volumesDF.columns)) + return volumesList + else: raise ConnectionTypeError() @@ -1681,7 +1683,7 @@ def 
mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv continue mount_cmd_opts_str = mount_cmd_opts_str + item + "," mount_cmd_opts_str = mount_cmd_opts_str[:-1] - exit("You need to have root privileges to run mount command." + sys.exit("You need to have root privileges to run mount command." "\nTo mount the volume run the following command as root:" "\n"+ "mount -o "+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) From 77ba21dc03f5d7e35441fffae8f3f0f167420e6c Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 6 Oct 2022 10:52:58 -0500 Subject: [PATCH 35/56] Fixed some bugs. --- .../netapp_dataops/traditional.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 47451e3..544b8f0 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -1176,7 +1176,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No break elif x != "": if os.getuid() != 0: - print("You need to have root privileges to run unmount command.") + print("Warning: Volume was not unmounted. You need to have root privileges to run unmount command.") break else: try: @@ -1683,9 +1683,14 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv continue mount_cmd_opts_str = mount_cmd_opts_str + item + "," mount_cmd_opts_str = mount_cmd_opts_str[:-1] - sys.exit("You need to have root privileges to run mount command." - "\nTo mount the volume run the following command as root:" - "\n"+ "mount -o "+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) + if mount_cmd_opts_str: + sys.exit("You need to have root privileges to run mount command." 
+ "\nTo mount the volume run the following command as root:" + "\n"+ "mount -o "+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) + else: + sys.exit("You need to have root privileges to run mount command." + "\nTo mount the volume run the following command as root:" + "\n"+ "mount"+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) try: subprocess.check_call(mount_cmd) From 2b08fa70262ec72fe4ebf9056a0ed4813713c812 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 6 Oct 2022 11:01:26 -0500 Subject: [PATCH 36/56] Fixed some bugs. --- .../netapp_dataops/netapp_dataops_cli.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 5b64128..5195032 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1268,6 +1268,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = mountpoint = None mount_options = None readonly = False + + # Check for required options + if not volumeName: + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + + if not mountpoint: + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) From fc81f97232fcb745291ec9059c820c740d7bed50 Mon Sep 17 00:00:00 2001 From: "Ahmad, Sufian" Date: Thu, 6 Oct 2022 11:14:25 -0500 Subject: [PATCH 37/56] Fixed bugs. 
--- .../netapp_dataops/netapp_dataops_cli.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 5195032..69885fa 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -1268,14 +1268,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = mountpoint = None mount_options = None readonly = False - - # Check for required options - if not volumeName: - handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) - - if not mountpoint: - handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) - + # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) @@ -1303,6 +1296,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-x", "--readonly"): readonly = True + # Check for required options + if not volumeName: + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + + if not mountpoint: + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + # Mount volume try: mount_volume(svm_name = svmName, cluster_name=clusterName, lif_name = lifName, volume_name=volumeName, mountpoint=mountpoint, mount_options=mount_options, readonly=readonly, print_output=True) From 4edc7fa19dc7e3f8802074b5e36687c28e97b474 Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Tue, 26 Mar 2024 16:56:48 -0400 Subject: [PATCH 38/56] remove test scripts --- .../test_scripts/test_s3_cli.sh | 590 ------------------ .../test_scripts/test_triton.sh | 208 ------ 2 files changed, 798 deletions(-) delete mode 100755 netapp_dataops_k8s/test_scripts/test_s3_cli.sh delete mode 100755 
netapp_dataops_k8s/test_scripts/test_triton.sh diff --git a/netapp_dataops_k8s/test_scripts/test_s3_cli.sh b/netapp_dataops_k8s/test_scripts/test_s3_cli.sh deleted file mode 100755 index 00cd8b7..0000000 --- a/netapp_dataops_k8s/test_scripts/test_s3_cli.sh +++ /dev/null @@ -1,590 +0,0 @@ -#!/usr/bin/env bash - -proceed_prompt () { - while true; do - read -p "Are you ready to proceed (y/n)? " proceed - case $proceed in - [Yy]* ) break;; - [Nn]* ) exit;; - * ) printf "Invalid entry\n";; - esac - done -} - -print_configuration_help() { - printf "\n" - printf "To configure this test script create a file named s3_cli_config with the following shell variables set." - printf "\n\n" - printf "cli_path=VALUE # The path to the cli script. Used for testing in development environment. Use empty string if you don't want to set a specific path.\n" - printf "test_secret_name=VALUE # The name of the K8s secret used by the test\n" - printf "s3_access_key=VALUE # The S3 access key\n" - printf "s3_secret_key=VALUE # The S3 secret key\n" - printf "root_ca_cert_path=VALUE # The path to a valid root ca certificate file\n" - printf "inter_ca_cert_path=VALUE # The path to an intermediate ca certificate file. This file may not need to be valid?\n" - printf "s3_host=VALUE # The hostname or IP address of the S3 service\n" - printf "s3_port=VALUE # The port number the S3 service is listening on\n" - printf "s3_protocol=VALUE # The protocol to use. Either http or https\n" - printf "target_pvc=VALUE # The name of a pvc to use for getting files from S3\n" - printf "source_bucket=VALUE # The name of a bucket with data to use to get data from S3\n" - printf "source_object=VALUE # The name of an object to copy from the source bucket\n" - printf "target_bucket=VALUE # The name of a bucket to copy files to\n" - printf "alt_namespace=VALUE # The name of an alternate namespace to use. 
This should be created before running this script.\n" - - exit -} - -printf "NetApp DataOps Toolkit for Kubernetes - Interactive test script\n\n" - -printf "Prerequisites:\n" -printf " * 2 namespaces: default plus one manually defined namespace (configured in s3_cli_config)\n" -printf " * Environment configuration file s3_cli_config\n" -printf " * Have an S3 service with https enabled available\n" -printf " * Have a bucket with data in it available in S3\n" -printf " * Have an empty bucket available in S3 (to verify PUTs)\n" -printf " * Have a PVC with data in it (for PUTs) (both namespaces)\n" -printf " * Have an empty PVC available (for GETs) (both namespaces)\n" -printf "" -proceed_prompt - -# Source PVC Setup instructions -# 1. Create a PVC in the cluster named dataops-test-source-pvc -# 2. Apply the following yaml to create a pod to generate files on the pvc -# apiVersion: v1 -# kind: Pod -# metadata: -# name: dataops-test-create-volumedata -# namespace: default -# spec: -# containers: -# - name: dataops-test-create-pod -# image: busybox:stable-musl -# volumeMounts: -# - name: dataops-test-volume -# mountPath: /mnt/data -# command: ["sh"] -# args: ["-c", "echo 'file1' > /mnt/data/one.txt;echo 'file2' > /mnt/data/two.txt;mkdir /mnt/data/layer2;mkdir /mnt/data/layer2/layer3;echo 'file3' > /mnt/data/layer2/three.txt;echo 'file4' > /mnt/data/layer2/layer3/four.txt"] -# volumes: -# - name: dataops-test-volume -# persistentVolumeClaim: -# claimName: dataops-test-source-pvc -# restartPolicy: Never - -############################################### -# Test class D - Data Movement # -############################################### -printf "\n* Starting: Test class D - Data Movement (S3)\n\n" - -printf "\n* Checking for Test Configuration File s3_cli_config...\n" -if [ -f "s3_cli_config" ] -then - . s3_cli_config - - [ ! -v "cli_path" ] && print_configuration_help - [ ! -v "test_secret_name" ] && print_configuration_help - [ ! 
-v "s3_access_key" ] && print_configuration_help - [ ! -v "s3_secret_key" ] && print_configuration_help - [ ! -v "root_ca_cert_path" ] && print_configuration_help - [ ! -v "s3_host" ] && print_configuration_help - [ ! -v "s3_port" ] && print_configuration_help - [ ! -v "s3_protocol" ] && print_configuration_help - [ ! -v "target_pvc" ] && printf " Missing target_pvc\n" && print_configuration_help - [ ! -v "source_bucket" ] && printf " Missing source_bucket\n" && print_configuration_help - [ ! -v "source_object" ] && printf " Missing source_object\n" && print_configuration_help - [ ! -v "target_bucket" ] && printf " Missing target_bucket\n" && print_configuration_help - [ ! -v "alt_namespace" ] && printf " Missing alt_namespace\n" && print_configuration_help - -else - print_configuration_help -fi - -################################################## -## Test set D.1. - default namespace, long opts ## -################################################## -printf "** Starting: Test set D.1. - default namespace, long opts\n\n" - -# These are variables we don't need the user to set -primary_ca_map_name="datops-s3-root-ca-map" -source_pvc="dataops-test-source-pvc" -image_name="minio/mc:RELEASE.2021-06-13T17-48-22Z" - -# Determine if we need to use HTTPS or not -# Initially assume yes. We can add logic to determine this if needed. 
-protocol_flag="--use-https" - -### Create a secret -printf "\n*** Testing the create s3-secret command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py create s3-secret --secret-name=$test_secret_name --access-key=$s3_access_key --secret-key=$s3_secret_key" -echo "Running: $command" -eval $command -printf "\nRetrieving secret:\n" -kubectl get secret $test_secret_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Create a CA Config Map -printf "\n*** Testing the create ca-config-map command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py create ca-config-map --config-map-name=$primary_ca_map_name --file=$root_ca_cert_path" -echo "Running: $command" -eval $command -printf "\nRetrieving the config map:\n" -kubectl get configmaps $primary_ca_map_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test get-s3 bucket -printf "\n*** Testing the get-s3 bucket command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 bucket --credentials-secret=$test_secret_name --s3-host=$s3_host --bucket-name=$source_bucket --pvc-name=$target_pvc --s3-port=$s3_port $protocol_flag" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test getting the job status -printf "\n*** Testing show s3-job command ***\n" -read -p "Enter the job name: " job_name -command="${cli_path}netapp_dataops_k8s_cli.py show s3-job --job=$job_name" -echo "Running: $command" -eval $command -printf "\n" -proceed_prompt -printf "\n" - -### Test deleting the job -printf "\n*** Testing the delete s3-job command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job --job=$job_name" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test get-s3 object -printf "\n*** Testing get-s3 object command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 object --credentials-secret=$test_secret_name --s3-host=$s3_host --bucket-name=$source_bucket --pvc-name=$target_pvc --s3-port=$s3_port $protocol_flag --object-key=$source_object" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -sleep 2 -kubectl get job -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs $pod_name -printf "\n" -proceed_prompt -printf "\n" - - -### Cleanup get-s3 object job -printf "\nCleanup job for get-s3 object command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job --job=$transfer_job" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - - -### Test put-s3 object -printf "\n*** Testing the put-s3 object command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 object --credentials-secret=$test_secret_name --s3-host=$s3_host --bucket-name=$target_bucket --pvc-name=$source_pvc --s3-port=$s3_port $protocol_flag --object-key=one.txt --file-location=one.txt" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 object job -printf "\nCleanup job for put-s3 object command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job --job=$transfer_job" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test put-s3 bucket -printf "\n*** Testing the put-s3 bucket command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 bucket --credentials-secret=$test_secret_name --s3-host=$s3_host --bucket-name=$target_bucket --pvc-name=$source_pvc --s3-port=$s3_port $protocol_flag" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 bucket job -printf "\nCleanup job for put-s3 bucket command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job --job=$transfer_job" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." -printf "\n" -proceed_prompt -printf "\n" - -### Test delete ca-config-map -printf "\n*** Testing the delete ca-config-map command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete ca-config-map --config-map-name=$primary_ca_map_name" -echo "Running: $command" -eval $command -printf "\nRetrieving the config map:\n" -kubectl get configmaps $primary_ca_map_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test delete s3-secret -printf "\n*** Testing the delete s3-secret command ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-secret --secret-name=$test_secret_name" -echo "Running: $command" -eval $command -printf "\nRetrieving secret:\n" -kubectl get secret $test_secret_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -######################################################## -## Test set D.2. 
- short opts, alternate namespace ## -######################################################## -printf "** Starting: Test set D.2. - testing short options and all options\n\n" - -### Create a secret -printf "\n*** Testing the create s3-secret command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py create s3-secret -d $test_secret_name -a $s3_access_key -s $s3_secret_key -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving secret:\n" -kubectl get secret $test_secret_name --namespace=$alt_namespace -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Create a CA Config Map -printf "\n*** Testing the create ca-config-map command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py create ca-config-map -c $primary_ca_map_name -f $root_ca_cert_path -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving the config map:\n" -kubectl get configmaps $primary_ca_map_name --namespace=$alt_namespace -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test get-s3 bucket -printf "\n*** Testing the get-s3 bucket command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 bucket -c $test_secret_name -o $s3_host -b $source_bucket -p $target_pvc -t $s3_port $protocol_flag -v -m $primary_ca_map_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test getting the job status -printf "\n*** Testing show s3-job command with short options ***\n" -read -p "Enter the job name: " job_name -command="${cli_path}netapp_dataops_k8s_cli.py show s3-job -j $job_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\n" -proceed_prompt -printf "\n" - -### Test deleting the job -printf "\n*** Testing the delete s3-job command with short options ***\n" 
-command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $job_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." -printf "\n" -proceed_prompt -printf "\n" - -### Test get-s3 object -printf "\n*** Testing get-s3 object command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 object -c $test_secret_name -o $s3_host -b $source_bucket -p $target_pvc -t $s3_port $protocol_flag -k $source_object -v -m $primary_ca_map_name -n $alt_namespace -f copied_file.txt" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -sleep 2 -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup get-s3 object job -printf "\nCleanup job for get-s3 object command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test put-s3 object -printf "\n*** Testing the put-s3 object command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 object -c $test_secret_name -o $s3_host -b $target_bucket -p $source_pvc -t $s3_port $protocol_flag -k one.txt -f one.txt -v -m $primary_ca_map_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 object job -printf "\nCleanup job for put-s3 object command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test put-s3 bucket -printf "\n*** Testing the put-s3 bucket command with short options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 bucket -c $test_secret_name -o $s3_host -b $target_bucket -p $source_pvc -t $s3_port $protocol_flag -v -m $primary_ca_map_name -n $alt_namespace -d layer2" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 bucket job -printf "\nCleanup job for put-s3 bucket command" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." -printf "\n" -proceed_prompt -printf "\n" - -######################################################## -## Test set D.3. - miscellaneous options ## -######################################################## -printf "** Starting: Test set D.3. 
- testing miscellaneous options\n\n" - -# Set resource variables -requested_memory="8000Ki" -requested_cpu="300m" -limit_memory="50000Ki" -limit_cpu="500m" - -### Test get-s3 bucket with image and resource limits -printf "\n*** Testing the get-s3 bucket command with extra options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 bucket -c $test_secret_name -o $s3_host -b $source_bucket -p $target_pvc -t $s3_port $protocol_flag -n $alt_namespace -i $image_name --cpu-request=$requested_cpu --cpu-limit=$limit_cpu --memory-request=$requested_memory --memory-limit=$limit_memory" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup get-s3 bucket job -printf "\nCleanup job for get-s3 bucket command\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test get-s3 object -printf "\n*** Testing get-s3 object command with extra options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py get-s3 object -c $test_secret_name -o $s3_host -b $source_bucket -p $target_pvc -t $s3_port $protocol_flag -k $source_object -n $alt_namespace -i $image_name --cpu-request=$requested_cpu --cpu-limit=$limit_cpu --memory-request=$requested_memory --memory-limit=$limit_memory" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -sleep 2 -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup get-s3 object job -printf "\nCleanup job for get-s3 object command\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test put-s3 object -printf "\n*** Testing the put-s3 object command with extra options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 object -c $test_secret_name -o $s3_host -b $target_bucket -p $source_pvc -t $s3_port $protocol_flag -k one.txt -f one.txt -n $alt_namespace -i $image_name --cpu-request=$requested_cpu --cpu-limit=$limit_cpu --memory-request=$requested_memory --memory-limit=$limit_memory" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 object job -printf "\nCleanup job for put-s3 object command\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test put-s3 bucket -printf "\n*** Testing the put-s3 bucket command with extra options ***\n" -command="${cli_path}netapp_dataops_k8s_cli.py put-s3 bucket -c $test_secret_name -o $s3_host -b $target_bucket -p $source_pvc -t $s3_port $protocol_flag -n $alt_namespace -i $image_name --cpu-request=$requested_cpu --cpu-limit=$limit_cpu --memory-request=$requested_memory --memory-limit=$limit_memory" -echo "Running: $command" -eval $command -printf "\nChecking the existing jobs:\n" -kubectl get job --namespace=$alt_namespace -o yaml -read -p "Enter the job name: " transfer_job -printf "Here are the pods associated with the job.\n" -kubectl get pods --namespace=$alt_namespace --selector=job-name=$transfer_job -read -p "Enter a pod name: " pod_name -printf "\nShow the job logs for $pod_name" -kubectl logs --namespace=$alt_namespace $pod_name -printf "\n" -proceed_prompt -printf "\n" - -### Cleanup put-s3 bucket job -printf "\nCleanup job for put-s3 bucket command\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-job -j $transfer_job -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nChecking existing jobs after 5 seconds:\n" -sleep 5 -kubectl get job --namespace=$alt_namespace -printf "If the job still shows up try checking for the job in another window for awhile to see if it goes away." 
-printf "\n" -proceed_prompt -printf "\n" - -### Test delete ca-config-map -printf "\n*** Testing the delete ca-config-map command with short options***\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete ca-config-map -c $primary_ca_map_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving the config map:\n" -kubectl get configmaps $primary_ca_map_name --namespace=$alt_namespace -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Test delete s3-secret -printf "\n*** Testing the delete s3-secret command with short options***\n" -command="${cli_path}netapp_dataops_k8s_cli.py delete s3-secret -d $test_secret_name -n $alt_namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving secret:\n" -kubectl get secret $test_secret_name --namespace=$alt_namespace -o yaml -printf "\n" -proceed_prompt -printf "\n" - diff --git a/netapp_dataops_k8s/test_scripts/test_triton.sh b/netapp_dataops_k8s/test_scripts/test_triton.sh deleted file mode 100755 index 04e6f11..0000000 --- a/netapp_dataops_k8s/test_scripts/test_triton.sh +++ /dev/null @@ -1,208 +0,0 @@ -#!/usr/bin/env bash - -proceed_prompt () { - while true; do - read -p "Are you ready to proceed (y/n)? 
" proceed - case $proceed in - [Yy]* ) break;; - [Nn]* ) exit;; - * ) printf "Invalid entry\n";; - esac - done -} - -printf "NetApp DataOps Toolkit for Kubernetes - Interactive test script\n\n" - -printf "Prerequisites:\n" -printf "2 namespaces: default, dsk-test\n" -printf "2 PVCs that already exists with the model repo named 'model-repo-test' (in default namespace) and 'model-repo-test1' (in dsk-test namespace) and model repo's loaded.\n" -proceed_prompt - -################################################## -# Test class B - NVIDID Triton Inference Server management # -################################################## -printf "\n* Starting: Test class B - NVIDIA Triton Inference Server management\n\n" - -################################################## -## Test set B.1. - default namespace, long opts ## -################################################## -printf "** Starting: Test set B.1. - default namespace, long opts\n\n" - -### Create workspace 1 -server1_name="b-1-w1" -model_pvc="model-repo-test" -command="netapp_dataops_k8s_cli.py create triton-server --server-name=$server1_name --model-repo-pvc-name=$model_pvc" -echo "Running: $command" -eval $command -printf "\n" -kubectl get deployment ntap-dsutil-triton-$server1_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Create workspace 2 -server2_name="b-1-w2" -model_pvc="model-repo-test" -command="netapp_dataops_k8s_cli.py create triton-server --server-name=$server2_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:21.11-py3 --memory=512Mi --cpu=0.25 --allocate-resource=nvidia.com/mig-1g.5gb=1" -echo "Running: $command" -eval $command -printf "\n" -kubectl get deployment ntap-dsutil-triton-$server2_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Use List to check if the servers still exist in default namespace -command="netapp_dataops_k8s_cli.py list triton-servers" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -printf "\n" 
-proceed_prompt -printf "\n" - -### Delete Triton Inference instance 1 -command="netapp_dataops_k8s_cli.py delete triton-server --server-name=$server1_name --force" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl get deployment ntap-dsutil-triton-$server1_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Delete Triton Inference instance 2 -command="netapp_dataops_k8s_cli.py delete triton-server --server-name=$server2_name --force" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl get deployment ntap-dsutil-triton-$server2_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -#################################################### -## Test set B.2. - dsk-test namespace, long opts ## -#################################################### -printf "** Starting: Test set B.2. - dsk-test namespace, long opts\n\n" -namespace="dsk-test" - -### Create workspace 1 -server3_name="b-2-w1" -model_pvc="model-repo-test1" -namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server --namespace=$namespace --server-name=$server3_name --model-repo-pvc-name=$model_pvc" -echo "Running: $command" -eval $command -printf "\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server3_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Create workspace 2 -server4_name="b-2-w2" -model_pvc="model-repo-test1" -namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server --namespace=$namespace --server-name=$server4_name --model-repo-pvc-name=$model_pvc --image=nvcr.io/nvidia/tritonserver:22.03-py3 --memory=512Mi --cpu=0.25 --nvidia-gpu=1 --load-balancer --allocate-resource=nvidia.com/mig-1g.5gb=1" -echo "Running: $command" -eval $command -printf "\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server4_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Use List to check if the servers still exist in namespace: "dsk-test" 
-command="netapp_dataops_k8s_cli.py list triton-servers --namespace=$namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -printf "\n" -proceed_prompt -printf "\n" - -### Delete Triton Inference instance 3 -command="netapp_dataops_k8s_cli.py delete triton-server --server-name=$server3_name --namespace=$namespace --force" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server3_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Delete Triton Inference instance 4 -command="netapp_dataops_k8s_cli.py delete triton-server --server-name=$server4_name --namespace=$namespace --force" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server4_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -##################################################### -## Test set B.3. - dsk-test namespace, short opts ## -##################################################### -printf "** Starting: Test set B.3. 
- dsk-test namespace, short opts\n\n" -namespace="dsk-test" - -### Create workspace 1 -server5_name="b-3-w1" -model_pvc="model-repo-test1" -namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server -n $namespace -s $server5_name -v $model_pvc" -echo "Running: $command" -eval $command -printf "\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server5_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Create workspace 2 -server6_name="b-3-w2" -model_pvc="model-repo-test1" -namespace="dsk-test" -command="netapp_dataops_k8s_cli.py create triton-server -n $namespace -s $server6_name -v $model_pvc -i nvcr.io/nvidia/tritonserver:22.03-py3 -m 512Mi -p 0.25 -g 1 -b -r nvidia.com/mig-1g.5gb=1" -echo "Running: $command" -eval $command -printf "\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server6_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Use List to check if the servers still exist in namespace: "dsk-test" -command="netapp_dataops_k8s_cli.py list triton-servers -n $namespace" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -printf "\n" -proceed_prompt -printf "\n" - -### Delete Triton Inference instance 5 -command="netapp_dataops_k8s_cli.py delete triton-server -s $server5_name -n $namespace -f" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server5_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -### Delete Triton Inference instance 6 -command="netapp_dataops_k8s_cli.py delete triton-server -s $server6_name -n $namespace -f" -echo "Running: $command" -eval $command -printf "\nRetrieving server:\n" -kubectl -n $namespace get deployment ntap-dsutil-triton-$server6_name -o yaml -printf "\n" -proceed_prompt -printf "\n" - -################################### -printf "* Test class B complete!\n" - From 33111a59075ac3e4730679848f04958ba8ecfdbb Mon Sep 17 00:00:00 2001 From: "Acharya, 
Mohan" Date: Thu, 18 May 2023 13:28:44 -0500 Subject: [PATCH 39/56] Add Snaplock type to create_volume --- .../netapp_dataops/traditional.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index f19ac83..7a7d505 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -909,7 +909,13 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa if print_output: print("Error: Invalid volume size specified. Acceptable values are '1024MB', '100GB', '10TB', etc.") raise InvalidVolumeParameterError("size") - + + # Create option to choose snaplock type + if snaplock_type not in ['compliance', 'enterprise', None]: + if print_output: + print("Error: Invalid snaplock volume type specified. Value must be either 'compliance' or 'enterprise' ") + raise InvalidVolumeParameterError("snaplockVolume") + # Create option to choose junction path. 
if junction: junction=junction @@ -967,6 +973,11 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa volumeDict["aggregates"] = [] for aggr in aggregate.split(','): volumeDict["aggregates"].append({'name': aggr}) + + # if snaplock type is valid + if snaplock_type: + volumeDict['snaplock_type'] = {"name": snaplock_type} + #if tiering policy provided if tiering_policy: volumeDict['tiering'] = {'policy': tiering_policy} From 9e687f6c4e0c7cd8d7a865b20ce2beb50cceade5 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Wed, 21 Jun 2023 10:35:32 -0500 Subject: [PATCH 40/56] Add snaplock argument to create_volume --- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 7a7d505..9be3033 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -818,7 +818,7 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = False, cluster_name: str = None, svm_name: str = None, volume_type: str = "flexvol", unix_permissions: str = "0777", - unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", + unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", snaplock_type: str = None, snapshot_policy: str = None, aggregate: str = None, mountpoint: str = None, junction: str = None, readonly: bool = False, print_output: bool = False, tiering_policy: str = None, vol_dp: bool = False): # Retrieve config details from config file From 08b40405b552556e1e03b020568dd78f05ea293a Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Wed, 21 Jun 2023 10:49:06 -0500 Subject: [PATCH 41/56] Add snaplock_type arguments for create_volume --- 
netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 69885fa..4486857 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -180,6 +180,7 @@ \t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. do not use thin provisioning). \t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). \t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). +\t-w, --snaplock-type=\t\tSnaplock type to apply for new volume. (can be 'compliance','enterprise',None) \t-x, --readonly\t\tRead-only option for mounting volumes locally. \t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. \t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). 
@@ -198,6 +199,7 @@ \tnetapp_dataops_cli.py create volume -n testvol -s 10GB -t flexvol -p 0755 -u 1000 -g 1000 -j /project1 \tsudo -E netapp_dataops_cli.py create volume -n vol1 -s 5GB -t flexvol --export-policy=team1 -m /mnt/vol1 \tnetapp_dataops_cli.py create vol -n test2 -s 10GB -t flexvol --snapshot-policy=default --tiering-policy=auto +\tnetapp_dataops_cli.py create volume --name=project1 --size=100GB --snaplock-type=compliance ''' helpTextDeleteSnapshot = ''' Command: delete snapshot From 93c35a9f02bd7f5d1495d632f153da45fe6082cc Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Wed, 21 Jun 2023 11:30:16 -0500 Subject: [PATCH 42/56] Add snaplock_type to cli.py --- .../netapp_dataops/netapp_dataops_cli.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 4486857..0e3b29f 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -913,6 +913,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = exportPolicy = None snapshotPolicy = None mountpoint = None + snaplock_type = None aggregate = None junction = None readonly = False @@ -921,7 +922,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:y", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp"]) + opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:yw:", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", 
"snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp","snaplock-type="]) except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) @@ -965,6 +966,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = tieringPolicy = arg elif opt in ("-y", "--dp"): volDP = True + elif opt in ("-w", "--snaplock-type"): + snaplock_type = arg # Check for required options if not volumeName or not volumeSize: @@ -979,7 +982,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = try: create_volume(svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, - print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP) + print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP, snaplock_type = snaplock_type) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) From 2de72e67be531c8b8046ad577a46d3a413be3d50 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Fri, 7 Jul 2023 09:31:43 -0500 Subject: [PATCH 43/56] Align tab for snaplock_type --- netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 0e3b29f..16851aa 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -180,7 +180,7 @@ \t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. 
do not use thin provisioning). \t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). \t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). -\t-w, --snaplock_type=\t\tSnaplock type to apply for new volume. (can be 'compliance','enterprise',None) +\t-w, --snaplock_type=\tSnaplock type to apply for new volume. (can be 'compliance','enterprise',None) \t-x, --readonly\t\tRead-only option for mounting volumes locally. \t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. \t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). From 9696a036f2fbaedb978c13591717f3e1347f61ec Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Fri, 7 Jul 2023 09:35:13 -0500 Subject: [PATCH 44/56] Modify snaplock_type in create_volume --- netapp_dataops_traditional/netapp_dataops/traditional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 9be3033..03409a0 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -913,7 +913,7 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa # Create option to choose snaplock type if snaplock_type not in ['compliance', 'enterprise', None]: if print_output: - print("Error: Invalid snaplock volume type specified. Value must be either 'Compliance' or 'Enterprise' ") + print("Error: Invalid snaplock volume type specified. Value must be either 'Compliance' or 'Enterprise'") raise InvalidVolumeParameterError("snaplockVolume") # Create option to choose junction path. 
@@ -976,7 +976,7 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa # if snaplock type is valid if snaplock_type: - volumeDict['snaplock_type'] = {"name": snaplock_type} + volumeDict['snaplock'] = {"type": snaplock_type} #if tiering policy provided if tiering_policy: From 42c8016bada271d3532a74f33f88c4148f4bf6d2 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Thu, 27 Jul 2023 10:36:24 -0500 Subject: [PATCH 45/56] Fix typo for snaplock type --- netapp_dataops_traditional/netapp_dataops/traditional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional.py index 03409a0..8de3ba5 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional.py @@ -913,7 +913,7 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa # Create option to choose snaplock type if snaplock_type not in ['compliance', 'enterprise', None]: if print_output: - print("Error: Invalid snaplock volume type specified. Value must be either 'Compliance' or 'Enterprise'") + print("Error: Invalid snaplock volume type specified. Value must be either 'compliance' or 'enterprise'") raise InvalidVolumeParameterError("snaplockVolume") # Create option to choose junction path. 
From ca4e7ffdfa7d8b1d180a8c6d0c4c7009f296c0b7 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Tue, 1 Aug 2023 11:15:50 -0500 Subject: [PATCH 46/56] Update README.md --- netapp_dataops_traditional/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index c17df7c..0296b39 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -217,6 +217,7 @@ The following options/arguments are optional: -j, --junction Specify a custom junction path for the volume to be exported at. -f, --tiering-policy Specify tiering policy for fabric-pool enabled systems (default is 'none'). -y, --dp Create volume as DP volume (the volume will be used as snapmirror target) + -w, --snaplock_type Specify snaplock type to use when creating new volume (compliance/enterprise). ``` ##### Example Usage From 9b2062bec690cd9e1a259fa8d82383337086f55e Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Thu, 28 Sep 2023 11:24:18 -0500 Subject: [PATCH 47/56] Change --snaplock-type in arguments --- netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 16851aa..0ba27d3 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -180,7 +180,7 @@ \t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. do not use thin provisioning). \t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). \t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). -\t-w, --snaplock_type=\tSnaplock type to apply for new volume. 
(can be 'compliance','enterprise',None) +\t-w, --snaplock-type=\tSnaplock type to apply for new volume. (can be 'compliance','enterprise',None) \t-x, --readonly\t\tRead-only option for mounting volumes locally. \t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. \t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). From 7f880a8933e344509ec0df536ca51ee9b5cdd392 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Thu, 28 Sep 2023 11:30:24 -0500 Subject: [PATCH 48/56] Update README.md to include snaplock_type --- netapp_dataops_traditional/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index 0296b39..746d130 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -1061,6 +1061,7 @@ def create_volume: print_output: bool = False, # Denotes whether or not to print messages to the console during execution. tiering_policy: str = None, # For fabric pool enabled system tiering policy can be: none,auto,snapshot-only,all vol_dp: bool = False # Create volume as type DP which can be used as snapmirror destination + snaplock_type: str = None, # Snaplock type to apply for new volume (ex. 'compliance' or 'enterprise') ``` ##### Return Value From aa8b1724ec81ee4791f7c8e3b63271bc4a054d65 Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Thu, 28 Sep 2023 11:32:14 -0500 Subject: [PATCH 49/56] fix indent for snaplock_type --- netapp_dataops_traditional/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index 746d130..df7a9c9 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -217,7 +217,7 @@ The following options/arguments are optional: -j, --junction Specify a custom junction path for the volume to be exported at. 
-f, --tiering-policy Specify tiering policy for fabric-pool enabled systems (default is 'none'). -y, --dp Create volume as DP volume (the volume will be used as snapmirror target) - -w, --snaplock_type Specify snaplock type to use when creating new volume (compliance/enterprise). + -w, --snaplock_type Specify snaplock type to use when creating new volume (compliance/enterprise). 
``` ##### Example Usage From 2bc1d8f17d05a3934b3c0496e02a08d0030a429c Mon Sep 17 00:00:00 2001 From: "Acharya, Mohan" Date: Thu, 28 Sep 2023 11:32:52 -0500 Subject: [PATCH 50/56] fix indent for snaplock_type --- netapp_dataops_traditional/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index df7a9c9..a35e0ad 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -217,7 +217,7 @@ The following options/arguments are optional: -j, --junction Specify a custom junction path for the volume to be exported at. -f, --tiering-policy Specify tiering policy for fabric-pool enabled systems (default is 'none'). -y, --dp Create volume as DP volume (the volume will be used as snapmirror target) - -w, --snaplock_type Specify snaplock type to use when creating new volume (compliance/enterprise). + -w, --snaplock-type Specify snaplock type to use when creating new volume (compliance/enterprise). 
``` ##### Example Usage From e56628420150546be831047c139fa654e8a2124d Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Tue, 17 Oct 2023 16:21:47 -0400 Subject: [PATCH 51/56] update pypi packaging structure --- .../netapp_dataops/{traditional.py => traditional/__init__.py} | 2 +- netapp_dataops_traditional/setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename netapp_dataops_traditional/netapp_dataops/{traditional.py => traditional/__init__.py} (99%) diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py similarity index 99% rename from netapp_dataops_traditional/netapp_dataops/traditional.py rename to netapp_dataops_traditional/netapp_dataops/traditional/__init__.py index 8de3ba5..5b7f3b8 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py @@ -34,7 +34,7 @@ import yaml -__version__ = "2.4.0" +__version__ = "2.5.0b1" # Using this decorator in lieu of using a dependency to manage deprecation diff --git a/netapp_dataops_traditional/setup.cfg b/netapp_dataops_traditional/setup.cfg index 09d45ba..aceb56e 100644 --- a/netapp_dataops_traditional/setup.cfg +++ b/netapp_dataops_traditional/setup.cfg @@ -23,7 +23,7 @@ long_description = The NetApp DataOps Toolkit for Traditional Environments is a long_description_content_type = text/markdown [options] -py_modules = netapp_dataops.traditional +packages = find_namespace: scripts = netapp_dataops/netapp_dataops_cli.py install_requires = From a93410caf3300032a09226aa520c38b871cffa0c Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Tue, 17 Oct 2023 16:24:20 -0400 Subject: [PATCH 52/56] update version number --- README.md | 2 +- netapp_dataops_k8s/netapp_dataops/k8s/__init__.py | 2 +- .../netapp_dataops/traditional/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 
42f9b70..2416718 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The NetApp DataOps Toolkit is a Python-based tool that simplifies the management ## Getting Started -The latest stable release of the NetApp DataOps Toolkit is version 2.4.0. It is recommended to always use the latest stable release. You can access the documentation for the latest stable release [here](https://github.com/NetApp/netapp-dataops-toolkit/tree/v2.4.0) +The latest stable release of the NetApp DataOps Toolkit is version 2.5.0. It is recommended to always use the latest stable release. You can access the documentation for the latest stable release [here](https://github.com/NetApp/netapp-dataops-toolkit/tree/v2.5.0) The NetApp DataOps Toolkit comes in two different flavors. For access to the most capabilities, we recommend using the [NetApp DataOps Toolkit for Kubernetes](netapp_dataops_k8s/). This flavor supports the full functionality of the toolkit, including JupyterLab workspace and NVIDIA Triton Inference Server management capabilities, but requires access to a Kubernetes cluster. diff --git a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py index 033432c..d6a6261 100644 --- a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py +++ b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py @@ -4,7 +4,7 @@ by applications using the import method of utilizing the toolkit. 
""" -__version__ = "2.5.0b1" +__version__ = "2.5.0" import base64 from datetime import datetime diff --git a/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py index 5b7f3b8..b6e8674 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py @@ -34,7 +34,7 @@ import yaml -__version__ = "2.5.0b1" +__version__ = "2.5.0" # Using this decorator in lieu of using a dependency to manage deprecation From 15088a340007fc6f8ddd7cacdaaa25569ec1e366 Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Wed, 18 Oct 2023 13:38:35 -0400 Subject: [PATCH 53/56] update compatible python versions --- netapp_dataops_k8s/README.md | 2 +- netapp_dataops_traditional/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_k8s/README.md b/netapp_dataops_k8s/README.md index 33bdfbf..e90959a 100644 --- a/netapp_dataops_k8s/README.md +++ b/netapp_dataops_k8s/README.md @@ -24,7 +24,7 @@ The toolkit is currently compatible with all versions of the BeeGFS CSI driver, ### Prerequisites -The NetApp DataOps Toolkit for Kubernetes requires that Python 3.8 or above be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). +The NetApp DataOps Toolkit for Kubernetes requires that Python 3.8, 3.9, 3.10, or 3.11 be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). 
### Installation Instructions diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index a35e0ad..5355a3c 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -20,7 +20,7 @@ Note: The 'prepopulate flexcache' operation only supports ONTAP 9.8 and above. A ### Prerequisites -The NetApp DataOps Toolkit for Traditional Environments requires that Python 3.8 or above be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). +The NetApp DataOps Toolkit for Traditional Environments requires that Python 3.8, 3.9, 3.10, or 3.11 be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). 
### Installation Instructions From d2e787d3a3486e6a747ffb90d5e285aed25c71f8 Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Wed, 18 Oct 2023 13:48:54 -0400 Subject: [PATCH 54/56] update container image in Kubeflow and Airflow examples --- netapp_dataops_k8s/Examples/Airflow/ai-training-run.py | 4 ++-- netapp_dataops_k8s/Examples/Airflow/clone-volume.py | 4 ++-- .../Examples/Kubeflow/Pipelines/ai-training-run.py | 4 ++-- .../Examples/Kubeflow/Pipelines/clone-volume.py | 2 +- .../Examples/Kubeflow/Pipelines/delete-snapshot.py | 2 +- .../Examples/Kubeflow/Pipelines/delete-volume.py | 2 +- .../Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py | 4 ++-- .../Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py b/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py index 20a19ff..8a18680 100644 --- a/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py +++ b/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py @@ -109,7 +109,7 @@ # Define step to take a snapshot of the dataset volume for traceability dataset_snapshot = KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ @@ -144,7 +144,7 @@ # Define step to take a snapshot of the model volume for versioning/baselining model_snapshot = KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Airflow/clone-volume.py b/netapp_dataops_k8s/Examples/Airflow/clone-volume.py index 6a39ccb..0bc964c 100644 --- a/netapp_dataops_k8s/Examples/Airflow/clone-volume.py +++ b/netapp_dataops_k8s/Examples/Airflow/clone-volume.py @@ -53,11 +53,11 @@ # Define step to clone source volume clone_volume = 
KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=[arg], name="clone-volume-clone-volume", task_id="clone-volume", is_delete_operator_pod=True, hostnetwork=False - ) \ No newline at end of file + ) diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py index 0838573..7956c2e 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py @@ -49,7 +49,7 @@ def ai_training_run( volume_snapshot_name = "dataset-{{workflow.uid}}" dataset_snapshot = dsl.ContainerOp( name="dataset-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ @@ -85,7 +85,7 @@ def ai_training_run( volume_snapshot_name = "kfp-model-{{workflow.uid}}" model_snapshot = dsl.ContainerOp( name="model-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py index cfa3991..11be3cf 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py @@ -19,7 +19,7 @@ def clone_volume( # Create a clone of the source volume name = "clone-volume" - image = "python:3" + image = "python:3.11" command = ["/bin/bash", "-c"] file_outputs = {"new_volume_pvc_name": "/new_volume_pvc_name.txt"} args = "\ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py index a88f88d..4a79cf9 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py +++ 
b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py @@ -17,7 +17,7 @@ def delete_volume( # Delete Snapshot delete_snapshot = dsl.ContainerOp( name="delete-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py index cb60453..7559c15 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py @@ -17,7 +17,7 @@ def delete_volume( # Delete Volume delete_volume = dsl.ContainerOp( name="delete-volume", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py index 85f48d5..078498b 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py @@ -30,7 +30,7 @@ def netappCloudSyncUpdate(relationshipId: str, printResponse: bool = True, keepC syncCloudSyncRelationship(relationshipID=relationshipId, waitUntilComplete=keepCheckingUntilComplete, printOutput=printResponse) # Convert netappCloudSyncUpdate function to Kubeflow Pipeline ContainerOp named 'NetappCloudSyncUpdateOp' -NetappCloudSyncUpdateOp = comp.func_to_container_op(netappCloudSyncUpdate, base_image='python:3') +NetappCloudSyncUpdateOp = comp.func_to_container_op(netappCloudSyncUpdate, base_image='python:3.11') # Define Kubeflow Pipeline @@ -61,4 +61,4 @@ def replicate_data_cloud_sync( if __name__ == '__main__' : import kfp.compiler as compiler - compiler.Compiler().compile(replicate_data_cloud_sync, __file__ + '.yaml') \ No newline at end 
of file + compiler.Compiler().compile(replicate_data_cloud_sync, __file__ + '.yaml') diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py index 4a90076..c7e39b4 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py @@ -54,7 +54,7 @@ def netappSnapMirrorUpdate( syncSnapMirrorRelationship(uuid=uuid, waitUntilComplete=waitUntilComplete, printOutput=True) # Convert netappSnapMirrorUpdate function to Kubeflow Pipeline ContainerOp named 'NetappSnapMirrorUpdateOp' -NetappSnapMirrorUpdateOp = comp.func_to_container_op(netappSnapMirrorUpdate, base_image='python:3') +NetappSnapMirrorUpdateOp = comp.func_to_container_op(netappSnapMirrorUpdate, base_image='python:3.11') # Define Kubeflow Pipeline From 786fdfd644708d530d1f039f66ebd8a19293c938 Mon Sep 17 00:00:00 2001 From: "Oglesby, Michael" Date: Wed, 18 Oct 2023 14:39:21 -0400 Subject: [PATCH 55/56] update python version compatibility --- netapp_dataops_k8s/setup.cfg | 2 +- netapp_dataops_traditional/setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_k8s/setup.cfg b/netapp_dataops_k8s/setup.cfg index b5b127b..bc18204 100644 --- a/netapp_dataops_k8s/setup.cfg +++ b/netapp_dataops_k8s/setup.cfg @@ -30,7 +30,7 @@ install_requires = numpy>=1.22.0 tabulate kubernetes -python_requires = >=3.8 +python_requires = '>=3.8, <=3.11' [options.packages.find] exclude = Examples.* diff --git a/netapp_dataops_traditional/setup.cfg b/netapp_dataops_traditional/setup.cfg index aceb56e..171be74 100644 --- a/netapp_dataops_traditional/setup.cfg +++ b/netapp_dataops_traditional/setup.cfg @@ -34,4 +34,4 @@ install_requires = requests boto3 pyyaml -python_requires = >=3.8 +python_requires = '>=3.8, <=3.11' From 51982c897aad010c4602254b07ecd2f6731e950a Mon Sep 17 00:00:00 2001 
From: "Oglesby, Michael" Date: Tue, 26 Mar 2024 18:20:49 -0400 Subject: [PATCH 56/56] Update setup.cfg --- netapp_dataops_k8s/setup.cfg | 4 +++- netapp_dataops_traditional/setup.cfg | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/netapp_dataops_k8s/setup.cfg b/netapp_dataops_k8s/setup.cfg index bc18204..fc7ba3e 100644 --- a/netapp_dataops_k8s/setup.cfg +++ b/netapp_dataops_k8s/setup.cfg @@ -13,6 +13,8 @@ classifiers = Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 project_urls = Bug Tracker = https://github.com/NetApp/netapp-data-science-toolkit/issues Documentation = https://github.com/NetApp/netapp-data-science-toolkit/blob/main/README.md @@ -30,7 +32,7 @@ install_requires = numpy>=1.22.0 tabulate kubernetes -python_requires = '>=3.8, <=3.11' +python_requires = >=3.8,<3.12 [options.packages.find] exclude = Examples.* diff --git a/netapp_dataops_traditional/setup.cfg b/netapp_dataops_traditional/setup.cfg index 171be74..75b4d42 100644 --- a/netapp_dataops_traditional/setup.cfg +++ b/netapp_dataops_traditional/setup.cfg @@ -15,6 +15,8 @@ classifiers = Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 project_urls = Bug Tracker = https://github.com/NetApp/netapp-data-science-toolkit/issues Documentation = https://github.com/NetApp/netapp-data-science-toolkit/blob/main/README.md @@ -34,4 +36,4 @@ install_requires = requests boto3 pyyaml -python_requires = '>=3.8, <=3.11' +python_requires = >=3.8,<3.12