diff --git a/README.md b/README.md index 42f9b70..2416718 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The NetApp DataOps Toolkit is a Python-based tool that simplifies the management ## Getting Started -The latest stable release of the NetApp DataOps Toolkit is version 2.4.0. It is recommended to always use the latest stable release. You can access the documentation for the latest stable release [here](https://github.com/NetApp/netapp-dataops-toolkit/tree/v2.4.0) +The latest stable release of the NetApp DataOps Toolkit is version 2.5.0. It is recommended to always use the latest stable release. You can access the documentation for the latest stable release [here](https://github.com/NetApp/netapp-dataops-toolkit/tree/v2.5.0) The NetApp DataOps Toolkit comes in two different flavors. For access to the most capabilities, we recommend using the [NetApp DataOps Toolkit for Kubernetes](netapp_dataops_k8s/). This flavor supports the full functionality of the toolkit, including JupyterLab workspace and NVIDIA Triton Inference Server management capabilities, but requires access to a Kubernetes cluster. 
diff --git a/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py b/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py index 20a19ff..8a18680 100644 --- a/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py +++ b/netapp_dataops_k8s/Examples/Airflow/ai-training-run.py @@ -109,7 +109,7 @@ # Define step to take a snapshot of the dataset volume for traceability dataset_snapshot = KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ @@ -144,7 +144,7 @@ # Define step to take a snapshot of the model volume for versioning/baselining model_snapshot = KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Airflow/clone-volume.py b/netapp_dataops_k8s/Examples/Airflow/clone-volume.py index 6a39ccb..0bc964c 100644 --- a/netapp_dataops_k8s/Examples/Airflow/clone-volume.py +++ b/netapp_dataops_k8s/Examples/Airflow/clone-volume.py @@ -53,11 +53,11 @@ # Define step to clone source volume clone_volume = KubernetesPodOperator( namespace=namespace, - image="python:3", + image="python:3.11", cmds=["/bin/bash", "-c"], arguments=[arg], name="clone-volume-clone-volume", task_id="clone-volume", is_delete_operator_pod=True, hostnetwork=False - ) \ No newline at end of file + ) diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py index 0838573..7956c2e 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/ai-training-run.py @@ -49,7 +49,7 @@ def ai_training_run( volume_snapshot_name = "dataset-{{workflow.uid}}" dataset_snapshot = dsl.ContainerOp( name="dataset-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", 
"-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ @@ -85,7 +85,7 @@ def ai_training_run( volume_snapshot_name = "kfp-model-{{workflow.uid}}" model_snapshot = dsl.ContainerOp( name="model-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py index cfa3991..11be3cf 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/clone-volume.py @@ -19,7 +19,7 @@ def clone_volume( # Create a clone of the source volume name = "clone-volume" - image = "python:3" + image = "python:3.11" command = ["/bin/bash", "-c"] file_outputs = {"new_volume_pvc_name": "/new_volume_pvc_name.txt"} args = "\ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py index a88f88d..4a79cf9 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-snapshot.py @@ -17,7 +17,7 @@ def delete_volume( # Delete Snapshot delete_snapshot = dsl.ContainerOp( name="delete-snapshot", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py index cb60453..7559c15 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/delete-volume.py @@ -17,7 +17,7 @@ def delete_volume( # Delete Volume delete_volume = dsl.ContainerOp( name="delete-volume", - image="python:3", + image="python:3.11", command=["/bin/bash", "-c"], arguments=["\ python3 -m pip 
install netapp-dataops-k8s && \ diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py index 85f48d5..078498b 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-cloud-sync.py @@ -30,7 +30,7 @@ def netappCloudSyncUpdate(relationshipId: str, printResponse: bool = True, keepC syncCloudSyncRelationship(relationshipID=relationshipId, waitUntilComplete=keepCheckingUntilComplete, printOutput=printResponse) # Convert netappCloudSyncUpdate function to Kubeflow Pipeline ContainerOp named 'NetappCloudSyncUpdateOp' -NetappCloudSyncUpdateOp = comp.func_to_container_op(netappCloudSyncUpdate, base_image='python:3') +NetappCloudSyncUpdateOp = comp.func_to_container_op(netappCloudSyncUpdate, base_image='python:3.11') # Define Kubeflow Pipeline @@ -61,4 +61,4 @@ def replicate_data_cloud_sync( if __name__ == '__main__' : import kfp.compiler as compiler - compiler.Compiler().compile(replicate_data_cloud_sync, __file__ + '.yaml') \ No newline at end of file + compiler.Compiler().compile(replicate_data_cloud_sync, __file__ + '.yaml') diff --git a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py index 4a90076..c7e39b4 100644 --- a/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py +++ b/netapp_dataops_k8s/Examples/Kubeflow/Pipelines/replicate-data-snapmirror.py @@ -54,7 +54,7 @@ def netappSnapMirrorUpdate( syncSnapMirrorRelationship(uuid=uuid, waitUntilComplete=waitUntilComplete, printOutput=True) # Convert netappSnapMirrorUpdate function to Kubeflow Pipeline ContainerOp named 'NetappSnapMirrorUpdateOp' -NetappSnapMirrorUpdateOp = comp.func_to_container_op(netappSnapMirrorUpdate, base_image='python:3') +NetappSnapMirrorUpdateOp = 
comp.func_to_container_op(netappSnapMirrorUpdate, base_image='python:3.11') # Define Kubeflow Pipeline diff --git a/netapp_dataops_k8s/README.md b/netapp_dataops_k8s/README.md index a30f75f..e90959a 100644 --- a/netapp_dataops_k8s/README.md +++ b/netapp_dataops_k8s/README.md @@ -9,7 +9,7 @@ The NetApp DataOps Toolkit for Kubernetes supports Linux and macOS hosts. The toolkit must be used in conjunction with a Kubernetes cluster in order to be useful. Additionally, [Trident](https://netapp.io/persistent-storage-provisioner-for-kubernetes/), NetApp's dynamic storage orchestrator for Kubernetes, and/or the [BeeGFS CSI driver](https://github.com/NetApp/beegfs-csi-driver/) must be installed within the Kubernetes cluster. The toolkit simplifies performing of various data management tasks that are actually executed by a NetApp maintained CSI driver. In order to facilitate this, the toolkit communicates with the appropriate driver via the Kubernetes API. -The toolkit is currently compatible with Kubernetes versions 1.17 and above, and OpenShift versions 4.4 and above. +The toolkit is currently compatible with Kubernetes versions 1.20 and above, and OpenShift versions 4.7 and above. The toolkit is currently compatible with Trident versions 20.07 and above. Additionally, the toolkit is compatible with the following Trident backend types: @@ -24,7 +24,7 @@ The toolkit is currently compatible with all versions of the BeeGFS CSI driver, ### Prerequisites -The NetApp DataOps Toolkit for Kubernetes requires that Python 3.8 or above be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). +The NetApp DataOps Toolkit for Kubernetes requires that Python 3.8, 3.9, 3.10, or 3.11 be installed on the local host. 
Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). ### Installation Instructions @@ -67,23 +67,6 @@ In the [Examples](Examples/) directory, you will find the following examples per Refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/) for more information on accessing the Kubernetes API from within a pod. -## Extended Functionality with Astra Control - -The NetApp DataOps Toolkit provides several extended capabilities that require [Astra Control](https://cloud.netapp.com/astra). Any operation that requires Astra Control is specifically noted within the documentation as requiring Astra Control. The prerequisites outlined in this section are required in order to perform any operation that requires Astra Control. - -The toolkit uses the Astra Control Python SDK to interface with the Astra Control API. The Astra Control Python SDK is installed automatically when you install the NetApp DataOps Toolkit using pip. - -In order for the Astra Control Python SDK to be able to communicate with the Astra Control API, you must create a 'config.yaml' file containing your Astra Control API connection details. Refer to the [Astra Control Python SDK README](https://github.com/NetApp/netapp-astra-toolkits/tree/v2.1.3) for formatting details. Note that you do not need to follow the installation instructions outlined in the Astra Control Python SDK README; you only need to create the 'config.yaml' file. 
Once you have created the 'config.yaml' file, you must store it in one of the following locations: -- ~/.config/astra-toolkits/ -- /etc/astra-toolkits/ -- The directory pointed to by the shell environment variable 'ASTRATOOLKITS_CONF' - -Additionally, you must set the shell environment variable 'ASTRA_K8S_CLUSTER_NAME' to the name of your specific Kubernetes cluster in Astra Control. - -```sh -export ASTRA_K8S_CLUSTER_NAME=" - -#### Clone a JupyterLab Workspace to a Brand New Namespace - -The NetApp DataOps Toolkit can be used to rapidly provision a new JupyterLab workspace (within a brand new Kubernetes namespace) that is an exact copy of an existing JupyterLab workspace. In other words, the NetApp DataOps Toolkit can be used to rapidly clone a JupyterLab workspace to a brand new namespace. The command for cloning a JupyterLab workspace to a brand new namespace is `netapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -j, --source-workspace-name= Name of JupyterLab workspace to use as source for clone. - -n, --new-namespace= Kubernetes namespace to create new workspace in. This namespace must not exist; it will be created during this operation. -``` - -The following options/arguments are optional: - -``` - -c, --clone-to-cluster-name= Name of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. - -h, --help Print help text. - -s, --source-namespace= Kubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. -``` - -##### Example Usage - -Clone the JupyterLab workspace 'ws1' in namespace 'default' to a brand new namespace named 'team2'. 
- -```sh -netapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab --source-workspace-name=ws1 --new-namespace=team2 -Creating new JupyterLab workspace 'ws1' in namespace 'team2' within your cluster using Astra Control... -New workspace is being cloned from source workspace 'ws1' in namespace 'default' within your cluster... - -Astra SDK output: -{'type': 'application/astra-managedApp', 'version': '1.1', 'id': '9949c21c-8c36-44e8-b3cf-317eb393177f', 'name': 'ntap-dsutil-jupyterlab-ws1', 'state': 'provisioning', 'stateUnready': [], 'managedState': 'managed', 'managedStateUnready': [], 'managedTimestamp': '2021-10-11T20:36:09Z', 'protectionState': '', 'protectionStateUnready': [], 'appDefnSource': 'other', 'appLabels': [], 'system': 'false', 'namespace': 'team2', 'clusterName': 'ocp1', 'clusterID': '50b1e635-075f-42bb-bf81-3a6fd5518d2b', 'clusterType': 'openshift', 'sourceAppID': 'e6ac2e92-6abf-43c9-ac94-0437dc543149', 'sourceClusterID': '50b1e635-075f-42bb-bf81-3a6fd5518d2b', 'backupID': 'ee24afec-93c7-4226-9da3-006b2a870458', 'metadata': {'labels': [{'name': 'astra.netapp.io/labels/read-only/appType', 'value': 'clone'}], 'creationTimestamp': '2021-10-11T20:36:09Z', 'modificationTimestamp': '2021-10-11T20:36:09Z', 'createdBy': '946d8bb0-0d88-4469-baf4-8cfef52a7a90'}} - -Clone operation has been initiated. The operation may take several minutes to complete. -If the new workspace is being created within your cluster, run 'netapp_dataops_k8s_cli.py list jupyterlabs -n team2 -a' to check the status of the new workspace. -``` - #### Create a New JupyterLab Workspace @@ -182,7 +140,6 @@ The following options/arguments are optional: -n, --namespace= Kubernetes namespace to create new workspace in. If not specified, workspace will be created in namespace "default". -p, --cpu= Number of CPUs to reserve for JupyterLab workspace. Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. 
-b, --load-balancer Option to choose a LoadBalancer service instead of using NodePort service. If not specified, NodePort service will be utilized. - -a, --register-with-astra Register new workspace with Astra Control (requires Astra Control). -v, --mount-pvc Option to attach an additional existing PVC that can be mounted at a spefic path whithin the container. Format: -v/--mount-pvc=existing_pvc_name:mount_point. If not specified, no additional PVC will be attached. -r, --allocate-resource= Option to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. ``` @@ -284,7 +241,6 @@ The following options/arguments are optional: ``` -h, --help Print help text. -n, --namespace= Kubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. - -a, --include-astra-app-id Include Astra Control app IDs in the output (requires Astra Control API access). ``` ##### Example Usage @@ -450,74 +406,6 @@ Waiting for Deployment 'ntap-dsutil-jupyterlab-mike' to reach Ready state. JupyterLab workspace snapshot successfully restored. ``` - - -#### Register an Existing JupyterLab Workspace with Astra Control - -The NetApp DataOps Toolkit can be used to register an existing JupyterLab workspace with Astra Control. The command for registering an existing JupyterLab workspace with Astra Control is `netapp_dataops_k8s_cli.py register-with-astra jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -w, --workspace-name= Name of JupyterLab workspace to be registered. -``` - -The following options/arguments are optional: - -``` - -h, --help Print help text. - -n, --namespace= Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. -``` - -##### Example Usage - -Register the workspace 'mike' in namespace 'project1' with Astra Control. 
- -```sh -netapp_dataops_k8s_cli.py register-with-astra jupyterlab -n project1 -w mike -Registering JupyterLab workspace 'mike' in namespace 'project1' with Astra Control... -JupyterLab workspace is now managed by Astra Control. -``` - - - -#### Backup a JupyterLab Workspace Using Astra Control - -The NetApp DataOps Toolkit can be used to trigger a backup of an existing JupyterLab workspace using Astra Control. The command for triggering a backup of an existing JupyterLab workspace using Astra Control is `netapp_dataops_k8s_cli.py backup-with-astra jupyterlab`. - -Note: This command requires Astra Control. - -The following options/arguments are required: - -``` - -w, --workspace-name= Name of JupyterLab workspace to be backed up. - -b, --backup-name= Name to be applied to new backup. -``` - -The following options/arguments are optional: - -``` - -h, --help Print help text. - -n, --namespace= Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. -``` - -##### Example Usage - -Backup the workspace 'ws1' in namespace 'default' using Astra Control; name the backup 'backup1'. - -```sh -netapp_dataops_k8s_cli.py backup-with-astra jupyterlab --workspace-name=ws1 --backup-name=backup1 -Trigerring backup of workspace 'ws1' in namespace 'default' using Astra Control... - -Astra SDK output: -{'type': 'application/astra-appBackup', 'version': '1.1', 'id': 'bd4ee39e-a3f6-4cf4-a75e-2a09d71b2b03', 'name': 'backup1', 'bucketID': '1e547cee-fbb9-4097-9a64-f542a79d6e80', 'state': 'pending', 'stateUnready': [], 'metadata': {'labels': [], 'creationTimestamp': '2021-10-11T20:39:59Z', 'modificationTimestamp': '2021-10-11T20:39:59Z', 'createdBy': '946d8bb0-0d88-4469-baf4-8cfef52a7a90'}} - -Backup operation has been initiated. The operation may take several minutes to complete. -Access the Astra Control dashboard to check the status of the backup operation. 
-``` - ## Advanced: Set of Functions @@ -525,24 +413,21 @@ Access the Astra Control dashboard to check the status of the backup operation. The NetApp DataOps Toolkit for Kubernetes provides a set of functions that can be imported into any Python program or Jupyter Notebook. In this manner, data scientists and data engineers can easily incorporate Kubernetes-native data management tasks into their existing projects, programs, and workflows. This functionality is only recommended for advanced users who are proficient in Python. ```py -from netapp_dataops.k8s import clone_jupyter_lab, clone_jupyter_lab_to_new_namespace, create_jupyter_lab, delete_jupyter_lab, list_jupyter_labs, create_jupyter_lab_snapshot, list_jupyter_lab_snapshots, restore_jupyter_lab_snapshot, register_jupyter_lab_with_astra, backup_jupyter_lab_with_astra +from netapp_dataops.k8s import clone_jupyter_lab, create_jupyter_lab, delete_jupyter_lab, list_jupyter_labs, create_jupyter_lab_snapshot, list_jupyter_lab_snapshots, restore_jupyter_lab_snapshot ``` The following workspace management operations are available within the set of functions. 
-| JupyterLab workspace management operations | Supported by BeeGFS | Supported by Trident | Requires Astra Control | -| ------------------------------------------------------------------------------------ | ------------------- | -------------------- | ---------------------- | -| [Clone a JupyterLab workspace within the same namespace.](#lib-clone-jupyterlab) | No | Yes | No | -| [Clone a JupyterLab workspace to a brand new namespace.](#lib-clone-new-jupyterlab) | No | Yes | Yes | -| [Create a new JupyterLab workspace.](#lib-create-jupyterlab) | Yes | Yes | No | -| [Delete an existing JupyterLab workspace.](#lib-delete-jupyterlab) | Yes | Yes | No | -| [List all JupyterLab workspaces.](#lib-list-jupyterlabs) | Yes | Yes | No | -| [Create a new snapshot for a JupyterLab workspace.](#lib-create-jupyterlab-snapshot) | No | Yes | No | -| [Delete an existing snapshot.](#lib-delete-jupyterlab-snapshot) | No | Yes | No | -| [List all snapshots.](#lib-list-jupyterlab-snapshots) | No | Yes | No | -| [Restore a snapshot.](#lib-restore-jupyterlab-snapshot) | No | Yes | No | -| [Register a JupyterLab workspace with Astra Control.](#lib-register-jupyterlab) | No | Yes | Yes | -| [Backup a JupyterLab workspace using Astra Control.](#lib-backup-jupyterlab) | No | Yes | Yes | +| JupyterLab workspace management operations | Supported by BeeGFS | Supported by Trident | +| ------------------------------------------------------------------------------------ | ------------------- | -------------------- | +| [Clone a JupyterLab workspace within the same namespace.](#lib-clone-jupyterlab) | No | Yes | +| [Create a new JupyterLab workspace.](#lib-create-jupyterlab) | Yes | Yes | +| [Delete an existing JupyterLab workspace.](#lib-delete-jupyterlab) | Yes | Yes | +| [List all JupyterLab workspaces.](#lib-list-jupyterlabs) | Yes | Yes | +| [Create a new snapshot for a JupyterLab workspace.](#lib-create-jupyterlab-snapshot) | No | Yes | +| [Delete an existing 
snapshot.](#lib-delete-jupyterlab-snapshot) | No | Yes | +| [List all snapshots.](#lib-list-jupyterlab-snapshots) | No | Yes | +| [Restore a snapshot.](#lib-restore-jupyterlab-snapshot) | No | Yes | ### JupyterLab Workspace Management Operations @@ -587,41 +472,6 @@ APIConnectionError # The Kubernetes API returned an error. ServiceUnavailableError # A Kubernetes service is not available. ``` - - -#### Clone a JupyterLab Workspace to a Brand New Naamespace - -The NetApp DataOps Toolkit can be used to rapidly provision a new JupyterLab workspace (within a brand new Kubernetes namespace) that is an exact copy of an existing JupyterLab workspace, as part of any Python program or workflow. In other words, the NetApp DataOps Toolkit can be used to rapidly clone a JupyterLab workspace to a brand new namespace. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def clone_jupyter_lab_to_new_namespace( - source_workspace_name: str, # Name of JupyterLab workspace to use as source for clone (required). - new_namespace: str, # Kubernetes namespace to create new workspace in (required). This namespace must not exist; it will be created during this operation. - source_workspace_namespace: str = "default", # Kubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. - clone_to_cluster_name: str = None, # Name of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. 
- -```py -InvalidConfigError # kubeconfig file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra API returned an error. -AstraAppNotManagedError # The source JupyterLab workspace has not been registered with Astra Control. -AstraClusterDoesNotExistError # The destination cluster does not exist in Astra Control. -``` - #### Create a New JupyterLab Workspace @@ -645,7 +495,6 @@ def create_jupyter_lab( request_cpu: str = None, # Number of CPUs to reserve for JupyterLab workspace. Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. request_memory: str = None, # Amount of memory to reserve for JupyterLab workspace. Format: '1024Mi', '100Gi', '10Ti', etc. If not specified, no memory will be reserved. request_nvidia_gpu: str = None, # Number of NVIDIA GPUs to allocate to JupyterLab workspace. Format: '1', '4', etc. If not specified, no GPUs will be allocated. - register_with_astra: bool = False, # Register new workspace with Astra Control (requires Astra Control). allocate_resource: str = None, # Option to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) -> str : @@ -706,14 +555,13 @@ The NetApp DataOps Toolkit can be used to retrieve a list of all existing Jupyte ```py def list_jupyter_labs( namespace: str = "default", # Kubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. - include_astra_app_id: bool = False, # Include Astra Control app IDs in the output (requires Astra Control API access). print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) -> list : ``` ##### Return Value -The function returns a list of all existing JupyterLab workspaces. Each item in the list will be a dictionary containing details regarding a specific workspace. 
The keys for the values in this dictionary are "Workspace Name", "Status", "Size", "StorageClass", "Access URL", "Clone" (Yes/No), "Source Workspace", and "Source VolumeSnapshot". If `include_astra_app_id` is set to `True`, then "Astra Control App ID" will also be included as a key in the dictionary. +The function returns a list of all existing JupyterLab workspaces. Each item in the list will be a dictionary containing details regarding a specific workspace. The keys for the values in this dictionary are "Workspace Name", "Status", "Size", "StorageClass", "Access URL", "Clone" (Yes/No), "Source Workspace", and "Source VolumeSnapshot". Note: The value of the "Clone" field will be "Yes" only if the workspace was cloned, using the DataOps Toolkit, from a source workspace within the same namespace. @@ -828,68 +676,3 @@ If an error is encountered, the function will raise an exception of one of the f InvalidConfigError # kubeconfig file is missing or is invalid. APIConnectionError # The Kubernetes API returned an error. ``` - - - -#### Register an Existing JupyterLab Workspace with Astra Control - -The NetApp DataOps Toolkit can be used to register an existing JupyterLab workspace with Astra Control as part of any Python program or workflow. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def register_jupyter_lab_with_astra( - workspace_name: str, # Name of JupyterLab workspace to be registered (required). - namespace: str = "default", # Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. 
- -```py -InvalidConfigError # kubeconfig or AstraSDK config file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra Control API returned an error. -AstraAppDoesNotExistError # App does not exist in Astra. Are you sure that the workspace name is correct? -``` - - - -#### Backup a JupyterLab Workspace Using Astra Control - -The NetApp DataOps Toolkit can be used to trigger a backup of an existing JupyterLab workspace using Astra Control as part of any Python program or workflow. - -Note: This function requires Astra Control. - -##### Function Definition - -```py -def backup_jupyter_lab_with_astra( - workspace_name: str, # Name of JupyterLab workspace to be backed up (required). - backup_name: str, # Name to be applied to new backup (required) - namespace: str = "default", # Kubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - print_output: bool = False # Denotes whether or not to print messages to the console during execution. -) : -``` - -##### Return Value - -None - -##### Error Handling - -If an error is encountered, the function will raise an exception of one of the following types. These exception types are defined in `netapp_dataops.k8s`. - -```py -InvalidConfigError # kubeconfig or AstraSDK config file is missing or is invalid. -APIConnectionError # The Kubernetes or Astra Control API returned an error. -AstraAppNotManagedError # JupyterLab workspace has not been registered with Astra Control. -``` diff --git a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py index 7ec6412..d6a6261 100644 --- a/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py +++ b/netapp_dataops_k8s/netapp_dataops/k8s/__init__.py @@ -4,7 +4,7 @@ by applications using the import method of utilizing the toolkit. 
""" -__version__ = "2.4.0" +__version__ = "2.5.0" import base64 from datetime import datetime @@ -24,7 +24,6 @@ from kubernetes.client.rest import ApiException from tabulate import tabulate import pandas as pd -import astraSDK # Using this decorator in lieu of using a dependency to manage deprecation @@ -53,21 +52,6 @@ class InvalidConfigError(Exception): pass -class AstraAppNotManagedError(Exception): - '''Error that will be raised when an application hasn't been registered with Astra''' - pass - - -class AstraClusterDoesNotExistError(Exception): - '''Error that will be raised when a cluster doesn't exist within Astra Control''' - pass - - -class AstraAppDoesNotExistError(Exception): - '''Error that will be raised when an app doesn't exist within Astra Control''' - pass - - class ServiceUnavailableError(Exception): '''Error that will be raised when a service is not available''' pass @@ -142,20 +126,20 @@ def _load_kube_config2(print_output: bool = False): if print_output: _print_invalid_config_error() raise InvalidConfigError() + + +def _astra_not_supported_message(print_output: bool = False) : + error_text = "Error: Astra Control functionality within the DataOps Toolkit is no longer supported. Please use the Astra SDK and/or toolkit. For details, visit https://github.com/NetApp/netapp-astra-toolkits." + if print_output : + print(error_text) + raise APIConnectionError(error_text) -def _get_astra_k8s_cluster_name() -> str : - return os.environ['ASTRA_K8S_CLUSTER_NAME'] - def _print_invalid_config_error(): print( "Error: Missing or invalid kubeconfig file. The NetApp DataOps Toolkit for Kubernetes requires that a valid kubeconfig file be present on the host, located at $HOME/.kube or at another path specified by the KUBECONFIG environment variable.") -def _print_astra_k8s_cluster_name_error() : - print("Error: ASTRA_K8S_CLUSTER_NAME environment variable is not set. 
This environment variable should be set to the name of your Kubernetes cluster within Astra Control.") - - def _retrieve_image_for_jupyter_lab_deployment(workspaceName: str, namespace: str = "default", printOutput: bool = False) -> str: # Retrieve kubeconfig @@ -459,7 +443,7 @@ def _get_snapshot_api_group() -> str: def _get_snapshot_api_version() -> str: - return "v1beta1" + return "v1" def _wait_for_jupyter_lab_deployment_ready(workspaceName: str, namespace: str = "default", printOutput: bool = False): @@ -516,36 +500,6 @@ def _wait_for_triton_dev_deployment(server_name: str, namespace: str = "default" sleep(5) -def _retrieve_astra_app_id_for_jupyter_lab(astra_apps: dict, workspace_name: str, include_full_app_details: bool = False) -> str : - # Get Astra K8s cluster name - try : - astra_k8s_cluster_name = _get_astra_k8s_cluster_name() - except KeyError : - raise InvalidConfigError() - - # Parse Astra Apps - for app_details in astra_apps["items"] : - # Check cluster name - if app_details["clusterName"] != astra_k8s_cluster_name : - pass - - # Get app label for workspace - workspace_app_label = _get_jupyter_lab_labels(workspaceName=workspace_name)["app"] - - # See if app label matches - for app_labels in app_details["appLabels"] : - if (app_labels["name"] == "app") and (app_labels["value"] == workspace_app_label) : - if include_full_app_details : - return app_details["id"], app_details - else : - return app_details["id"] - - if include_full_app_details : - return "", None - else : - return "" - - # # Public classes # @@ -656,79 +610,6 @@ def clone_jupyter_lab(new_workspace_name: str, source_workspace_name: str, sourc return url -def clone_jupyter_lab_to_new_namespace(source_workspace_name: str, new_namespace: str, source_workspace_namespace: str = "default", clone_to_cluster_name: str = None, print_output: bool = False) : - # Retrieve list of Astra apps - try : - astra_apps = astraSDK.getApps().main(namespace=source_workspace_namespace) - except Exception as err : - 
if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for source workspace - try : - source_astra_app_id, source_astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=source_workspace_name, include_full_app_details=True) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Handle situation where workspace has not been registered with Astra. - if not source_astra_app_id : - error_message = "Source JupyterLab workspace has not been registered with Astra Control." - if print_output : - print("Error:", error_message) - print("Hint: use the 'netapp_dataops_k8s_cli.py register-with-astra jupyterlab' command to register a JupyterLab workspace with Astra Control.") - raise AstraAppNotManagedError(error_message) - - # Determine Astra cluster ID for source workspace - source_astra_cluster_id = source_astra_app_details["clusterID"] - - # Determine Astra cluster ID for "clone-to" cluster - if clone_to_cluster_name : - clone_to_cluster_id = None - try : - astra_clusters = astraSDK.getClusters().main(hideUnmanaged=True) - except Exception as err : - if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - for cluster_info in astra_clusters["items"] : - if cluster_info["name"] == clone_to_cluster_name : - clone_to_cluster_id = cluster_info["id"] - - if not clone_to_cluster_id : - error_message = "Cluster '" + clone_to_cluster_name + "' does not exist in Astra Control." 
- if print_output : - print("Error:", error_message) - raise AstraClusterDoesNotExistError(error_message) - - print("Creating new JupyterLab workspace '" + source_workspace_name + "' in namespace '" + new_namespace + "' within cluster '" + clone_to_cluster_name + "' using Astra Control...") - else : - clone_to_cluster_id = source_astra_cluster_id - print("Creating new JupyterLab workspace '" + source_workspace_name + "' in namespace '" + new_namespace + "' within your cluster using Astra Control...") - - # Clone workspace to new namespace using Astra - print("New workspace is being cloned from source workspace '" + source_workspace_name + "' in namespace '" + source_workspace_namespace + "' within your cluster...") - print("\nAstra SDK output:") - try : - ret = astraSDK.cloneApp(quiet=False).main(cloneName=_get_jupyter_lab_deployment(source_workspace_name), clusterID=clone_to_cluster_id, sourceClusterID=source_astra_cluster_id, namespace=new_namespace, sourceAppID=source_astra_app_id) - except Exception as err : - if print_output : - print("\nError: Astra Control API Error: ", err) - raise APIConnectionError(err) - - if ret == False : - if print_output : - print("\nError: Astra Control API error. See Astra SDK output above for details") - raise APIConnectionError("Astra Control API error.") - - if print_output : - print("\nClone operation has been initiated. 
The operation may take several minutes to complete.") - print("If the new workspace is being created within your cluster, run 'netapp_dataops_k8s_cli.py list jupyterlabs -n " + new_namespace + " -a' to check the status of the new workspace.") - - def clone_volume(new_pvc_name: str, source_pvc_name: str, source_snapshot_name: str = None, volume_snapshot_class: str = "csi-snapclass", namespace: str = "default", print_output: bool = False, pvc_labels: dict = None): @@ -1020,9 +901,7 @@ def create_jupyter_lab(workspace_name: str, workspace_size: str, mount_pvc: str # (Optional) Step 5 - Register workspace with Astra Control if register_with_astra : - if print_output : - print() - register_jupyter_lab_with_astra(workspace_name=workspace_name, namespace=namespace, print_output=print_output) + _astra_not_supported_message(print_output=print_output) if print_output: print("\nWorkspace successfully created.") @@ -1524,6 +1403,7 @@ def delete_jupyter_lab(workspace_name: str, namespace: str = "default", preserve if print_output: print("Workspace successfully deleted.") + def delete_triton_server(server_name: str, namespace: str = "default", print_output: bool = False): # Retrieve kubeconfig @@ -1558,6 +1438,7 @@ def delete_triton_server(server_name: str, namespace: str = "default", if print_output: print("Triton Server instance successfully deleted.") + def delete_k8s_config_map(name: str, namespace: str, print_output: bool = False): """Delete a Kubernetes config map with the provided name from the provided namespace. 
@@ -1700,12 +1581,7 @@ def list_jupyter_labs(namespace: str = "default", include_astra_app_id: bool = F # Retrieve list of Astra apps if include_astra_app_id : - try : - astra_apps = astraSDK.getApps().main(namespace=namespace) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) + _astra_not_supported_message(print_output=print_output) # Construct list of workspaces workspacesList = list() @@ -1776,15 +1652,6 @@ def list_jupyter_labs(namespace: str = "default", include_astra_app_id: bool = F workspaceDict["Source Workspace"] = "" workspaceDict["Source VolumeSnapshot"] = "" - # Retrieve Astra App ID - if include_astra_app_id : - try : - workspaceDict["Astra Control App ID"] = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=workspaceName) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - # Append dict to list of workspaces workspacesList.append(workspaceDict) @@ -2026,115 +1893,6 @@ def list_volume_snapshots(pvc_name: str = None, namespace: str = "default", prin return snapshotsList -def register_jupyter_lab_with_astra(workspace_name: str, namespace: str = "default", print_output: bool = False) : - # Retrieve list of unmanaged Astra apps - try : - astra_apps_unmanaged = astraSDK.getApps().main(discovered=True, namespace=namespace) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for workspace - try : - astra_app_id, astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps_unmanaged, workspace_name=workspace_name, include_full_app_details=True) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Fail if app doesn't exist in Astra - if not astra_app_details : - if print_output : - print("Error: 
App does not exist in Astra. Are you sure that the workspace name is correct?") - raise AstraAppDoesNotExistError() - - # Wait until app has a status of "running" in Astra - while True : - try : - if astra_app_details["state"] == "running" : - break - except KeyError : - pass - - if print_output : - print("It appears that Astra Control is still discovering the JupyterLab workspace. If this persists, confirm that you typed the workspace name correctly and/or check your Astra Control setup. Sleeping for 60 seconds before checking again...") - sleep(60) - - # Retrieve list of unmanaged Astra apps again - try : - astra_apps_unmanaged = astraSDK.getApps().main(discovered=True, namespace=namespace) - astra_app_id, astra_app_details = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps_unmanaged, workspace_name=workspace_name, include_full_app_details=True) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Manage app (i.e. 
register app with Astra) - if print_output : - print("Registering JupyterLab workspace '" + workspace_name + "' in namespace '" + namespace + "' with Astra Control...") - try : - managed = astraSDK.manageApp().main(appID=astra_app_id) - except Exception as err : - if print_output: - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine success or error - if managed : - if print_output : - print("JupyterLab workspace is now managed by Astra Control.") - else : - if print_output : - print("Error: Astra Control API Error.") - raise APIConnectionError() - - -def backup_jupyter_lab_with_astra(workspace_name: str, backup_name: str, namespace: str = "default", print_output: bool = False) : - # Retrieve list of Astra apps - try : - astra_apps = astraSDK.getApps().main(namespace=namespace) - except Exception as err : - if print_output : - print("Error: Astra Control API Error: ", err) - raise APIConnectionError(err) - - # Determine Astra App ID for source workspace - try : - astra_app_id = _retrieve_astra_app_id_for_jupyter_lab(astra_apps=astra_apps, workspace_name=workspace_name) - except InvalidConfigError : - if print_output : - _print_astra_k8s_cluster_name_error() - raise InvalidConfigError() - - # Handle situation where workspace has not been registered with Astra. - if not astra_app_id : - error_message = "JupyterLab workspace has not been registered with Astra Control." 
- if print_output : - print("Error:", error_message) - print("Hint: use the 'netapp_dataops_k8s_cli.py register-with-astra jupyterlab' command to register a JupyterLab workspace with Astra Control.") - raise AstraAppNotManagedError(error_message) - - # Trigger backup - print("Trigerring backup of workspace '" + workspace_name + "' in namespace '" + namespace + "' using Astra Control...") - print("\nAstra SDK output:") - try : - ret = astraSDK.takeBackup(quiet=False).main(appID=astra_app_id, backupName=backup_name) - except Exception as err : - if print_output : - print("\nError: Astra Control API Error: ", err) - raise APIConnectionError(err) - - if ret == False : - if print_output : - print("\nError: Astra Control API error. See Astra SDK output above for details") - raise APIConnectionError("Astra Control API error.") - - if print_output : - print("\nBackup operation has been initiated. The operation may take several minutes to complete.") - print("Access the Astra Control dashboard to check the status of the backup operation.") - - def restore_jupyter_lab_snapshot(snapshot_name: str = None, namespace: str = "default", print_output: bool = False): # Retrieve source PVC name sourcePvcName = _retrieve_source_volume_details_for_volume_snapshot(snapshotName=snapshot_name, namespace=namespace, @@ -2293,3 +2051,17 @@ def restoreJupyterLabSnapshot(snapshotName: str = None, namespace: str = "defaul def restoreVolumeSnapshot(snapshotName: str, namespace: str = "default", printOutput: bool = False, pvcLabels: dict = {"created-by": "ntap-dsutil", "created-by-operation": "restore-volume-snapshot"}) : restore_volume_snapshot(snapshot_name=snapshotName, namespace=namespace, print_output=printOutput, pvc_labels=pvcLabels) + +@deprecated +def clone_jupyter_lab_to_new_namespace(source_workspace_name: str, new_namespace: str, source_workspace_namespace: str = "default", clone_to_cluster_name: str = None, print_output: bool = False) : + 
_astra_not_supported_message(print_output=print_output) + + +@deprecated +def register_jupyter_lab_with_astra(workspace_name: str, namespace: str = "default", print_output: bool = False) : + _astra_not_supported_message(print_output=print_output) + + +@deprecated +def backup_jupyter_lab_with_astra(workspace_name: str, backup_name: str, namespace: str = "default", print_output: bool = False) : + _astra_not_supported_message(print_output=print_output) diff --git a/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py b/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py index 7c60076..3b88b7f 100755 --- a/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py +++ b/netapp_dataops_k8s/netapp_dataops/netapp_dataops_k8s_cli.py @@ -2,12 +2,10 @@ """NetApp DataOps Toolkit for Kubernetes Script Interface.""" from netapp_dataops import k8s from netapp_dataops.k8s import ( - backup_jupyter_lab_with_astra, clone_volume, create_volume_snapshot, create_volume, clone_jupyter_lab, - clone_jupyter_lab_to_new_namespace, create_triton_server, create_jupyter_lab, create_jupyter_lab_snapshot, @@ -20,13 +18,9 @@ list_jupyter_lab_snapshots, list_volumes, list_triton_servers, - register_jupyter_lab_with_astra, restore_jupyter_lab_snapshot, restore_volume_snapshot, APIConnectionError, - AstraAppNotManagedError, - AstraClusterDoesNotExistError, - AstraAppDoesNotExistError, CAConfigMap, InvalidConfigError ) @@ -37,6 +31,7 @@ ) # Define contents of help text +astra_error_text = "Error: Astra Control functionality within the DataOps Toolkit is no longer supported. Please use the Astra SDK and/or toolkit. For details, visit https://github.com/NetApp/netapp-astra-toolkits." 
helpTextStandard = ''' The NetApp DataOps Toolkit for Kubernetes is a Python library that makes it simple for data scientists and data engineers to perform various data management tasks, such as provisioning a new data volume, near-instantaneously cloning a data volume, and near-instantaneously snapshotting a data volume for traceability/baselining. @@ -49,15 +44,12 @@ Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. \tclone jupyterlab\t\tClone a JupyterLab workspace within the same namespace. -\tclone-to-new-ns jupyterlab\tClone a JupyterLab workspace to a brand new namespace. \tcreate jupyterlab\t\tProvision a JupyterLab workspace. \tdelete jupyterlab\t\tDelete an existing JupyterLab workspace. \tlist jupyterlabs\t\tList all JupyterLab workspaces. \tcreate jupyterlab-snapshot\tCreate a new snapshot for a JupyterLab workspace. \tlist jupyterlab-snapshots\tList all snapshots. \trestore jupyterlab-snapshot\tRestore a snapshot. -\tregister-with-astra jupyterlab\tRegister an existing JupyterLab workspace with Astra Control. -\tbackup-with-astra jupyterlab\tBackup an existing JupyterLab workspace using Astra Control. NVIDIA Triton Inference Server Management Commands: Note: To view details regarding options/arguments for a specific command, run the command with the '-h' or '--help' option. @@ -92,24 +84,7 @@ \tshow s3-job\t\t\tShow the status of the specifed Kubernetes job. \tdelete s3-job\t\t\tDelete a Kubernetes S3 job. ''' -helpTextBackupJupyterLab = ''' -Command: backup-with-astra jupyterlab - -Backup an existing JupyterLab workspace using Astra Control. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-w, --workspace-name=\tName of JupyterLab workspace to be backed up. -\t-b, --backup-name=\tName to be applied to new backup. - -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. 
-\t-n, --namespace=\tKubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. - -Examples: -\tnetapp_dataops_k8s_cli.py backup-with-astra jupyterlab --workspace-name=mike --backup-name=backup1 -''' +helpTextBackupJupyterLab = astra_error_text helpTextCloneJupyterLab = ''' Command: clone jupyterlab @@ -136,26 +111,7 @@ \tnetapp_dataops_k8s_cli.py clone jupyterlab --new-workspace-name=project1-experiment1 --source-workspace-name=project1 --nvidia-gpu=1 \tnetapp_dataops_k8s_cli.py clone jupyterlab -w project2-mike -s project2-snap1 -n team1 -g 1 -p 0.5 -m 1Gi -b ''' -helpTextCloneToNewNsJupyterLab = ''' -Command: clone-to-new-ns jupyterlab - -Clone a JupyterLab workspace to a brand new namespace. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-j, --source-workspace-name=\tName of JupyterLab workspace to use as source for clone. -\t-n, --new-namespace=\t\tKubernetes namespace to create new workspace in. This namespace must not exist; it will be created during this operation. - -Optional Options/Arguments: -\t-c, --clone-to-cluster-name=\tName of destination Kubernetes cluster within Astra Control. Workspace will be cloned a to a new namespace in this cluster. If not specified, then the workspace will be cloned to a new namespace within the user's current cluster. -\t-h, --help\t\t\tPrint help text. -\t-s, --source-namespace=\t\tKubernetes namespace that source workspace is located in. If not specified, namespace "default" will be used. - -Examples: -\tnetapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab --source-workspace-name=ws1 --new-namespace=project1 -\tnetapp_dataops_k8s_cli.py clone-to-new-ns jupyterlab -j ws1 -n team2 -s team1 -c ocp1 -''' +helpTextCloneToNewNsJupyterLab = astra_error_text helpTextCloneVolume = ''' Command: clone volume @@ -212,7 +168,6 @@ \t-n, --namespace=\t\tKubernetes namespace to create new workspace in. 
If not specified, workspace will be created in namespace "default". \t-p, --cpu=\t\t\tNumber of CPUs to reserve for JupyterLab workspace. Format: '0.5', '1', etc. If not specified, no CPUs will be reserved. \t-b, --load-balancer\t\tOption to use a LoadBalancer instead of using NodePort service. If not specified, NodePort service will be utilized. -\t-a, --register-with-astra\tRegister new workspace with Astra Control (requires Astra Control). \t-v, --mount-pvc=\t\tOption to attach an additional existing PVC that can be mounted at a spefic path whithin the container. Format: -v/--mount-pvc=existing_pvc_name:mount_point. If not specified, no additional PVC will be attached. \t-r, --allocate-resource=\tOption to specify custom resource allocations, ex. 'nvidia.com/mig-1g.5gb=1'. If not specified, no custom resource will be allocated. @@ -522,7 +477,6 @@ Optional Options/Arguments: \t-h, --help\t\t\tPrint help text. \t-n, --namespace=\t\tKubernetes namespace for which to retrieve list of workspaces. If not specified, namespace "default" will be used. -\t-a, --include-astra-app-id\tInclude Astra Control app IDs in the output (requires Astra Control). Examples: \tnetapp_dataops_k8s_cli.py list jupyterlabs -n team1 @@ -653,24 +607,7 @@ \tnetapp_dataops_k8s_cli.py put-s3 object -c mycreds -o host.example.com -b one -p mypvc -u -v -k sample.txt \tnetapp_dataops_k8s_cli.py put-s3 object -c mycreds -o host.example.com -b one -p mypvc -u -v -f "dir5/" -k sample.txt ''' -helpTextRegisterJupyterLab = ''' -Command: register-with-astra jupyterlab - -Register an existing JupyterLab workspace with Astra Control. - -Note: This command requires Astra Control. - -Required Options/Arguments: -\t-w, --workspace-name=\tName of JupyterLab workspace to be registered. - -Optional Options/Arguments: -\t-h, --help\t\tPrint help text. -\t-n, --namespace=\tKubernetes namespace that the workspace is located in. If not specified, namespace "default" will be used. 
- -Examples: -\tnetapp_dataops_k8s_cli.py register-with-astra jupyterlab --workspace-name=mike -\tnetapp_dataops_k8s_cli.py register-with-astra jupyterlab -w dave -n dst-test -''' +helpTextRegisterJupyterLab = astra_error_text helpTextRestoreJupyterLabSnapshot = ''' Command: restore jupyterlab-snapshot @@ -761,38 +698,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - workspace_name = None - backup_name = None - namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hw:b:n:", - ["help", "workspace-name=", "backup-name=", "namespace="]) - except: - handleInvalidCommand(helpText=helpTextBackupJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextBackupJupyterLab) - sys.exit(0) - elif opt in ("-w", "--workspace-name"): - workspace_name = arg - elif opt in ("-b", "--backup-name"): - backup_name = arg - elif opt in ("-n", "--namespace"): - namespace = arg - - # Check for required options - if not workspace_name or not backup_name: - handleInvalidCommand(helpText=helpTextBackupJupyterLab, invalidOptArg=True) - - # Back up JupyterLab workspace - try: - backup_jupyter_lab_with_astra(workspace_name=workspace_name, backup_name=backup_name, namespace=namespace, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppNotManagedError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextBackupJupyterLab) else: handleInvalidCommand() @@ -921,41 +827,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - source_workspace_name = None - new_namespace = None - clone_to_cluster_name = None - source_namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hj:n:c:s:", - ["help", "source-workspace-name=", "new-namespace=", "clone-to-cluster-name=", 
"source-namespace="]) - except: - handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextCloneToNewNsJupyterLab) - sys.exit(0) - elif opt in ("-j", "--source-workspace-name"): - source_workspace_name = arg - elif opt in ("-n", "--new-namespace"): - new_namespace = arg - elif opt in ("-c", "--clone-to-cluster-name"): - clone_to_cluster_name = arg - elif opt in ("-s", "--source-namespace"): - source_namespace = arg - - # Check for required options - if not source_workspace_name or not new_namespace : - handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab, invalidOptArg=True) - - # Clone JupyterLab to new namespace - try: - clone_jupyter_lab_to_new_namespace(source_workspace_name=source_workspace_name, new_namespace=new_namespace, source_workspace_namespace=source_namespace, clone_to_cluster_name=clone_to_cluster_name, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppNotManagedError, AstraClusterDoesNotExistError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextCloneToNewNsJupyterLab) else: handleInvalidCommand() @@ -2199,35 +2071,7 @@ def getTarget(args: list) -> str: # Invoke desired action based on target if target in ("jupyterlab", "jupyter"): - workspaceName = None - namespace = "default" - - # Get command line options - try: - opts, args = getopt.getopt(sys.argv[3:], "hw:n:", - ["help", "workspace-name=", "namespace="]) - except: - handleInvalidCommand(helpText=helpTextRegisterJupyterLab, invalidOptArg=True) - - # Parse command line options - for opt, arg in opts: - if opt in ("-h", "--help"): - print(helpTextRegisterJupyterLab) - sys.exit(0) - elif opt in ("-w", "--workspace-name"): - workspaceName = arg - elif opt in ("-n", "--namespace"): - namespace = arg - - # Check for required options - if not workspaceName: - handleInvalidCommand(helpText=helpTextRegisterJupyterLab, 
invalidOptArg=True) - - # Register JupyterLab workspace - try: - register_jupyter_lab_with_astra(workspace_name=workspaceName, namespace=namespace, print_output=True) - except (InvalidConfigError, APIConnectionError, AstraAppDoesNotExistError): - sys.exit(1) + handleInvalidCommand(helpText=helpTextRegisterJupyterLab) else: handleInvalidCommand() diff --git a/netapp_dataops_k8s/setup.cfg b/netapp_dataops_k8s/setup.cfg index 260fe15..fc7ba3e 100644 --- a/netapp_dataops_k8s/setup.cfg +++ b/netapp_dataops_k8s/setup.cfg @@ -13,6 +13,8 @@ classifiers = Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 project_urls = Bug Tracker = https://github.com/NetApp/netapp-data-science-toolkit/issues Documentation = https://github.com/NetApp/netapp-data-science-toolkit/blob/main/README.md @@ -25,22 +27,12 @@ packages = find_namespace: scripts = netapp_dataops/netapp_dataops_k8s_cli.py install_requires = - notebook + notebook<7.0.0 pandas numpy>=1.22.0 - actoolkit==2.1.3 - certifi==2020.12.5 - chardet==4.0.0 - dnspython==2.1.0 - idna==2.10 - PyYAML==5.4.1 - requests==2.25.1 - tabulate==0.8.9 - termcolor==1.1.0 - urllib3==1.26.5 - func_timeout==4.3.5 - kubernetes==23.6.0 -python_requires = >=3.8 + tabulate + kubernetes +python_requires = >=3.8,<3.12 [options.packages.find] exclude = Examples.* diff --git a/netapp_dataops_traditional/README.md b/netapp_dataops_traditional/README.md index 48babb5..5355a3c 100644 --- a/netapp_dataops_traditional/README.md +++ b/netapp_dataops_traditional/README.md @@ -20,7 +20,7 @@ Note: The 'prepopulate flexcache' operation only supports ONTAP 9.8 and above. A ### Prerequisites -The NetApp DataOps Toolkit for Traditional Environments requires that Python 3.8 or above be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. 
For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). +The NetApp DataOps Toolkit for Traditional Environments requires that Python 3.8, 3.9, 3.10, or 3.11 be installed on the local host. Additionally, the toolkit requires that pip for Python3 be installed on the local host. For more details regarding pip, including installation instructions, refer to the [pip documentation](https://pip.pypa.io/en/stable/installing/). ### Installation Instructions @@ -217,6 +217,7 @@ The following options/arguments are optional: -j, --junction Specify a custom junction path for the volume to be exported at. -f, --tiering-policy Specify tiering policy for fabric-pool enabled systems (default is 'none'). -y, --dp Create volume as DP volume (the volume will be used as snapmirror target) + -w, --snaplock-type Specify snaplock type to use when creating new volume (compliance/enterprise). ``` ##### Example Usage @@ -364,6 +365,7 @@ The following options/arguments are optional: -l, --lif= non default lif (nfs server ip/name) -h, --help Print help text. -x, --readonly Mount volume locally as read-only. + -o, --options Specify custom NFS mount options. ``` ##### Example Usage @@ -1059,6 +1061,7 @@ def create_volume: print_output: bool = False, # Denotes whether or not to print messages to the console during execution. tiering_policy: str = None, # For fabric pool enabled system tiering policy can be: none,auto,snapshot-only,all vol_dp: bool = False # Create volume as type DP which can be used as snapmirror destination + snaplock_type: str = None, # Snaplock type to apply for new volume (ex. 'compliance' or 'enterprise') ``` ##### Return Value @@ -1089,7 +1092,7 @@ def delete_volume( volume_name: str, # Name of volume (required). print_output: bool = False # Denotes whether or not to print messages to the console during execution. 
cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used - svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used + svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used delete_mirror: bool = False, # release snapmirror on source volume/delete snapmirror relation on destination volume delete_non_clone: bool = False, # Enable deletion of non clone volume (extra step not to incedently delete important volume) print_output: bool = False # Denotes whether or not to print messages to the console during execution. @@ -1164,15 +1167,15 @@ APIConnectionError # The storage system/service API returned an err #### Mount an Existing Data Volume Locally The NetApp DataOps Toolkit can be used to mount an existing data volume as "read-only" or "read-write" on your local host as part of any Python program or workflow. On Linux hosts, mounting requires root privileges, so any Python program that invokes this function must be run as root. It is usually not necessary to invoke this function as root on macOS hosts. - ##### Function Definition ```py def mount_volume( volume_name: str, # Name of volume (required). - cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used - svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used + cluster_name: str = None, # Non default cluster name, same credentials as the default credentials should be used + svm_name: str = None, # Non default svm name, same credentials as the default credentials should be used mountpoint: str, # Local mountpoint to mount volume at (required). + mount_options: str = None, # Specify custom NFS mount options. readonly: bool = False, # Mount volume locally as "read-only." If not specified volume will be mounted as "read-write". 
On Linux hosts - if specified, calling program must be run as root. print_output: bool = False # Denotes whether or not to print messages to the console during execution. ) : diff --git a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py index 007effb..0ba27d3 100755 --- a/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py +++ b/netapp_dataops_traditional/netapp_dataops/netapp_dataops_cli.py @@ -100,8 +100,8 @@ Optional Options/Arguments: \t-l, --cluster-name=\tnon default hosting cluster -\t-c, --source-svm=\tnon default source svm name -\t-t, --target-svm=\tnon default target svm name +\t-c, --source-svm=\tnon default source svm name +\t-t, --target-svm=\tnon default target svm name \t-g, --gid=\t\tUnix filesystem group id (gid) to apply when creating new volume (if not specified, gid of source volume will be retained) (Note: cannot apply gid of '0' when creating clone). \t-h, --help\t\tPrint help text. \t-m, --mountpoint=\tLocal mountpoint to mount new volume at after creating. If not specified, new volume will not be mounted locally. On Linux hosts - if specified, must be run as root. 
@@ -114,8 +114,8 @@ \t-e, --export-policy\texport policy name to attach to the volume, default policy will be used if export-hosts/export-policy not provided \t-d, --snapshot-policy\tsnapshot-policy to attach to the volume, default snapshot policy will be used if not provided \t-s, --split\t\tstart clone split after creation -\t-r, --refresh\t\tdelete existing clone if exists before creating a new one -\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists +\t-r, --refresh\t\tdelete existing clone if exists before creating a new one +\t-d, --svm-dr-unprotect\tdisable svm dr protection if svm-dr protection exists Examples (basic usage): \tnetapp_dataops_cli.py clone volume --name=project1 --source-volume=gold_dataset @@ -147,9 +147,9 @@ \t-s, --svm=\t\tNon defaul svm name. \t-h, --help\t\tPrint help text. \t-n, --name=\t\tName of new snapshot. If not specified, will be set to 'netapp_dataops_'. -\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. +\t-r, --retention=\tSnapshot name will be suffixed by and excesive snapshots will be deleted. \t \tCan be count of snapshots when int (ex. 10) or days when retention is suffixed by d (ex. 10d) -\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot +\t-l, --snapmirror-label=\tif provided snapmirror label will be configured on the created snapshot Examples: \tnetapp_dataops_cli.py create snapshot --volume=project1 --name=snap1 @@ -169,7 +169,7 @@ Optional Options/Arguments: \t-l, --cluster-name=\tnon default hosting cluster -\t-v, --svm=\t\tnon default svm name +\t-v, --svm=\t\tnon default svm name \t-a, --aggregate=\tAggregate to use when creating new volume (flexvol) or optional comma seperated aggrlist when specific aggregates are required for FG. \t-d, --snapshot-policy=\tSnapshot policy to apply for new volume. \t-e, --export-policy=\tNFS export policy to use when exporting new volume. 
@@ -180,6 +180,7 @@ \t-r, --guarantee-space\tGuarantee sufficient storage space for full capacity of the volume (i.e. do not use thin provisioning). \t-t, --type=\t\tVolume type to use when creating new volume (flexgroup/flexvol). \t-u, --uid=\t\tUnix filesystem user id (uid) to apply when creating new volume (ex. '0' for root user). +\t-w, --snaplock-type=\tSnaplock type to apply for new volume. (can be 'compliance','enterprise',None) \t-x, --readonly\t\tRead-only option for mounting volumes locally. \t-j, --junction\t\tSpecify a custom junction path for the volume to be exported at. \t-f, --tiering-policy\tSpecify tiering policy for fabric-pool enabled systems (default is 'none'). @@ -198,6 +199,7 @@ \tnetapp_dataops_cli.py create volume -n testvol -s 10GB -t flexvol -p 0755 -u 1000 -g 1000 -j /project1 \tsudo -E netapp_dataops_cli.py create volume -n vol1 -s 5GB -t flexvol --export-policy=team1 -m /mnt/vol1 \tnetapp_dataops_cli.py create vol -n test2 -s 10GB -t flexvol --snapshot-policy=default --tiering-policy=auto +\tnetapp_dataops_cli.py create volume --name=project1 --size=100GB --snaplock-type=compliance ''' helpTextDeleteSnapshot = ''' Command: delete snapshot @@ -229,7 +231,7 @@ \t-u, --cluster-name=\tnon default hosting cluster \t-v, --svm \t\tnon default SVM name \t-f, --force\t\tDo not prompt user to confirm operation. -\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion +\t-m, --delete-mirror\tdelete/release snapmirror relationship prior to volume deletion \t --delete-non-clone\tEnable deletion of volume not created as clone by this tool \t-h, --help\t\tPrint help text. @@ -321,11 +323,13 @@ \t-l, --lif \t\tnon default lif (nfs server ip/name) \t-h, --help\t\tPrint help text. \t-x, --readonly\t\tMount volume locally as read-only. +\t-o, --options\t\tSpecify custom NFS mount options. 
Examples: \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 \tsudo -E netapp_dataops_cli.py mount volume -m ~/testvol -n testvol -x \tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly +\tsudo -E netapp_dataops_cli.py mount volume --name=project1 --mountpoint=/mnt/project1 --readonly --options=rsize=262144,wsize=262144,nconnect=16 ''' helpTextPullFromS3Bucket = ''' Command: pull-from-s3 bucket @@ -491,7 +495,7 @@ helpTextCreateSnapMirrorRelationship = ''' Command: create snapmirror-relationship -create snapmirror relationship +create snapmirror relationship Required Options/Arguments: \t-n, --target-vol=\tName of target volume @@ -499,7 +503,7 @@ \t-v, --source-vol=\tSource volume name Optional Options/Arguments: -\t-u, --cluster-name=\tnon default hosting cluster +\t-u, --cluster-name=\tnon default hosting cluster \t-t, --target-svm=\tnon default target SVM \t-c, --schedule=\t\tnon default schedule (default is hourly) \t-p, --policy=\t\tnon default policy (default is MirrorAllSnapshots @@ -507,7 +511,7 @@ \t-h, --help\t\tPrint help text. 
Examples: -\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly +\tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly \tnetapp_dataops_cli.py create snapmirror-relationship -u cluster1 -s svm1 -t svm2 -v vol1 -n vol1 -p MirrorAllSnapshots -c hourly -a resync ''' @@ -737,9 +741,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Invoke desired action based on target if target in ("volume", "vol"): newVolumeName = None - clusterName = None - sourceSVM = None - targetSVM = None + clusterName = None + sourceSVM = None + targetSVM = None sourceVolumeName = None sourceSnapshotName = None mountpoint = None @@ -757,7 +761,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hl:c:t:n:v:s:m:u:g:j:xe:p:i:srd", ["help", "cluster-name=", "source-svm=","target-svm=","name=", "source-volume=", "source-snapshot=", "mountpoint=", "uid=", "gid=", "junction=", "readonly","export-hosts=","export-policy=","snapshot-policy=","split","refresh","svm-dr-unprotect"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCloneVolume, invalidOptArg=True) @@ -767,15 +771,15 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextCloneVolume) sys.exit(0) elif opt in ("-l", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): newVolumeName = arg elif opt in ("-c", "--source-svm"): sourceSVM = arg elif opt in ("-t", "--target-svm"): - targetSVM = arg + targetSVM = arg elif opt in ("-v", "--source-volume"): - sourceVolumeName = arg + sourceVolumeName = arg elif opt in ("-s", "--source-snapshot"): sourceSnapshotName = arg elif opt in ("-m", "--mountpoint"): @@ -789,17 +793,17 @@ 
def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-x", "--readonly"): readonly = True elif opt in ("-s", "--split"): - split = True + split = True elif opt in ("-r", "--refresh"): - refresh = True + refresh = True elif opt in ("-d", "--svm-dr-unprotect"): - svmDrUnprotect = True + svmDrUnprotect = True elif opt in ("-p", "--export-policy"): - exportPolicy = arg + exportPolicy = arg elif opt in ("-i", "--snapshot-policy"): - snapshotPolicy = arg + snapshotPolicy = arg elif opt in ("-e", "--export-hosts"): - exportHosts = arg + exportHosts = arg # Check for required options if not newVolumeName or not sourceVolumeName: @@ -813,9 +817,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Clone volume try: - clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, - cluster_name=clusterName, source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, - snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, + clone_volume(new_volume_name=newVolumeName, source_volume_name=sourceVolumeName, source_snapshot_name=sourceSnapshotName, + cluster_name=clusterName, source_svm=sourceSVM, target_svm=targetSVM, export_policy=exportPolicy, export_hosts=exportHosts, + snapshot_policy=snapshotPolicy, split=split, refresh=refresh, mountpoint=mountpoint, unix_uid=unixUID, unix_gid=unixGID, junction=junction, svm_dr_unprotect=svmDrUnprotect, readonly=readonly, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidSnapshotParameterError, InvalidVolumeParameterError, MountOperationError): @@ -846,8 +850,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None snapshotName = None - clusterName = None - svmName = None + clusterName = None + svmName 
= None retentionCount = 0 retentionDays = False snapmirrorLabel = None @@ -855,7 +859,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:r:u:l:", ["cluster-name=","help", "svm=", "name=", "volume=", "retention=", "snapmirror-label="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) @@ -867,20 +871,20 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-n", "--name"): snapshotName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-r", "--retention"): - retentionCount = arg + retentionCount = arg elif opt in ("-v", "--volume"): volumeName = arg elif opt in ("-l", "--snapmirror-label"): - snapmirrorLabel = arg + snapmirrorLabel = arg # Check for required options if not volumeName: handleInvalidCommand(helpText=helpTextCreateSnapshot, invalidOptArg=True) - + if retentionCount: if not retentionCount.isnumeric(): matchObj = re.match("^(\d+)d$",retentionCount) @@ -897,8 +901,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = sys.exit(1) elif target in ("volume", "vol"): - clusterName = None - svmName = None + clusterName = None + svmName = None volumeName = None volumeSize = None guaranteeSpace = False @@ -909,16 +913,17 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = exportPolicy = None snapshotPolicy = None mountpoint = None + snaplock_type = None aggregate = None junction = None readonly = False - tieringPolicy = None + tieringPolicy = None volDP = False # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:y", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", 
"uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp"]) - except Exception as err: + opts, args = getopt.getopt(sys.argv[3:], "l:hv:t:n:s:rt:p:u:g:e:d:m:a:j:xu:yw:", ["cluster-name=","help", "svm=", "name=", "size=", "guarantee-space", "type=", "permissions=", "uid=", "gid=", "export-policy=", "snapshot-policy=", "mountpoint=", "aggregate=", "junction=" ,"readonly","tiering-policy=","dp","snaplock-type="]) + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextCreateVolume, invalidOptArg=True) @@ -928,9 +933,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextCreateVolume) sys.exit(0) elif opt in ("-v", "--svm"): - svmName = arg + svmName = arg elif opt in ("-l", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): volumeName = arg elif opt in ("-s", "--size"): @@ -961,6 +966,8 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = tieringPolicy = arg elif opt in ("-y", "--dp"): volDP = True + elif opt in ("-w", "--snaplock-type"): + snaplock_type = arg # Check for required options if not volumeName or not volumeSize: @@ -974,17 +981,17 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Create volume try: create_volume(svm_name=svmName, volume_name=volumeName, cluster_name=clusterName, volume_size=volumeSize, guarantee_space=guaranteeSpace, volume_type=volumeType, unix_permissions=unixPermissions, unix_uid=unixUID, - unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, - print_output=True, tiering_policy=tieringPolicy, vol_dp=volDP) + unix_gid=unixGID, export_policy=exportPolicy, snapshot_policy=snapshotPolicy, aggregate=aggregate, mountpoint=mountpoint, junction=junction, readonly=readonly, + print_output=True, 
tiering_policy=tieringPolicy, vol_dp=volDP, snaplock_type = snaplock_type) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) elif target in ("snapmirror-relationship", "sm","snapmirror"): - clusterName = None - sourceSvm = None - targetSvm = None - sourceVol = None - targetVol = None + clusterName = None + sourceSvm = None + targetSvm = None + sourceVol = None + targetVol = None policy = 'MirrorAllSnapshots' schedule = "hourly" volumeSize = None @@ -1005,13 +1012,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-t", "--target-svm"): targetSvm = arg elif opt in ("-n", "--target-vol"): - targetVol = arg + targetVol = arg elif opt in ("-s", "--source-svm"): - sourceSvm = arg + sourceSvm = arg elif opt in ("-v", "--source-vol"): - sourceVol = arg + sourceVol = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-c", "--schedule"): schedule = arg elif opt in ("-p", "--policy"): @@ -1026,9 +1033,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if action not in [None,'resync','initialize']: handleInvalidCommand(helpText=helpTextCreateSnapMirrorRelationship, invalidOptArg=True) - # Create snapmirror + # Create snapmirror try: - create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, + create_snap_mirror_relationship(source_svm=sourceSvm, target_svm=targetSvm, source_vol=sourceVol, target_vol=targetVol, schedule=schedule, policy=policy, cluster_name=clusterName, action=action, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) @@ -1044,13 +1051,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None snapshotName = None - 
svmName = None - clusterName = None + svmName = None + clusterName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hn:v:s:u:", ["cluster-name=","help", "svm=", "name=", "volume="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteSnapshot, invalidOptArg=True) @@ -1064,7 +1071,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-v", "--volume"): volumeName = arg @@ -1081,15 +1088,15 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("volume", "vol"): volumeName = None svmName = None - clusterName = None + clusterName = None force = False - deleteMirror = False + deleteMirror = False deleteNonClone = False # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone","delete-mirror"]) - except Exception as err: + opts, args = getopt.getopt(sys.argv[3:], "hfv:n:u:m", ["cluster-name=","help", "svm=", "name=", "force", "delete-non-clone", "delete-mirror"]) + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextDeleteVolume, invalidOptArg=True) @@ -1101,15 +1108,15 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): volumeName = arg elif opt in ("-f", "--force"): force = True elif opt in ("-m", "--delete-mirror"): - deleteMirror = True + deleteMirror = True elif opt in ("--delete-non-clone"): - deleteNonClone = True + deleteNonClone = True # Check for required options if not volumeName: @@ -1161,14 +1168,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, 
invalidOptArg: bool = elif target in ("snapmirror-relationship", "snapmirror", "snapmirror-relationships", "snapmirrors","sm"): svmName = None - clusterName = None + clusterName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:u:", ["cluster-name=","help", "svm="]) - except Exception as err: + except Exception as err: print(err) - handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) + handleInvalidCommand(helpText=helpTextListSnapMirrorRelationships, invalidOptArg=True) # Parse command line options for opt, arg in opts: @@ -1178,9 +1185,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg - # List snapmirror relationships + # List snapmirror relationships try: list_snap_mirror_relationships(print_output=True, cluster_name=clusterName) except (InvalidConfigError, APIConnectionError): @@ -1188,13 +1195,13 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("snapshot", "snap", "snapshots", "snaps"): volumeName = None - clusterName = None - svmName = None + clusterName = None + svmName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hv:s:u:", ["cluster-name=","help", "volume=","svm="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextListSnapshots, invalidOptArg=True) @@ -1208,7 +1215,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg # Check for required options if not volumeName: @@ -1223,12 +1230,12 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("volume", "vol", "volumes", "vols"): includeSpaceUsageDetails = 
False svmName = None - clusterName = None + clusterName = None # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hsv:u:", ["cluster-name=","help", "include-space-usage-details","svm="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextListVolumes, invalidOptArg=True) @@ -1242,7 +1249,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-s", "--include-space-usage-details"): includeSpaceUsageDetails = True elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg # List volumes try: @@ -1260,15 +1267,17 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Invoke desired action based on target if target in ("volume", "vol"): volumeName = None - svmName = None - clusterName = None - lifName = None + svmName = None + clusterName = None + lifName = None mountpoint = None + mount_options = None readonly = False + # Get command line options try: - opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly"]) - except Exception as err: + opts, args = getopt.getopt(sys.argv[3:], "hv:n:l:m:u:o:x", ["cluster-name=","help", "lif=","svm=", "name=", "mountpoint=", "readonly", "options="]) + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) @@ -1280,19 +1289,28 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-v", "--svm"): svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-l", "--lif"): - lifName = arg + lifName = arg elif opt in ("-n", "--name"): volumeName = arg elif opt in ("-m", "--mountpoint"): mountpoint = arg + elif opt in ("-o", "--options"): + mount_options = arg elif opt in ("-x", "--readonly"): readonly = True + # Check for required options + if not volumeName: + 
handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + + if not mountpoint: + handleInvalidCommand(helpText=helpTextMountVolume, invalidOptArg=True) + # Mount volume try: - mount_volume(svm_name = svmName, cluster_name=clusterName, lif_name = lifName, volume_name=volumeName, mountpoint=mountpoint, readonly=readonly, print_output=True) + mount_volume(svm_name = svmName, cluster_name=clusterName, lif_name = lifName, volume_name=volumeName, mountpoint=mountpoint, mount_options=mount_options, readonly=readonly, print_output=True) except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError): sys.exit(1) @@ -1309,7 +1327,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hm:", ["help", "mountpoint="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextUnmountVolume, invalidOptArg=True) @@ -1345,7 +1363,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hn:p:", ["help", "name=", "paths="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPrepopulateFlexCache, invalidOptArg=True) @@ -1388,7 +1406,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPullFromS3Bucket, invalidOptArg=True) @@ -1422,7 +1440,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except 
Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPullFromS3Object, invalidOptArg=True) @@ -1465,7 +1483,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:p:d:e:", ["help", "bucket=", "key-prefix=", "directory=", "extra-args="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPushToS3Directory, invalidOptArg=True) @@ -1502,7 +1520,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hb:k:f:e:", ["help", "bucket=", "key=", "file=", "extra-args="]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextPushToS3File, invalidOptArg=True) @@ -1541,14 +1559,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = if target in ("snapshot", "snap"): volumeName = None snapshotName = None - svmName = None - clusterName = None + svmName = None + clusterName = None force = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hs:n:v:fu:", ["cluster-name=","help", "svm=", "name=", "volume=", "force"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextRestoreSnapshot, invalidOptArg=True) @@ -1560,9 +1578,9 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif opt in ("-n", "--name"): snapshotName = arg elif opt in ("-s", "--svm"): - svmName = arg + svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-v", "--volume"): volumeName = arg elif opt in ("-f", "--force"): @@ -1605,7 +1623,7 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = # Get command line options try: opts, args = 
getopt.getopt(sys.argv[3:], "hi:w", ["help", "id=", "wait"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextSyncCloudSyncRelationship, invalidOptArg=True) @@ -1632,14 +1650,14 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = elif target in ("snapmirror-relationship", "snapmirror"): uuid = None volumeName = None - svmName = None - clusterName = None + svmName = None + clusterName = None waitUntilComplete = False # Get command line options try: opts, args = getopt.getopt(sys.argv[3:], "hi:wn:u:v:", ["help", "cluster-name=","svm=","name=","uuid=", "wait"]) - except Exception as err: + except Exception as err: print(err) handleInvalidCommand(helpText=helpTextSyncSnapMirrorRelationship, invalidOptArg=True) @@ -1649,11 +1667,11 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = print(helpTextSyncSnapMirrorRelationship) sys.exit(0) elif opt in ("-v", "--svm"): - svmName = arg + svmName = arg elif opt in ("-u", "--cluster-name"): - clusterName = arg + clusterName = arg elif opt in ("-n", "--name"): - volumeName = arg + volumeName = arg elif opt in ("-i", "--uuid"): uuid = arg elif opt in ("-w", "--wait"): @@ -1683,3 +1701,4 @@ def handleInvalidCommand(helpText: str = helpTextStandard, invalidOptArg: bool = else: handleInvalidCommand() + diff --git a/netapp_dataops_traditional/netapp_dataops/traditional.py b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py similarity index 92% rename from netapp_dataops_traditional/netapp_dataops/traditional.py rename to netapp_dataops_traditional/netapp_dataops/traditional/__init__.py index b0c7f09..b6e8674 100644 --- a/netapp_dataops_traditional/netapp_dataops/traditional.py +++ b/netapp_dataops_traditional/netapp_dataops/traditional/__init__.py @@ -34,7 +34,7 @@ import yaml -__version__ = "2.4.0" +__version__ = "2.5.0" # Using this decorator in lieu of using a dependency to manage deprecation @@ 
-52,7 +52,7 @@ class CloudSyncSyncOperationError(Exception) : """Error that will be raised when a Cloud Sync sync operation fails""" pass - + class ConnectionTypeError(Exception): """Error that will be raised when an invalid connection type is given""" pass @@ -360,7 +360,7 @@ def _convert_bytes_to_pretty_size(size_in_bytes: str, num_decimal_points: int = def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: str = None, source_snapshot_name: str = None, - source_svm: str = None, target_svm: str = None, export_hosts: str = None, export_policy: str = None, split: bool = False, + source_svm: str = None, target_svm: str = None, export_hosts: str = None, export_policy: str = None, split: bool = False, unix_uid: str = None, unix_gid: str = None, mountpoint: str = None, junction: str= None, readonly: bool = False, snapshot_policy: str = None, refresh: bool = False, svm_dr_unprotect: bool = False, print_output: bool = False): # Retrieve config details from config file @@ -374,9 +374,9 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st if print_output: _print_invalid_config_error() raise InvalidConfigError() - + if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -386,14 +386,14 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st raise # Retrieve values from config file if not passed into function - try: + try: sourcesvm = config["svm"] - if source_svm: - sourcesvm = source_svm - + if source_svm: + sourcesvm = source_svm + targetsvm = sourcesvm if target_svm: - targetsvm = target_svm + targetsvm = target_svm if not unix_uid: unix_uid = config["defaultUnixUID"] @@ -422,27 +422,27 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st print("Error: Invalid unix gid specified. Value must be an integer. 
Example: '0' for root group.") raise InvalidVolumeParameterError("unixGID") - #check if clone volume already exists + #check if clone volume already exists try: - currentVolume = NetAppVolume.find(name=new_volume_name, svm=targetsvm) + currentVolume = NetAppVolume.find(name=new_volume_name, svm=targetsvm) if currentVolume and not refresh: if print_output: print("Error: clone:"+new_volume_name+" already exists.") - raise InvalidVolumeParameterError("name") - + raise InvalidVolumeParameterError("name") + #for refresh we want to keep the existing policy if currentVolume and refresh and not export_policy and not export_hosts: export_policy = currentVolume.nas.export_policy.name # if refresh and not provided new snapshot_policy - if currentVolume and refresh and not snapshot_policy: + if currentVolume and refresh and not snapshot_policy: snapshot_policy = currentVolume.snapshot_policy.name except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) - + #delete existing clone when refresh try: if currentVolume and refresh: @@ -451,19 +451,19 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st else: if print_output: print("Error: refresh clone is only supported when existing clone created using the tool (based on volume comment)") - raise InvalidVolumeParameterError("name") + raise InvalidVolumeParameterError("name") except: print("Error: could not delete previous clone") - raise InvalidVolumeParameterError("name") - + raise InvalidVolumeParameterError("name") + try: - if not snapshot_policy : + if not snapshot_policy : snapshot_policy = config["defaultSnapshotPolicy"] except: print("Error: default snapshot policy could not be found in config file") - raise InvalidVolumeParameterError("name") + raise InvalidVolumeParameterError("name") - # check export policies + # check export policies try: if not export_policy and not export_hosts: export_policy = config["defaultExportPolicy"] 
@@ -483,11 +483,11 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) - #exists check if snapshot-policy + #exists check if snapshot-policy try: - snapshotPoliciesDetails = NetAppSnapshotPolicy.get_collection(**{"name":snapshot_policy}) + snapshotPoliciesDetails = NetAppSnapshotPolicy.get_collection(**{"name":snapshot_policy}) clusterSnapshotPolicy = False - svmSnapshotPolicy = False + svmSnapshotPolicy = False for snapshotPolicyDetails in snapshotPoliciesDetails: if str(snapshotPolicyDetails.name) == snapshot_policy: try: @@ -499,11 +499,11 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st if not clusterSnapshotPolicy and not svmSnapshotPolicy: if print_output: print("Error: snapshot-policy:"+snapshot_policy+" could not be found") - raise InvalidVolumeParameterError("snapshot_policy") + raise InvalidVolumeParameterError("snapshot_policy") except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) # Create volume if print_output: @@ -522,7 +522,6 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st junction=junction else: junction = "/"+new_volume_name - # Construct dict representing new volume newVolumeDict = { @@ -544,7 +543,7 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st } } } - + if unix_uid != 0: newVolumeDict["nas"]["uid"] = unix_uid else: @@ -564,20 +563,20 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st if print_output: print("Error: Invalid source snapshot name.") raise InvalidSnapshotParameterError("name") - + # Append source snapshot details to volume dict newVolumeDict["clone"]["parent_snapshot"] = { "name": sourceSnapshot.name, "uuid": sourceSnapshot.uuid } - + if source_snapshot_name and 
source_snapshot_name.endswith("*"): source_snapshot_prefix = source_snapshot_name[:-1] - latest_source_snapshot = None - latest_source_snapshot_uuid = None + latest_source_snapshot = None + latest_source_snapshot_uuid = None - # Retrieve all source snapshot from last to 1st + # Retrieve all source snapshot from last to 1st for snapshot in NetAppSnapshot.get_collection(sourceVolume.uuid): snapshot.get() if snapshot.name.startswith(source_snapshot_prefix): @@ -593,16 +592,16 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st "name": latest_source_snapshot, "uuid": latest_source_snapshot_uuid } - print("Snapshot '" + latest_source_snapshot+ "' will be used to create the clone.") + print("Snapshot '" + latest_source_snapshot+ "' will be used to create the clone.") - # set clone volume commnet parameter + # set clone volume commnet parameter comment = 'PARENTSVM:'+sourcesvm+',PARENTVOL:'+newVolumeDict["clone"]["parent_volume"]["name"]+',CLONESVM:'+targetsvm+',CLONENAME:'+newVolumeDict["name"] - if source_snapshot_name: comment += ' SNAP:'+newVolumeDict["clone"]["parent_snapshot"]["name"] + if source_snapshot_name: comment += ' SNAP:'+newVolumeDict["clone"]["parent_snapshot"]["name"] comment += " netapp-dataops" - + newVolumeDict["comment"] = comment - # Create new volume clone + # Create new volume clone newVolume = NetAppVolume.from_dict(newVolumeDict) newVolume.post(poll=True, poll_timeout=120) if print_output: @@ -616,23 +615,23 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st if svm_dr_unprotect: try: if print_output: - print("Disabling svm-dr protection") + print("Disabling svm-dr protection") response = NetAppCLI().execute("volume modify",vserver=targetsvm,volume=new_volume_name,body={"vserver_dr_protection": "unprotected"}) except NetAppRestError as err: if "volume is not part of a Vserver DR configuration" in str(err): if print_output: - print("Warning: could not disable svm-dr-protection since 
volume is not protected using svm-dr") + print("Warning: could not disable svm-dr-protection since volume is not protected using svm-dr") else: if print_output: - print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + print("Error: ONTAP Rest API Error: ", err) + raise APIConnectionError(err) - #create custom export policy if needed + #create custom export policy if needed if export_hosts: - try: + try: if print_output: - print("Creating export-policy:"+export_policy) - # Construct dict representing new export policy + print("Creating export-policy:"+export_policy) + # Construct dict representing new export policy newExportPolicyDict = { "name" : export_policy, "svm": {"name": targetsvm}, @@ -641,44 +640,44 @@ def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: st for client in export_hosts.split(":"): newExportPolicyDict['rules'].append({ "clients": [{"match": client }], "ro_rule": ["sys"], "rw_rule": ["sys"], "superuser": ["sys"]}) - # Create new export policy + # Create new export policy newExportPolicy = NetAppExportPolicy.from_dict(newExportPolicyDict) newExportPolicy.post(poll=True, poll_timeout=120) - + except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) - #set export policy and snapshot policy + #set export policy and snapshot policy try: if print_output: - print("Setting export-policy:"+export_policy+ " snapshot-policy:"+snapshot_policy) - volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) + print("Setting export-policy:"+export_policy+ " snapshot-policy:"+snapshot_policy) + volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) updatedVolumeDetails.nas = {"export_policy": {"name": export_policy}} updatedVolumeDetails.snapshot_policy = {"name": snapshot_policy} - updatedVolumeDetails.patch(poll=True, poll_timeout=120) + 
updatedVolumeDetails.patch(poll=True, poll_timeout=120) except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) - #split clone + #split clone try: - if split: + if split: if print_output: - print("Splitting clone") - volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) - #get volume details - updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) + print("Splitting clone") + volumeDetails = NetAppVolume.find(name=new_volume_name, svm=targetsvm) + #get volume details + updatedVolumeDetails = NetAppVolume(uuid=volumeDetails.uuid) updatedVolumeDetails.clone = {"split_initiated": True} - updatedVolumeDetails.patch() + updatedVolumeDetails.patch() except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) # Optionally mount newly created volume if mountpoint: @@ -707,7 +706,7 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -723,14 +722,14 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = try: svm = config["svm"] if svm_name: - svm = svm_name + svm = svm_name except: if print_output: _print_invalid_config_error() raise InvalidConfigError() snapshot_name_original = snapshot_name - # Set snapshot name if not passed into function or retention provided + # Set snapshot name if not passed into function or retention provided if not snapshot_name or int(retention_count) > 0: timestamp = '.'+datetime.datetime.today().strftime("%Y-%m-%d_%H%M%S") snapshot_name += timestamp @@ -746,14 +745,14 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = print("Error: Invalid volume name.") 
raise InvalidVolumeParameterError("name") - # create snapshot dict + # create snapshot dict snapshotDict = { 'name': snapshot_name, 'volume': volume.to_dict() } if snapmirror_label: if print_output: - print("Setting snapmirror label as:"+snapmirror_label) + print("Setting snapmirror label as:"+snapmirror_label) snapshotDict['snapmirror_label'] = snapmirror_label # Create snapshot @@ -769,27 +768,27 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = raise APIConnectionError(err) #delete snapshots exceeding retention count if provided - retention_count = int(retention_count) + retention_count = int(retention_count) if retention_count > 0: - try: - # Retrieve all source snapshot from last to 1st + try: + # Retrieve all source snapshot from last to 1st # Retrieve volume volume = NetAppVolume.find(name=volume_name, svm=svm) if not volume: if print_output: print("Error: Invalid volume name.") - raise InvalidVolumeParameterError("name") + raise InvalidVolumeParameterError("name") if retention_days: retention_date = datetime.datetime.today() - datetime.timedelta(days=retention_count) - - last_snapshot_list = [] + + last_snapshot_list = [] snapshot_list = [] for snapshot in NetAppSnapshot.get_collection(volume.uuid): snapshot.get() if snapshot.name.startswith(snapshot_name_original+'.'): if not retention_days: - snapshot_list.append(snapshot.name) + snapshot_list.append(snapshot.name) last_snapshot_list.append(snapshot.name) if len(last_snapshot_list) > retention_count: last_snapshot_list.pop(0) @@ -799,12 +798,12 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = if matchObj: snapshot_date = matchObj.group(1) snapshot_date_obj = datetime.datetime.strptime(snapshot_date, "%Y-%m-%d_%H%M%S") - snapshot_list.append(snapshot.name) + snapshot_list.append(snapshot.name) last_snapshot_list.append(snapshot.name) if snapshot_date_obj < retention_date: last_snapshot_list.pop(0) - - #delete snapshots not in retention + + 
#delete snapshots not in retention for snap in snapshot_list: if snap not in last_snapshot_list: delete_snapshot(volume_name=volume_name, svm_name = svm, snapshot_name=snap, skip_owned=True, print_output=True) @@ -812,14 +811,14 @@ def create_snapshot(volume_name: str, cluster_name: str = None, svm_name: str = except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) else: raise ConnectionTypeError() def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = False, cluster_name: str = None, svm_name: str = None, volume_type: str = "flexvol", unix_permissions: str = "0777", - unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", + unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default", snaplock_type: str = None, snapshot_policy: str = None, aggregate: str = None, mountpoint: str = None, junction: str = None, readonly: bool = False, print_output: bool = False, tiering_policy: str = None, vol_dp: bool = False): # Retrieve config details from config file @@ -835,7 +834,7 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -847,8 +846,8 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa # Retrieve values from config file if not passed into function try: svm = config["svm"] - if svm_name: - svm = svm_name + if svm_name: + svm = svm_name if not volume_type : volume_type = config["defaultVolumeType"] if not unix_permissions : @@ -910,7 +909,13 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa if print_output: print("Error: Invalid volume size specified. 
Acceptable values are '1024MB', '100GB', '10TB', etc.") raise InvalidVolumeParameterError("size") - + + # Create option to choose snaplock type + if snaplock_type not in ['compliance', 'enterprise', None]: + if print_output: + print("Error: Invalid snaplock volume type specified. Value must be either 'compliance' or 'enterprise'") + raise InvalidVolumeParameterError("snaplockVolume") + # Create option to choose junction path. if junction: junction=junction @@ -918,13 +923,13 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa junction = "/"+volume_name - #check tiering policy + #check tiering policy if not tiering_policy in ['none','auto','snapshot-only','all', None]: if print_output: print("Error: tiering policy can be: none,auto,snapshot-only or all") - raise InvalidVolumeParameterError("tieringPolicy") + raise InvalidVolumeParameterError("tieringPolicy") - #vol dp type + #vol dp type if vol_dp: # Create dict representing volume of type dp volumeDict = { @@ -951,7 +956,7 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa "uid": unix_uid, "gid": unix_gid }, - "snapshot_policy": {"name": snapshot_policy}, + "snapshot_policy": {"name": snapshot_policy}, } # Set space guarantee field @@ -967,8 +972,13 @@ def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = Fa if aggregate: volumeDict["aggregates"] = [] for aggr in aggregate.split(','): - volumeDict["aggregates"].append({'name': aggr}) - #if tiering policy provided + volumeDict["aggregates"].append({'name': aggr}) + + # if snaplock type is valid + if snaplock_type: + volumeDict['snaplock'] = {"type": snaplock_type} + + #if tiering policy provided if tiering_policy: volumeDict['tiering'] = {'policy': tiering_policy} @@ -1012,7 +1022,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name 
if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1024,8 +1034,8 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No # Retrieve svm from config file try: svm = config["svm"] - if svm_name: - svm = svm_name + if svm_name: + svm = svm_name except: if print_output: _print_invalid_config_error() @@ -1044,15 +1054,14 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No # Retrieve snapshot snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name) - - + if not snapshot: if print_output: print("Error: Invalid snapshot name.") raise InvalidSnapshotParameterError("name") - + if hasattr(snapshot,'owners'): - + if not skip_owned: if print_output: print('Error: Snapshot cannot be deleted since it has owners:'+','.join(snapshot.owners)) @@ -1077,7 +1086,7 @@ def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = No raise ConnectionTypeError() -def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, delete_mirror: bool = False, +def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, delete_mirror: bool = False, delete_non_clone: bool = False, print_output: bool = False): # Retrieve config details from config file try: @@ -1092,7 +1101,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1110,7 +1119,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No if print_output : _print_invalid_config_error() raise InvalidConfigError() - + try: # Retrieve volume volume = NetAppVolume.find(name=volume_name, svm=svm) @@ -1122,7 +1131,7 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No if not "CLONENAME:" in volume.comment and not 
delete_non_clone: if print_output: print("Error: volume is not a clone created by this tool. add --delete-non-clone to delete it") - raise InvalidVolumeParameterError("delete-non-clone") + raise InvalidVolumeParameterError("delete-non-clone") except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) @@ -1140,17 +1149,17 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No uuid = rel.uuid except NetAppRestError as err: if print_output: - print("Error: ONTAP Rest API Error: ", err) + print("Error: ONTAP Rest API Error: ", err) - if uuid: + if uuid: if print_output: - print("Deleting snapmirror relationship: "+svm+":"+volume_name) + print("Deleting snapmirror relationship: "+svm+":"+volume_name) try: deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) deleteRelation.delete(poll=True, poll_timeout=120) except NetAppRestError as err: if print_output: - print("Error: ONTAP Rest API Error: ", err) + print("Error: ONTAP Rest API Error: ", err) #check if this volume has snapmirror destination relationship uuid = None @@ -1161,12 +1170,37 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No rel.get(list_destinations_only=True) uuid = rel.uuid if print_output: - print("release relationship: "+rel.source.path+" -> "+rel.destination.path) + print("release relationship: "+rel.source.path+" -> "+rel.destination.path) deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid) deleteRelation.delete(poll=True, poll_timeout=120,source_only=True) except NetAppRestError as err: if print_output: - print("Error: ONTAP Rest API Error: ", err) + print("Error: ONTAP Rest API Error: ", err) + + #Unmount volume and skip if not sudo or not locally mounted + try: + volumes = list_volumes(check_local_mounts=True) + for localmount in volumes: + if localmount["Volume Name"] == volume_name: + x=localmount["Local Mountpoint"] + if x == "": + break + elif x != "": + if os.getuid() != 0: + 
print("Warning: Volume was not unmounted. You need to have root privileges to run unmount command.") + break + else: + try: + unmount = unmount_volume(mountpoint=x) + except (InvalidConfigError, APIConnectionError): + if print_output: + print("Error: unmounting volume.") + raise MountOperationError(err) + + except (InvalidConfigError, APIConnectionError): + if print_output: + print("Error: volume retrieval failed for unmount operation.") + raise try: if print_output: @@ -1180,9 +1214,9 @@ def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = No except NetAppRestError as err: if print_output: if "You must delete the SnapMirror relationships before" in str(err): - print("Error: volume is snapmirror destination. add --delete-mirror to delete snapmirror relationship before deleting the volume") + print("Error: volume is snapmirror destination. add --delete-mirror to delete snapmirror relationship before deleting the volume") elif "the source endpoint of one or more SnapMirror relationships" in str(err): - print("Error: volume is snapmirror source. add --delete-mirror to release snapmirror relationship before deleting the volume") + print("Error: volume is snapmirror source. 
add --delete-mirror to release snapmirror relationship before deleting the volume") else: print("Error: ONTAP Rest API Error: ", err) raise APIConnectionError(err) @@ -1258,7 +1292,7 @@ def list_snap_mirror_relationships(print_output: bool = False, cluster_name: str raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1352,9 +1386,9 @@ def list_snapshots(volume_name: str, cluster_name: str = None, svm_name: str = N if print_output: _print_invalid_config_error() raise InvalidConfigError() - + if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1424,7 +1458,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: _print_invalid_config_error() raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -1436,7 +1470,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: try: svmname=config["svm"] if svm_name: - svmname = svm_name + svmname = svm_name # Retrieve all volumes for SVM volumes = NetAppVolume.get_collection(svm=svmname) @@ -1487,7 +1521,7 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: cloneParentSnapshot = "" try: - cloneParentSvm = volume.clone.parent_svm.name + cloneParentSvm = volume.clone.parent_svm.name cloneParentVolume = volume.clone.parent_volume.name cloneParentSnapshot = volume.clone.parent_snapshot.name clone = "yes" @@ -1538,10 +1572,11 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: volumeDict["NFS Mount Target"] = nfsMountTarget if check_local_mounts: localMountpoint = "" - for mount in mounts.split("\n") : - mountDetails = mount.split(" ") 
- if mountDetails[0] == nfsMountTarget : - localMountpoint = mountDetails[2] + if nfsMountTarget: + for mount in mounts.split("\n") : + mountDetails = mount.split(" ") + if mountDetails[0].strip() == nfsMountTarget.strip() : + localMountpoint = mountDetails[2] volumeDict["Local Mountpoint"] = localMountpoint volumeDict["FlexCache"] = flexcache volumeDict["Clone"] = clone @@ -1562,16 +1597,15 @@ def list_volumes(check_local_mounts: bool = False, include_space_usage_details: # Convert volumes array to Pandas DataFrame volumesDF = pd.DataFrame.from_dict(volumesList, dtype="string") print(tabulate(volumesDF, showindex=False, headers=volumesDF.columns)) - + return volumesList - + else: raise ConnectionTypeError() - -def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): +def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, svm_name: str = None, mount_options: str = None, lif_name: str = None, readonly: bool = False, print_output: bool = False): nfsMountTarget = None - + svm = None try: config = _retrieve_config(print_output=print_output) @@ -1587,7 +1621,7 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name # Retrieve list of volumes try: @@ -1608,13 +1642,14 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv if volume_name == volume["Volume Name"]: # Retrieve NFS mount target nfsMountTarget = volume["NFS Mount Target"] + nfsMountTarget = nfsMountTarget.strip() # Raise error if invalid volume name was entered if not nfsMountTarget: if print_output: print("Error: Invalid volume name specified.") raise InvalidVolumeParameterError("name") - + try: if lif_name: nfsMountTarget = lif_name+':'+nfsMountTarget.split(':')[1] @@ -1623,6 +1658,7 @@ def 
mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv print("Error: Error retrieving NFS mount target for volume.") raise + # Print message describing action to be understaken if print_output: if readonly: @@ -1638,25 +1674,43 @@ def mount_volume(volume_name: str, mountpoint: str, cluster_name: str = None, sv pass # Mount volume + mount_cmd_opts = [] + if readonly: - try: - subprocess.check_call(['mount', '-o', 'ro', nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) - else: - try: - subprocess.check_call(['mount', nfsMountTarget, mountpoint]) - if print_output: - print("Volume mounted successfully.") - except subprocess.CalledProcessError as err: - if print_output: - print("Error: Error running mount command: ", err) - raise MountOperationError(err) + mount_cmd_opts.append('-o') + mount_cmd_opts.append('ro') + if mount_options: + mount_cmd_opts.remove('ro') + mount_cmd_opts.append('ro'+','+mount_options) + elif mount_options: + mount_cmd_opts.append('-o') + mount_cmd_opts.append(mount_options) + mount_cmd = ['mount'] + mount_cmd_opts + [nfsMountTarget, mountpoint] + + if os.getuid() != 0: + mount_cmd_opts_str = "" + for item in mount_cmd_opts : + if item == "-o" : + continue + mount_cmd_opts_str = mount_cmd_opts_str + item + "," + mount_cmd_opts_str = mount_cmd_opts_str[:-1] + if mount_cmd_opts_str: + sys.exit("You need to have root privileges to run mount command." + "\nTo mount the volume run the following command as root:" + "\n"+ "mount -o "+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) + else: + sys.exit("You need to have root privileges to run mount command." 
+ "\nTo mount the volume run the following command as root:" + "\n"+ "mount"+ mount_cmd_opts_str+ " " + nfsMountTarget + " " + mountpoint) + try: + subprocess.check_call(mount_cmd) + if print_output: + print("Volume mounted successfully.") + except subprocess.CalledProcessError as err: + if print_output: + print("Error: Error running mount command: ", err) + raise MountOperationError(err) # Function to unmount volume @@ -1859,9 +1913,9 @@ def restore_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = N if print_output: _print_invalid_config_error() raise InvalidConfigError() - + if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -2006,7 +2060,7 @@ def sync_cloud_sync_relationship(relationship_id: str, wait_until_complete: bool # Sleep for 60 seconds before checking progress again time.sleep(60) -def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol: str, target_svm: str = None, cluster_name: str = None, +def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol: str, target_svm: str = None, cluster_name: str = None, schedule: str = '', policy: str = 'MirrorAllSnapshots', action: str = None, print_output: bool = False): # Retrieve config details from config file try: @@ -2021,7 +2075,7 @@ def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -2031,10 +2085,10 @@ def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol raise svm = config["svm"] - if not target_svm: - target_svm = svm + if not target_svm: + target_svm = svm - try: + try: uuid = None snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": 
target_svm+":"+target_vol}) for rel in snapmirror_relationship: @@ -2052,17 +2106,17 @@ def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) - + raise APIConnectionError(err) + try: newRelationDict = { "source": { "path": source_svm+":"+source_vol - }, - "destination": { + }, + "destination": { "path": target_svm+":"+target_vol } - #due to bug 1311226 setting the policy wil be done using cli api + #due to bug 1311226 setting the policy wil be done using cli api # "policy": { # "name": policy, # }, @@ -2086,9 +2140,9 @@ def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) - try: + try: uuid = None relation = None snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": target_svm+":"+target_vol}) @@ -2120,7 +2174,7 @@ def create_snap_mirror_relationship(source_svm: str, source_vol: str, target_vol except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - raise APIConnectionError(err) + raise APIConnectionError(err) def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume_name: str = None, cluster_name: str = None, wait_until_complete: bool = False, print_output: bool = False): # Retrieve config details from config file @@ -2136,7 +2190,7 @@ def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume raise InvalidConfigError() if cluster_name: - config["hostname"] = cluster_name + config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster @@ -2147,7 +2201,7 @@ def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume if volume_name: svm = config["svm"] - 
if svm_name: + if svm_name: svm = svm_name snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name}) @@ -2168,9 +2222,9 @@ def sync_snap_mirror_relationship(uuid: str = None, svm_name: str = None, volume except NetAppRestError as err: if print_output: print("Error: ONTAP Rest API Error: ", err) - if uuid: + if uuid: if print_output: - print("volume is part of svm-dr relationshitp: "+svm+":") + print("volume is part of svm-dr relationshitp: "+svm+":") if not uuid: if print_output: @@ -2331,3 +2385,4 @@ def syncCloudSyncRelationship(relationshipID: str, waitUntilComplete: bool = Fal @deprecated def syncSnapMirrorRelationship(uuid: str, waitUntilComplete: bool = False, printOutput: bool = False) : sync_snap_mirror_relationship(uuid=uuid, wait_until_complete=waitUntilComplete, print_output=printOutput) + diff --git a/netapp_dataops_traditional/setup.cfg b/netapp_dataops_traditional/setup.cfg index 09d45ba..75b4d42 100644 --- a/netapp_dataops_traditional/setup.cfg +++ b/netapp_dataops_traditional/setup.cfg @@ -15,6 +15,8 @@ classifiers = Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 project_urls = Bug Tracker = https://github.com/NetApp/netapp-data-science-toolkit/issues Documentation = https://github.com/NetApp/netapp-data-science-toolkit/blob/main/README.md @@ -23,7 +25,7 @@ long_description = The NetApp DataOps Toolkit for Traditional Environments is a long_description_content_type = text/markdown [options] -py_modules = netapp_dataops.traditional +packages = find_namespace: scripts = netapp_dataops/netapp_dataops_cli.py install_requires = @@ -34,4 +36,4 @@ install_requires = requests boto3 pyyaml -python_requires = >=3.8 +python_requires = >=3.8,<3.12