From f756c8cc55749bf47ffa7f540608b566a7e66fe9 Mon Sep 17 00:00:00 2001 From: Amrutha Ramanathan <105342664+ramrutha497@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:58:50 +0530 Subject: [PATCH] ocean gke implementation --- spotinst_sdk2/__init__.py | 4 +- spotinst_sdk2/clients/ocean/__init__.py | 659 +++++++++++++ spotinst_sdk2/models/ocean/gcp/__init__.py | 1002 ++++++++++++++++++++ 3 files changed, 1664 insertions(+), 1 deletion(-) create mode 100644 spotinst_sdk2/models/ocean/gcp/__init__.py diff --git a/spotinst_sdk2/__init__.py b/spotinst_sdk2/__init__.py index 4e794102..2427e8ec 100755 --- a/spotinst_sdk2/__init__.py +++ b/spotinst_sdk2/__init__.py @@ -56,7 +56,9 @@ def client(self, service, print_output=True, log_level=None, user_agent=None, ti "ocean_aws": OceanAwsClient(session=self.session, print_output=print_output, log_level=log_level, user_agent=user_agent, timeout=timeout), "ocean_azure": OceanAzureClient(session=self.session, print_output=print_output, log_level=log_level, - user_agent=user_agent, timeout=timeout), + user_agent=user_agent, timeout=timeout), + "ocean_gcp": OceanGCPClient(session=self.session, print_output=print_output, log_level=log_level, + user_agent=user_agent, timeout=timeout), "oceancd": OceanCDClient(session=self.session, print_output=print_output, log_level=log_level, user_agent=user_agent, timeout=timeout), "managed_instance_aws": ManagedInstanceAwsClient(session=self.session, print_output=print_output, diff --git a/spotinst_sdk2/clients/ocean/__init__.py b/spotinst_sdk2/clients/ocean/__init__.py index 975d2eee..5974a9a1 100644 --- a/spotinst_sdk2/clients/ocean/__init__.py +++ b/spotinst_sdk2/clients/ocean/__init__.py @@ -4,6 +4,7 @@ from spotinst_sdk2.client import Client import spotinst_sdk2.models.ocean.aws as aws_ocean import spotinst_sdk2.models.ocean.azure as azure_ocean +import spotinst_sdk2.models.ocean.gcp as gcp_ocean # region AWS @@ -1693,3 +1694,661 @@ def list_migrations(self, ocean_id: str): return 
formatted_response["response"] # endregion + + +class OceanGCPClient(Client): + __base_ocean_url = "/ocean/k8s/cluster/" + __base_ocean_cluster_url = "/ocean/gcp/k8s/cluster" + __base_ocean_launchspec_url = "/ocean/gcp/k8s/launchSpec" + + def get_heartbeat_status(self, ocean_id: str): + """ + Get the heartbeat status of the Ocean Controller for the cluster. + The response returns the heartbeat status and the last heartbeat timestamp. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + + # Returns + (Object): Ocean Get Heartbeat response + """ + response = self.send_get( + url=self.__base_ocean_url + ocean_id + "/controllerHeartbeat", + entity_name="ocean (Cluster Heartbeat)" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def create_ocean_cluster(self, ocean: gcp_ocean.Ocean): + """ + Create an Ocean Cluster + + # Arguments + ocean (Ocean): Ocean Object + + # Returns + (Object): Ocean API response + """ + ocean = gcp_ocean.OceanRequest(ocean) + + excluded_missing_dict = self.exclude_missing( + json.loads(ocean.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_post( + body=body_json, + url=self.__base_ocean_cluster_url, + entity_name='ocean') + + formatted_response = self.convert_json(response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_all_ocean_cluster(self): + """ + List the configurations for all Ocean clusters in the specified account. 
+ + # Returns + (Object): Ocean API response + """ + + response = self.send_get( + url=self.__base_ocean_cluster_url, + entity_name="ocean" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def delete_ocean_cluster(self, ocean_id: str): + """ + Delete a specified Ocean cluster. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + + # Returns + (Object): Ocean API response + """ + return self.send_delete( + url=self.__base_ocean_cluster_url + "/" + ocean_id, + entity_name="ocean" + ) + + def get_ocean_cluster(self, ocean_id: str): + """ + Get the configuration for a specified Ocean cluster. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + + # Returns + (Object): Ocean API response + """ + response = self.send_get( + url=self.__base_ocean_cluster_url + "/" + ocean_id, + entity_name="ocean" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def update_ocean_cluster(self, ocean_id: str, ocean: gcp_ocean.Ocean): + """ + All Ocean parameters are updatable, excluding the Name and controllerClusterId. + This API supports partial updates, so specific fields can be updated separately. 
+ + # Arguments + ocean_id (String): ID of the Ocean Cluster + ocean (Ocean): Ocean object + + # Returns + (Object): Ocean API response + """ + ocean = gcp_ocean.OceanRequest(ocean) + + excluded_missing_dict = self.exclude_missing( + json.loads(ocean.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_put( + body=body_json, + url=self.__base_ocean_cluster_url + "/" + ocean_id, + entity_name='ocean') + + formatted_response = self.convert_json( + response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def reimport_ocean_cluster(self, ocean_id: str): + """ + Reimport the cluster's configuration from GKE. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + ocean (Ocean): Ocean object + + # Returns + (Object): Reimport cluster response + """ + + response = self.send_put( + url=self.__base_ocean_cluster_url + "/" + ocean_id + '/reImport', + entity_name='ocean') + + formatted_response = self.convert_json( + response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_elastilog(self, ocean_id, from_date, to_date, severity=None, resource_id=None, limit=None): + """ + Get group’s Elastilog by + + # Arguments + to_date (String): to date + from_date (String): to date + severity(String) (Optional): Log level severity + resource_id(String) (Optional): Filter log extracted entires related to a + specific resource id + limit(String) (Optional): Maximum number of lines to extract in a response + + # Returns + (Object): Ocean Get Log API response + """ + geturl = self.__base_ocean_cluster_url + "/" + ocean_id + "/log" + query_params = dict(toDate=to_date, fromDate=from_date, severity=severity, + resourceId=resource_id, limit=limit) + + result = self.send_get( + url=geturl, entity_name='ocean_gcp_log', query_params=query_params) + + formatted_response = 
self.convert_json( + result, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def get_rightsizing_recommendations(self, ocean_id: str, filter: gcp_ocean.RightSizingRecommendationFilter = None): + """ + Get right-sizing recommendations for an Ocean cluster and filter them according to namespace or label. + + # Arguments + ocean_id (String): Id of the Ocean Cluster + filter (RightSizingRecommendationFilter): Optional - may be null. + + # Returns + (Object): Ocean API response + """ + recommendation_request = gcp_ocean.RightSizingRecommendationRequest( + filter) + + excluded_missing_dict = self.exclude_missing( + json.loads(recommendation_request.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + group_response = self.send_post( + body=body_json, + url=self.__base_ocean_cluster_url + + "/" + ocean_id + "/rightSizing/suggestion", + entity_name='ocean') + + formatted_response = self.convert_json( + group_response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def get_aggregated_cluster_costs(self, ocean_id: str, aggregated_cluster_costs: gcp_ocean.AggregatedClusterCosts): + """ + Get aggregated cluster costs + + # Arguments + ocean_id (String): ID of the Ocean Cluster + aggregated_cluster_costs (AggregatedClusterCosts): Aggregated Cluster Costs request + + # Returns + (Object): Aggregated Cluster Costs API response + """ + aggregated_cluster_costs_request = gcp_ocean.AggregatedClusterCostRequest( + aggregated_cluster_costs) + + excluded_missing_dict = self.exclude_missing( + json.loads(aggregated_cluster_costs_request.toJSON())) + + formatted_missing_dict = self.convert_json_with_list_of_lists( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + aggregated_costs_response = self.send_post( + body=body_json, + 
url=self.__base_ocean_cluster_url + "/" + ocean_id + "/aggregatedCosts", + entity_name='ocean (aggregated cluster costs)') + + formatted_response = self.convert_json( + aggregated_costs_response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_aggregated_summary_costs(self, ocean_id: str, aggregated_cluster_costs: gcp_ocean.AggregatedClusterCosts): + """ + Get aggregated cluster costs + + # Arguments + ocean_id (String): ID of the Ocean Cluster + aggregated_cluster_costs (AggregatedClusterCosts): Aggregated Cluster Costs request + + # Returns + (Object): Aggregated Cluster Costs API response + """ + aggregated_cluster_costs_request = gcp_ocean.AggregatedClusterCostRequest( + aggregated_cluster_costs) + + excluded_missing_dict = self.exclude_missing( + json.loads(aggregated_cluster_costs_request.toJSON())) + + formatted_missing_dict = self.convert_json_with_list_of_lists( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + aggregated_costs_response = self.send_post( + body=body_json, + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/aggregatedCosts/summary", + entity_name='ocean (aggregated cluster costs)') + + formatted_response = self.convert_json( + aggregated_costs_response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def create_virtual_node_group(self, vng: gcp_ocean.VirtualNodeGroup): + """ + Create a virtual node group. + + # Arguments + vng (VirtualNodeGroup): VirtualNodeGroup Object + initial_nodes: When set to an integer greater than 0, a corresponding number of nodes will be launched from the virtual node group created. 
+ + # Returns + (Object): Ocean Launch Spec response + """ + ocean = gcp_ocean.VNGRequest(vng) + + excluded_missing_dict = self.exclude_missing( + json.loads(ocean.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_post( + body=body_json, + url=self.__base_ocean_launchspec_url, + entity_name='ocean_gcp_vng') + + formatted_response = self.convert_json(response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_all_ocean_vngs(self, ocean_id: str): + """ + List the configurations for all virtual node groups in the account + or in a specified cluster. + + # Returns + (Object): Ocean VNG API response + """ + + response = self.send_get( + url=self.__base_ocean_launchspec_url, + entity_name="ocean_gcp_vng", + query_params=dict(oceanId=ocean_id) + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def import_vng_configuration(self, node_pool_name: str, ocean_id: str): + """ + Import node pool configuration of a GKE cluster to use in create_virtual_node_group api call + + # Returns + (Object): Ocean API response + """ + + response = self.send_post_with_params( + body=None, + url=self.__base_ocean_launchspec_url + "/import", + entity_name='ocean_gcp', + user_query_params=dict(nodePoolName=node_pool_name, oceanId=ocean_id)) + + formatted_response = self.convert_json(response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def delete_virtual_node_group(self, vng_id: str, delete_nodes: bool = None): + """ + Delete a Virtual Node Group + + # Arguments + vng_id (String): ID of the Ocean VNG + delete_nodes (Bool): When set to "true", all instances belonging to the deleted launch specification will be drained, detached, and terminated. 
+ + # Returns + (Object): Ocean Launch Specification Delete response + """ + return self.send_delete_with_params( + url=self.__base_ocean_launchspec_url + "/" + vng_id, + entity_name="ocean_gcp_vng", + user_query_params=dict(deleteNodes=delete_nodes) + ) + + def update_virtual_node_group(self, vng_id: str, vng: gcp_ocean.VirtualNodeGroup): + """ + Update an existing VNG inside an Ocean Cluster + + # Arguments + vng_id (String): ID of the Ocean Virtual Node Group + vng (VirtualNodeGroup): VirtualNodeGroup object + + # Returns + (Object): Ocean Launch Spec response + """ + ocean = gcp_ocean.VNGRequest(vng) + + excluded_missing_dict = self.exclude_missing( + json.loads(ocean.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_put( + body=body_json, + url=self.__base_ocean_launchspec_url + "/" + vng_id, + entity_name='ocean_gcp_vng') + + formatted_response = self.convert_json( + response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_virtual_node_group(self, ocean_launch_spec_id: str): + """ + Get Virtual Node Group of the cluster + + # Arguments + ocean_launch_spec_id (String): Ocean cluster launch specification identifier + + # Returns + (Object): Ocean Launch Spec response + """ + response = self.send_get( + url=self.__base_ocean_launchspec_url + "/" + ocean_launch_spec_id, + entity_name="ocean" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def initiate_roll(self, ocean_id: str, cluster_roll: gcp_ocean.Roll): + """ + Initiate Cluster Rolls + + # Arguments + ocean_id (String): ID of the Ocean Cluster + cluster_roll (Roll): Cluster Roll / Roll with Instance Ids/ Launch specification Ids + + # Returns + (Object): Cluster Roll API response + """ + roll_request = 
gcp_ocean.ClusterRollInitiateRequest(cluster_roll) + + excluded_missing_dict = self.exclude_missing( + json.loads(roll_request.toJSON())) + + formatted_missing_dict = self.convert_json_with_list_of_lists( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + rolls_response = self.send_post( + body=body_json, + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll", + entity_name='ocean (Cluster Roll)') + + formatted_response = self.convert_json( + rolls_response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def list_rolls(self, ocean_id: str): + """ + Get status for all rolls of an Ocean cluster. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + + # Returns + (Object): List of Cluster Roll API response + """ + response = self.send_get( + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll", + entity_name="ocean (Cluster Roll)" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def update_roll(self, ocean_id: str, roll_id: str, status: str): + """ + Update a roll of an Ocean cluster. + Performing the request will stop the next batch in a roll. 
+ + # Arguments + ocean_id (String): ID of the Ocean Cluster + roll_id (String): Ocean cluster roll identifier + update_roll (UpdateRoll): update roll request + + # Returns + (Object): Cluster Roll API response + """ + update_roll_request = gcp_ocean.ClusterRollUpdateRequest(status) + + excluded_missing_dict = self.exclude_missing( + json.loads(update_roll_request.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_put( + body=body_json, + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll/" + roll_id, + entity_name='ocean (Cluster Roll)') + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_roll(self, ocean_id: str, roll_id: str): + """ + Get status for a roll of an Ocean cluster. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + account_id (String): The ID of the account associated with your token. + roll_id (String): Ocean cluster roll identifier + + # Returns + (Object): Cluster Roll API response + """ + response = self.send_get( + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/roll/" + roll_id, + entity_name="ocean (Cluster Roll)" + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def get_cluster_nodes(self, ocean_id: str, instance_name: str = None, launch_spec_id: str = None): + """ + Get nodes data of an Ocean cluster. + + # Arguments + ocean_id (String): ID of the Ocean Cluster + instance_name (String): Get a specific node by instance id + launch_spec_id (String): Ocean cluster launch specification identifier. 
+ + # Returns + (Object): Ocean Kubernetes AWS Nodes Data response + """ + query_params = dict(instanceName=instance_name, + launchSpecId=launch_spec_id) + + response = self.send_get( + url=self.__base_ocean_cluster_url + "/" + ocean_id + "/nodes", + entity_name="ocean (Cluster Nodes)", + query_params=query_params + ) + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def update_elastigroup_to_ocean(self, group_id: str): + """ + Upgrade an Elastigroup with Kubernetes integration to Ocean for Kubernetes cluster. + + # Arguments + group_id (str): Elastigroup identifier + + # Returns + (Object): Ocean API response + """ + + query_params = dict(groupId=group_id) + + response = self.send_post_with_params( + body=None, + url=self.__base_ocean_cluster_url + "/import", + entity_name='ocean_gcp_update_eg_to_ocean', + user_query_params=query_params) + + formatted_response = self.convert_json(response, + self.camel_to_underscore) + + return formatted_response["response"]["items"][0] + + def import_gke_cluster_to_ocean(self, cluster_name: str, include_launchSpecs: bool, location: str, + node_pool_name: str, import_gke_to_ocean: gcp_ocean.ImportGkeClusterToOcean): + """ + Create an Ocean configuration according to an GKE Cluster configuration. + + # Arguments + cluster_name (String): Name of the GKE Cluster. + include_launchSpecs (String): When set to "true", GKE cluster node pools will be imported to Ocean custom VNG ("customLaunchSpec") configurations. + location (String): Location GKE Cluster Master. + node_pool_name (String): Name of the Node Pool to use as a default for the Cluster configuration. 
+ import_gke_to_ocean (ImportGkeClusterToOcean): ImportGkeClusterToOcean Object + + # Returns + (Object): Ocean GKE Cluster Import Response + """ + import_gke_to_ocean_body = gcp_ocean.ImportGkeClusterToOceanRequest( + import_gke_to_ocean) + + excluded_missing_dict = self.exclude_missing( + json.loads(import_gke_to_ocean_body.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + geturl = self.__base_ocean_cluster_url + "/gke/import" + + query_params = dict( + clusterName=cluster_name, includeLaunchSpecs=include_launchSpecs, location=location, nodePoolName=node_pool_name) + + result = self.send_post_with_params( + body=body_json, + url=geturl, + entity_name='import_gke_cluster_to_ocean', + user_query_params=query_params) + + formatted_response = self.convert_json( + result, self.camel_to_underscore) + + return formatted_response["response"]["items"] + + def launch_nodes_in_vng(self, ocean_launch_spec_id: str, amount: int): + """ + Launch nodes in Virtual Node Group. + + # Arguments + ocean_launch_spec_id (String): Ocean cluster launch specification identifier. + amount (int): The number of nodes to launch. 
+ + # Returns + (Object): Ocean Virtual Node Group Launch API response + """ + launch_node_request = gcp_ocean.LaunchNodesRequest(amount) + + excluded_missing_dict = self.exclude_missing( + json.loads(launch_node_request.toJSON())) + + formatted_missing_dict = self.convert_json( + excluded_missing_dict, self.underscore_to_camel) + + body_json = json.dumps(formatted_missing_dict) + + response = self.send_put( + body=body_json, + url=self.__base_ocean_launchspec_url + "/" + + ocean_launch_spec_id + "/launchNodes", + entity_name='ocean (Cluster Roll)') + + formatted_response = self.convert_json( + response, self.camel_to_underscore) + + return formatted_response["response"]["items"][0] diff --git a/spotinst_sdk2/models/ocean/gcp/__init__.py b/spotinst_sdk2/models/ocean/gcp/__init__.py new file mode 100644 index 00000000..077a4503 --- /dev/null +++ b/spotinst_sdk2/models/ocean/gcp/__init__.py @@ -0,0 +1,1002 @@ +import json +from enum import Enum +from typing import List + +none = "d3043820717d74d9a17694c176d39733" + + +# region AutoScaler +class Down: + """ + # Arguments + evaluation_periods: int + max_scale_down_percentage: int + """ + + def __init__( + self, + evaluation_periods: int = none, + max_scale_down_percentage: int = none + ): + self.evaluation_periods = evaluation_periods + self.max_scale_down_percentage = max_scale_down_percentage + + +class HeadRoom: + """ + # Arguments + cpu_per_unit: int + gpu_per_unit: int + memory_per_unit: int + num_of_unit: int + """ + + def __init__( + self, + cpu_per_unit: int = none, + gpu_per_unit: int = none, + memory_per_unit: int = none, + num_of_unit: int = none + ): + self.cpu_per_unit = cpu_per_unit + self.gpu_per_unit = gpu_per_unit + self.memory_per_unit = memory_per_unit + self.num_of_unit = num_of_unit + + +class ResourceLimits: + """ + # Arguments + max_memory_gib: int + max_vcpu: int + """ + + def __init__( + self, + max_memory_gib: int = none, + max_vcpu: int = none + ): + self.max_memory_gib = max_memory_gib + 
self.max_vcpu = max_vcpu + + +class AutoScaler: + """ + # Arguments + auto_headroom_percentage: int + cooldown: int + down: Down + enable_automatic_and_manual_headroom: bool + head_room: HeadRoom + is_auto_config: bool + is_enabled: bool + resource_limits: ResourceLimits + """ + + def __init__( + self, + auto_headroom_percentage: int = none, + cooldown: int = none, + down: Down = none, + enable_automatic_and_manual_headroom: bool = none, + head_room: HeadRoom = none, + is_auto_config: bool = none, + is_enabled: bool = none, + resource_limits: ResourceLimits = none + ): + self.auto_headroom_percentage = auto_headroom_percentage + self.cooldown = cooldown + self.down = down + self.enable_automatic_and_manual_headroom = enable_automatic_and_manual_headroom + self.head_room = head_room + self.is_auto_config = is_auto_config + self.is_enabled = is_enabled + self.resource_limits = resource_limits +# endregion + + +# region Capacity +class Capacity: + """ + # Arguments + maximum: int + minimum: int + target: int + """ + def __init__( + self, + maximum: int = none, + minimum: int = none, + target: int = none + ): + self.maximum = maximum + self.minimum = minimum + self.target = target +# endregion + + +# region Compute +class LocationType(Enum): + regional = "regional" + _global = "global" + + +class NamedPorts: + """ + # Arguments + name: str + ports: List[int] + """ + def __init__( + self, + name: str = none, + ports: List[int] = none): + self.name = name + self.ports = ports + + +class Scheme(Enum): + external = "EXTERNAL" + internal = "INTERNAL" + + +class BackendServices: + """ + # Arguments + backend_service_name: str + location_type: LocationType + named_ports: NamedPorts + scheme: Scheme + """ + def __init__( + self, + backend_service_name: str = none, + location_type: LocationType = none, + named_ports: NamedPorts = none, + scheme: Scheme = none + ): + self.backend_service_name = backend_service_name + self.location_type = location_type + self.named_ports = 
named_ports + self.scheme = scheme + + +class InstanceTypes: + """ + # Arguments + blacklist: List[str] + whitelist: List[str] + """ + def __init__( + self, + blacklist: List[str] = none, + whitelist: List[str] = none): + self.blacklist = blacklist + self.whitelist = whitelist + + +class Labels: + """ + # Arguments + key: str + value: str + """ + def __init__( + self, + key: str = none, + value: str = none): + self.key = key + self.value = value + + +class Metadata: + """ + # Arguments + key: str + value: str + """ + def __init__( + self, + key: str = none, + value: str = none): + self.key = key + self.value = value + + +class RootVolumeType(Enum): + pd_standard = "pd-standard" + pd_ssd = "pd-ssd" + + +class ShieldedInstanceConfig: + """ + # Arguments + enable_integrity_monitoring: bool + enable_secure_boot: bool + """ + def __init__( + self, + enable_integrity_monitoring: bool = none, + enable_secure_boot: bool = none): + self.enable_integrity_monitoring = enable_integrity_monitoring + self.enable_secure_boot = enable_secure_boot + + +class LaunchSpecification: + """ + # Arguments + ip_forwarding: bool + labels: List[Labels] + metadata: List[Metadata] + min_cpu_platform: str + root_volume_size_in_gb: int + root_volume_type: RootVolumeType + service_account: str + shielded_instance_config: ShieldedInstanceConfig + source_image: str + tags: List[str] + use_as_template_only: bool + """ + def __init__( + self, + ip_forwarding: bool = none, + labels: List[Labels] = none, + metadata: List[Metadata] = none, + min_cpu_platform: str = none, + root_volume_size_in_gb: int = none, + root_volume_type: RootVolumeType = none, + service_account: str = none, + shielded_instance_config: ShieldedInstanceConfig = none, + source_image: str = none, + tags: List[str] = none, + use_as_template_only: bool = none + ): + self.ip_forwarding = ip_forwarding + self.labels = labels + self.metadata = metadata + self.min_cpu_platform = min_cpu_platform + self.root_volume_size_in_gb = 
root_volume_size_in_gb + self.root_volume_type = root_volume_type + self.service_account = service_account + self.shielded_instance_config = shielded_instance_config + self.source_image = source_image + self.tags = tags + self.use_as_template_only = use_as_template_only + + +class AccessConfigs: + """ + # Arguments + name: str + type: str + """ + def __init__( + self, + name: str = none, + type: str = none + ): + self.name = name + self.type = type + + +class AliasIpRanges: + """ + # Arguments + ip_cidr_range: str + subnetwork_range_name: str + """ + def __init__( + self, + ip_cidr_range: str = none, + subnetwork_range_name: str = none + ): + self.ip_cidr_range = ip_cidr_range + self.subnetwork_range_name = subnetwork_range_name + + +class NetworkInterfaces: + """ + # Arguments + access_configs: AccessConfigs + alias_ip_ranges: AliasIpRanges + network: str + project_id: str + """ + def __init__( + self, + access_configs: AccessConfigs = none, + alias_ip_ranges: AliasIpRanges = none, + network: str = none, + project_id: str = none, + ): + self.access_configs = access_configs + self.alias_ip_ranges = alias_ip_ranges + self.network = network + self.project_id = project_id + + +class Compute: + """ + # Arguments + availability_zones: List[str] + backend_services: BackendServices + instance_types: InstanceTypes + launch_specification: LaunchSpecification + network_interfaces: NetworkInterfaces + subnet_name: str + """ + def __init__( + self, + availability_zones: List[str] = none, + backend_services: BackendServices = none, + instance_types: InstanceTypes = none, + launch_specification: LaunchSpecification = none, + network_interfaces: NetworkInterfaces = none, + subnet_name: str = none + ): + self.availability_zones = availability_zones + self.backend_services = backend_services + self.instance_types = instance_types + self.launch_specification = launch_specification + self.network_interfaces = network_interfaces + self.subnet_name = subnet_name +# endregion + + +# 
region GKE +class GKE: + """ + # Arguments + cluster_name: str + master_location: str + """ + def __init__( + self, + cluster_name: str = none, + master_location: str = none + ): + self.cluster_name = cluster_name + self.master_location = master_location +# endregion + + +# region Scheduling +class ShutdownHours: + """ + # Arguments + is_enabled: bool + time_windows: List[str] + """ + def __init__( + self, + is_enabled: bool = none, + time_windows: List[str] = none + ): + self.is_enabled = is_enabled + self.time_windows = time_windows + + +class ClusterRoll: + """ + # Arguments + batch_min_healthy_percentage: int + batch_size_percentage: int + comment: str + respect_pdb: bool + """ + def __init__( + self, + batch_min_healthy_percentage: int = none, + batch_size_percentage: int = none, + comment: str = none, + respect_pdb: bool = none + ): + self.batch_min_healthy_percentage = batch_min_healthy_percentage + self.batch_size_percentage = batch_size_percentage + self.comment = comment + self.respect_pdb = respect_pdb + + +class Parameters: + """ + # Arguments + cluster_roll: ClusterRoll + """ + def __init__( + self, + cluster_roll: ClusterRoll = none): + self.cluster_roll = cluster_roll + + +class Tasks: + """ + # Arguments + cron_expression: str + is_enabled: bool + parameters: Parameters + task_type: str + """ + def __init__( + self, + cron_expression: str = none, + is_enabled: bool = none, + parameters: Parameters = none, + task_type: str = none + ): + self.cron_expression = cron_expression + self.is_enabled = is_enabled + self.parameters = parameters + self.task_type = task_type + + +class Scheduling: + """ + # Arguments + shutdown_hours: ShutdownHours + tasks: List[Tasks] + """ + def __init__( + self, + shutdown_hours: ShutdownHours = none, + tasks: List[Tasks] = none): + self.shutdown_hours = shutdown_hours + self.tasks = tasks +# endregion + + +# region Security +class ContainerImage: + """ + approved_images: List[str] + """ + def __init__( + self, + 
approved_images: List[str] = none + ): + self.approved_images = approved_images + + +class Security: + """ + # Arguments + container_image: ContainerImage + """ + def __init__( + self, + container_image: ContainerImage = none): + self.container_image = container_image +# endregion + + +# region Strategy +class ProvisioningModel(Enum): + spot = "SPOT" + preemptible = "PREEMPTIBLE" + + +class Strategy: + """ + # Arguments + draining_timeout: int + preemptible_percentage: int + provisioning_model: ProvisioningModel + """ + def __init__( + self, + draining_timeout: int = none, + preemptible_percentage: int = none, + provisioning_model: ProvisioningModel = none + ): + self.draining_timeout = draining_timeout + self.preemptible_percentage = preemptible_percentage + self.provisioning_model = provisioning_model +# endregion + + +# region Ocean +class Ocean: + """ + # Arguments + auto_scaler: AutoScaler + capacity: Capacity + compute: Compute + controller_cluster_id: str + gke: GKE + name: str + scheduling: Scheduling + security: Security + strategy: Strategy + """ + def __init__( + self, + auto_scaler: AutoScaler = none, + capacity: Capacity = none, + compute: Compute = none, + controller_cluster_id: str = none, + gke: GKE = none, + name: str = none, + scheduling: Scheduling = none, + security: Security = none, + strategy: Strategy = none + ): + self.auto_scaler = auto_scaler + self.capacity = capacity + self.compute = compute + self.controller_cluster_id = controller_cluster_id + self.gke = gke + self.name = name + self.scheduling = scheduling + self.security = security + self.strategy = strategy +# endregion + + +# region OceanRequest +class OceanRequest: + def __init__(self, ocean: Ocean): + self.cluster = ocean + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) +# endregion + + +class Type(Enum): + label = "label" + annotation = "annotation" + + +class Operator(Enum): + equals = "equals" + not_equals = "notEquals" 
+ exists = "exists" + does_not_exist = "doesNotExist" + + +class Attribute: + """ + # Arguments + type: Type + key: str + operator: Operator + value: str + """ + + def __init__( + self, + type: Type = none, + key: str = none, + operator: Operator = none, + value: str = none): + self.type = type + self.key = key + self.operator = operator + self.value = value + + +class RightSizingRecommendationFilter: + """ + # Attribute + namespaces: List[str] + attribute: Attribute + """ + + def __init__( + self, + namespaces: List[str] = none, + attribute: Attribute = none): + self.namespaces = namespaces + self.attribute = attribute + + +class RightSizingRecommendationRequest: + def __init__(self, filter: RightSizingRecommendationFilter = none): + self.filter = filter + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +class AllMatch: + """ + # Arguments + all_matches: List[Attribute] + """ + + def __init__( + self, + all_matches: List[Attribute] = none): + self.all_matches = all_matches + + +class Conditions: + """ + # Arguments + any_match: List[AllMatch] + """ + + def __init__( + self, + any_match: List[AllMatch] = none): + self.any_match = any_match + + +class Scope(Enum): + namespace = "namespace" + resource = "resource" + + +class Filter: + """ + # Arguments + conditions: Conditions + scope: Scope + """ + + def __init__( + self, + conditions: Conditions = none, + scope: Scope = none): + self.conditions = conditions + self.scope = scope + + +class GroupBy(Enum): + namespace = "namespace" + namespace_label = "namespace.label.${labelKey}" + resource_label = "resource.label.${labelKey}" + namespace_annotation = "namespace.annotation.${annotationKey}" + resource_annotation = "resource.annotation.${annotationKey}" + + +class AggregatedClusterCosts: + """ + # Arguments + end_time: str + aggregated_filter: Filter + group_by: GroupBy + start_time: str + """ + + def __init__( + self, + end_time: str = none, + 
aggregated_filter: Filter = none, + group_by: GroupBy = GroupBy.namespace.value, + start_time: str = none): + self.end_time = end_time + self.aggregated_filter = aggregated_filter + self.group_by = group_by + self.start_time = start_time + + +class AggregatedClusterCostRequest: + def __init__(self, aggregated_cluster_costs: AggregatedClusterCosts = none): + self.end_time = aggregated_cluster_costs.end_time + self.start_time = aggregated_cluster_costs.start_time + self.group_by = aggregated_cluster_costs.group_by + self.filter = aggregated_cluster_costs.aggregated_filter + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +# region VirtualNodeGroup +class Headroom: + """ + # Arguments + cpu_per_unit: int + gpu_per_unit: int + memory_per_unit: int + num_of_units: int + """ + def __init( + self, + cpu_per_unit: int = none, + gpu_per_unit: int = none, + memory_per_unit: int = none, + num_of_units: int = none + ): + self.cpu_per_unit = cpu_per_unit + self.gpu_per_unit = gpu_per_unit + self.memory_per_unit = memory_per_unit + self.num_of_units = num_of_units + + +class AutoScale: + """ + # Arguments + auto_headroom_percentage: int + headrooms: List[Headroom] + """ + def __init__( + self, + auto_headroom_percentage: int = none, + headrooms: List[Headroom] = none + ): + self.auto_headroom_percentage = auto_headroom_percentage + self.headrooms = headrooms + + +class VNGNetworkInterfaces: + """ + # Arguments + access_configs: AccessConfigs + alias_ip_ranges: AliasIpRanges + network: str + project_id: str + """ + def __init__( + self, + access_configs: List[AccessConfigs] = none, + alias_ip_ranges: List[AliasIpRanges] = none, + network: str = none, + project_id: str = none, + ): + self.access_configs = access_configs + self.alias_ip_ranges = alias_ip_ranges + self.network = network + self.project_id = project_id + + +class VNGResourceLimits: + """ + # Arguments + max_instance_count: int + min_instance_count: int + """ 
+ def __init__( + self, + max_instance_count: int = none, + min_instance_count: int = none + ): + self.max_instance_count = max_instance_count + self.min_instance_count = min_instance_count + + +class VNGTasks: + """ + # Arguments + config: List[Headroom] + cron_expression: str + is_enabled: bool + task_type: str + """ + def __init__( + self, + config: List[Headroom] = none, + cron_expression: str = none, + is_enabled: bool = none, + task_type: str = none + ): + self.config = config + self.cron_expression = cron_expression + self.is_enabled = is_enabled + self.task_type = task_type + + +class VNGScheduling: + """ + # Arguments + tasks: List[VNGTasks] + """ + def __init__( + self, + tasks: VNGTasks): + self.tasks = tasks + + +class Storage: + """ + # Arguments + local_ssd_count: int + """ + def __init__( + self, + local_ssd_count: int = none): + self.local_ssd_count = local_ssd_count + + +class VNGStrategy: + """ + # Arguments + preemptible_percentage: int + """ + def __init__( + self, + preemptible_percentage: int = none + ): + self.preemptible_percentage = preemptible_percentage + + +class Taints: + """ + # Arguments + effect: str + key: str + value: str + """ + def __init__( + self, + effect: str = none, + key: str = none, + value: str = none + ): + self.effect = effect + self.key = key + self.value = value + + +class VirtualNodeGroup: + """ + # Arguments + auto_scale: AutoScale + availability_zones: List[str] + instance_types: List[str] + labels: List[Labels] + metadata: List[Metadata] + name: str + network_interfaces: List[VNGNetworkInterfaces] + ocean_id: str + resourceLimits: ResourceLimits + restrict_scale_down: bool + root_volume_size: int + root_volume_type: str + scheduling: Scheduling + service_account: str + shielded_instance_config: ShieldedInstanceConfig + source_image: str + storage: Storage + strategy: VNGStrategy + tags: List[str] + taints: List[Taints] + """ + def __init__( + self, + auto_scale: AutoScale = none, + availability_zones: List[str] = 
none, + instance_types: List[str] = none, + labels: List[Labels] = none, + metadata: List[Metadata] = none, + name: str = none, + network_interfaces: List[VNGNetworkInterfaces] = none, + ocean_id: str = none, + resourceLimits: VNGResourceLimits = none, + restrict_scale_down: bool = none, + root_volume_size: int = none, + root_volume_type: str = none, + scheduling: VNGScheduling = none, + service_account: str = none, + shielded_instance_config: ShieldedInstanceConfig = none, + source_image: str = none, + storage: Storage = none, + strategy: VNGStrategy = none, + tags: List[str] = none, + taints: List[Taints] = none + ): + self.auto_scale = auto_scale + self.availability_zones = availability_zones + self.instance_types = instance_types + self.labels = labels + self.metadata = metadata + self.name = name + self.network_interfaces = network_interfaces + self.ocean_id = ocean_id + self.resourceLimits = resourceLimits + self.restrict_scale_down = restrict_scale_down + self.root_volume_size = root_volume_size + self.root_volume_type = root_volume_type + self.scheduling = scheduling + self.service_account = service_account + self.shielded_instance_config = shielded_instance_config + self.source_image = source_image + self.storage = storage + self.strategy = strategy + self.tags = tags + self.taints = taints +# endregion + + +class VNGRequest: + def __init__(self, vng: VirtualNodeGroup): + self.launch_spec = vng + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +class Roll: + """ + # Arguments + batch_min_healthy_percentage: int + batch_size_percentage: int + comment: str + instance_names: List[str] + launch_spec_ids: List[str] + respect_pdb: bool + """ + + def __init__( + self, + batch_min_healthy_percentage: int = none, + batch_size_percentage: int = none, + comment: str = none, + launch_spec_ids: List[str] = none, + instance_ids: List[str] = none, + respect_pdb: bool = none): + self.batch_min_healthy_percentage 
= batch_min_healthy_percentage + self.batch_size_percentage = batch_size_percentage + self.comment = comment + self.instance_ids = instance_ids + self.launch_spec_ids = launch_spec_ids + self.respect_pdb = respect_pdb + + +class ClusterRollInitiateRequest: + def __init__(self, roll: Roll = none): + self.roll = roll + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +class ClusterRollUpdateRequest: + def __init__(self, status: str = none): + self.roll = dict(status=status) + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +class ImportGkeClusterToOcean: + """ + # Arguments + autoScaler: AutoScaler + availability_zones: List[str] + backend_services: List[BackendServices] + capacity: Capacity + controller_cluster_id: str + instance_types: InstanceTypes + name: str + """ + def __init__( + self, + autoScaler: AutoScaler = none, + availability_zones: List[str] = none, + backend_services: List[BackendServices] = none, + capacity: Capacity = none, + controller_cluster_id: str = none, + instance_types: InstanceTypes = none, + name: str = none + ): + self.autoScaler = autoScaler + self.availability_zones = availability_zones + self.backend_services = backend_services + self.capacity = capacity + self.controller_cluster_id = controller_cluster_id + self.instance_types = instance_types + self.name = name + + +class ImportGkeClusterToOceanRequest: + def __init__(self, cluster: ImportGkeClusterToOcean = none): + self.cluster = cluster + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4) + + +class LaunchNodesRequest: + def __init__(self, amount: int = none): + self.launch_request = dict(amount=amount) + + def toJSON(self): + return json.dumps(self, default=lambda o: o.__dict__, + sort_keys=True, indent=4)